# NOTE(review): markdown table header left over from a code-corpus export
# ("repo_name | path | copies | size | content | license") -- not part of
# either original source file; drop it when restoring the real files.
# --- file 1 (from dump metadata): phobson/statsmodels ::
#     statsmodels/discrete/tests/results/results_poisson_constrained.py ---
import numpy as np
class Bunch(dict):
    """Attribute-accessible dict of estimation results.

    Keyword arguments become both items and attributes.  The mandatory
    ``params_table`` array is additionally unpacked column-wise into the
    ``params``, ``bse``, ``tvalues`` and ``pvalues`` entries (columns 0-3).
    """

    def __init__(self, **kw):
        dict.__init__(self, kw)
        # Share the instance __dict__ with the mapping itself so that
        # ``bunch.key`` and ``bunch['key']`` refer to the same storage.
        self.__dict__ = self
        names = ('params', 'bse', 'tvalues', 'pvalues')
        table = self.params_table
        for column in range(len(names)):
            self[names[column]] = table[:, column]
# Stata reference output for `poisson deaths lnpyears smokes i.agecat`
# (log person-years entered as a regressor; no constraints), packaged below
# as `results_noexposure_noconstraint`.  Scalars mirror Stata's e() returns;
# presumably generated by a Stata-to-Python dump script -- TODO confirm.
est = dict(
    rank = 7,
    N = 10,
    ic = 3,
    k = 8,
    k_eq = 1,
    k_dv = 1,
    converged = 1,
    rc = 0,
    k_autoCns = 1,
    ll = -33.45804471711131,
    k_eq_model = 1,
    ll_0 = -349.6684656479622,
    df_m = 6,
    chi2 = 632.4208418617018,
    p = 2.3617193197e-133,
    r2_p = .9043149497192691,
    cmdline = "poisson deaths lnpyears smokes i.agecat",
    cmd = "poisson",
    predict = "poisso_p",
    estat_cmd = "poisson_estat",
    gof = "poiss_g",
    chi2type = "LR",
    opt = "moptimize",
    vce = "oim",
    title = "Poisson regression",
    user = "poiss_lf",
    crittype = "log likelihood",
    ml_method = "e2",
    singularHmethod = "m-marquardt",
    technique = "nr",
    which = "max",
    depvar = "deaths",
    properties = "b V",
    )

# One row per coefficient, nine Stata ereturn columns
# (b se z pvalue ll ul df crit eform -- see colnames below).
params_table = np.array([
    .66308184237808, .63593388706566, 1.0426899019923, .29709193621918,
    -.58332567281917, 1.9094893575753, np.nan, 1.9599639845401,
    0, .84966723812924, .94279599903649, .90122066597395,
    .36747100512904, -.99817896475073, 2.6975134410092, np.nan,
    1.9599639845401, 0, 0, np.nan,
    np.nan, np.nan, np.nan, np.nan,
    np.nan, 1.9599639845401, 0, 1.3944392032504,
    .25613243411925, 5.4442117338454, 5.203529593e-08, .8924288571041,
    1.8964495493967, np.nan, 1.9599639845401, 0,
    2.389284381366, .48305517266329, 4.9461935542328, 7.567871319e-07,
    1.4425136404002, 3.3360551223318, np.nan, 1.9599639845401,
    0, 2.8385093615484, .98099727008295, 2.8934936397003,
    .00380982006764, .91579004325369, 4.7612286798431, np.nan,
    1.9599639845401, 0, 2.9103531988515, 1.500316321385,
    1.9398263935201, .05240079188831, -.03021275648066, 5.8509191541838,
    np.nan, 1.9599639845401, 0, -4.724924181641,
    6.0276019460727, -.78388125558284, .43310978942119, -16.538806909087,
    7.088958545805, np.nan, 1.9599639845401, 0
    ]).reshape(8,9)

params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()

params_table_rownames = 'lnpyears smokes 1b.agecat 2.agecat 3.agecat 4.agecat 5.agecat _cons'.split()

# Observed-information (oim) covariance of the estimates; the all-zero
# row/column belongs to the omitted base level `1b.agecat`.
cov = np.array([
    .40441190871844, -.59566294916097, 0, .1055698685775,
    .28413388045122, .61269322798077, .94624135329227, -3.8311942353131,
    -.59566294916097, .88886429579921, 0, -.15587944298625,
    -.4190789999425, -.90299843943229, -1.3940094688194, 5.6335527795822,
    0, 0, 0, 0,
    0, 0, 0, 0,
    .1055698685775, -.15587944298625, 0, .06560382380785,
    .10360281461667, .18937107288073, .27643306166968, -1.029211453947,
    .28413388045122, -.4190789999425, 0, .10360281461667,
    .23334229983676, .45990880867889, .69424104947043, -2.7206801001387,
    .61269322798077, -.90299843943229, 0, .18937107288073,
    .45990880867889, .96235564391021, 1.4630024143274, -5.8333014154113,
    .94624135329227, -1.3940094688194, 0, .27643306166968,
    .69424104947043, 1.4630024143274, 2.2509490642142, -8.993394678922,
    -3.8311942353131, 5.6335527795822, 0, -1.029211453947,
    -2.7206801001387, -5.8333014154113, -8.993394678922, 36.331985220299
    ]).reshape(8,8)

cov_colnames = 'lnpyears smokes 1b.agecat 2.agecat 3.agecat 4.agecat 5.agecat _cons'.split()

cov_rownames = 'lnpyears smokes 1b.agecat 2.agecat 3.agecat 4.agecat 5.agecat _cons'.split()

results_noexposure_noconstraint = Bunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    **est
    )
# Variant: person-years as exposure offset (`exposure(pyears)`), still no
# constraints; packaged as `results_exposure_noconstraint`.
est = dict(
    rank = 6,
    N = 10,
    ic = 3,
    k = 7,
    k_eq = 1,
    k_dv = 1,
    converged = 1,
    rc = 0,
    k_autoCns = 1,
    ll = -33.6001534405213,
    k_eq_model = 1,
    ll_0 = -495.0676356770329,
    df_m = 5,
    chi2 = 922.9349644730232,
    p = 2.8920463572e-197,
    r2_p = .9321301757191799,
    cmdline = "poisson deaths smokes i.agecat, exposure(pyears)",
    cmd = "poisson",
    predict = "poisso_p",
    estat_cmd = "poisson_estat",
    offset = "ln(pyears)",
    gof = "poiss_g",
    chi2type = "LR",
    opt = "moptimize",
    vce = "oim",
    title = "Poisson regression",
    user = "poiss_lf",
    crittype = "log likelihood",
    ml_method = "e2",
    singularHmethod = "m-marquardt",
    technique = "nr",
    which = "max",
    depvar = "deaths",
    properties = "b V",
    )

# 7 coefficients x 9 Stata ereturn columns (see colnames below).
params_table = np.array([
    .35453563725291, .10737411818853, 3.3018723993653, .00096041750265,
    .14408623273163, .56498504177418, np.nan, 1.9599639845401,
    0, 0, np.nan, np.nan,
    np.nan, np.nan, np.nan, np.nan,
    1.9599639845401, 0, 1.4840070063099, .19510337263434,
    7.606260139291, 2.821411159e-14, 1.1016114226842, 1.8664025899355,
    np.nan, 1.9599639845401, 0, 2.6275051184579,
    .18372726944827, 14.301116684248, 2.153264398e-46, 2.2674062873614,
    2.9876039495544, np.nan, 1.9599639845401, 0,
    3.350492785161, .18479918093323, 18.130452571495, 1.832448146e-73,
    2.9882930461593, 3.7126925241626, np.nan, 1.9599639845401,
    0, 3.7000964518246, .19221951212105, 19.24932807807,
    1.430055953e-82, 3.3233531309415, 4.0768397727077, np.nan,
    1.9599639845401, 0, -7.919325711822, .19176181876223,
    -41.297719029467, 0, -8.2951719702059, -7.5434794534381,
    np.nan, 1.9599639845401, 0]).reshape(7,9)

params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()

params_table_rownames = 'smokes 1b.agecat 2.agecat 3.agecat 4.agecat 5.agecat _cons'.split()

# oim covariance; zero row/column is the omitted base level `1b.agecat`.
cov = np.array([
    .01152920125677, 0, -.00061561668833, -.00090117889461,
    -.00087280941113, -.00045274641397, -.00921219275997, 0,
    0, 0, 0, 0,
    0, 0, -.00061561668833, 0,
    .0380653260133, .02945988432334, .02945836949789, .0294359396881,
    -.0289198676971, -.00090117889461, 0, .02945988432334,
    .03375570953892, .0294799877675, .02944715358419, -.02869169455392,
    -.00087280941113, 0, .02945836949789, .0294799877675,
    .03415073727359, .02944603952766, -.02871436265941, -.00045274641397,
    0, .0294359396881, .02944715358419, .02944603952766,
    .03694834084006, -.02905000614546, -.00921219275997, 0,
    -.0289198676971, -.02869169455392, -.02871436265941, -.02905000614546,
    .036772595135]).reshape(7,7)

cov_colnames = 'smokes 1b.agecat 2.agecat 3.agecat 4.agecat 5.agecat _cons'.split()

cov_rownames = 'smokes 1b.agecat 2.agecat 3.agecat 4.agecat 5.agecat _cons'.split()

results_exposure_noconstraint = Bunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    **est
    )
# Variant: no exposure offset but with Stata constraint #1 applied
# (`constraints(1)`); note chi2type switches from "LR" to "Wald" and
# `ll_0`/`r2_p` are not available.  Packaged as
# `results_noexposure_constraint`.
est = dict(
    rank = 6,
    N = 10,
    ic = 4,
    k = 8,
    k_eq = 1,
    k_dv = 1,
    converged = 1,
    rc = 0,
    k_autoCns = 1,
    ll = -33.46699798755848,
    k_eq_model = 1,
    df_m = 5,
    chi2 = 452.5895246742914,
    p = 1.35732711092e-95,
    r2_p = np.nan,
    cmdline = "poisson deaths lnpyears smokes i.agecat, constraints(1)",
    cmd = "poisson",
    predict = "poisso_p",
    estat_cmd = "poisson_estat",
    gof = "poiss_g",
    chi2type = "Wald",
    opt = "moptimize",
    vce = "oim",
    title = "Poisson regression",
    user = "poiss_lf",
    crittype = "log likelihood",
    ml_method = "e2",
    singularHmethod = "m-marquardt",
    technique = "nr",
    which = "max",
    depvar = "deaths",
    properties = "b V",
    )

# 8 coefficients x 9 columns; rows for `4.agecat` and `5.agecat` are
# identical -- the effect of the equality constraint.
params_table = np.array([
    .57966535352347, .13107152221057, 4.4225117992619, 9.756001957e-06,
    .32276989059191, .83656081645503, np.nan, 1.9599639845401,
    0, .97254074124891, .22289894431919, 4.3631464663029,
    .00001282050472, .5356668381913, 1.4094146443065, np.nan,
    1.9599639845401, 0, 0, np.nan,
    np.nan, np.nan, np.nan, np.nan,
    np.nan, 1.9599639845401, 0, 1.3727621378494,
    .19798042377276, 6.9338276567436, 4.096036246e-12, .98472763761078,
    1.760796638088, np.nan, 1.9599639845401, 0,
    2.3307703209845, .20530981936838, 11.352454199, 7.210981748e-30,
    1.92837046935, 2.7331701726189, np.nan, 1.9599639845401,
    0, 2.71338890728, .29962471107816, 9.0559583604312,
    1.353737255e-19, 2.1261352646886, 3.3006425498714, np.nan,
    1.9599639845401, 0, 2.71338890728, .29962471107816,
    9.0559583604312, 1.353737255e-19, 2.1261352646886, 3.3006425498714,
    np.nan, 1.9599639845401, 0, -3.9347864312059,
    1.2543868840549, -3.1368204508696, .00170790683415, -6.3933395466329,
    -1.476233315779, np.nan, 1.9599639845401, 0
    ]).reshape(8,9)

params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()

params_table_rownames = 'lnpyears smokes 1b.agecat 2.agecat 3.agecat 4.agecat 5.agecat _cons'.split()

# oim covariance; zero row/column is the omitted base level `1b.agecat`,
# and the constrained coefficients share identical rows.
cov = np.array([
    .0171797439346, -.02561346650005, 0, .00445310785396,
    .01204526460873, .03142116278001, .03142116278001, -.16245493266167,
    -.02561346650005, .04968393937861, 0, -.0069699735991,
    -.01845598801461, -.04723465558226, -.04723465558226, .2326939064726,
    0, 0, 0, 0,
    0, 0, 0, 0,
    .00445310785396, -.0069699735991, 0, .03919624819724,
    .03254829669461, .03756752462584, .03756752462584, -.07124751761252,
    .01204526460873, -.01845598801461, 0, .03254829669461,
    .04215212192908, .05145895528528, .05145895528528, -.14290240509701,
    .03142116278001, -.04723465558226, 0, .03756752462584,
    .05145895528528, .08977496748867, .08977496748867, -.32621483141938,
    .03142116278001, -.04723465558226, 0, .03756752462584,
    .05145895528528, .08977496748867, .08977496748867, -.32621483141938,
    -.16245493266167, .2326939064726, 0, -.07124751761252,
    -.14290240509701, -.32621483141938, -.32621483141938, 1.5734864548889
    ]).reshape(8,8)

cov_colnames = 'lnpyears smokes 1b.agecat 2.agecat 3.agecat 4.agecat 5.agecat _cons'.split()

cov_rownames = 'lnpyears smokes 1b.agecat 2.agecat 3.agecat 4.agecat 5.agecat _cons'.split()

results_noexposure_constraint = Bunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    **est
    )
# Variant: exposure offset plus constraint #1; packaged as
# `results_exposure_constraint`.
est = dict(
    rank = 5,
    N = 10,
    ic = 3,
    k = 7,
    k_eq = 1,
    k_dv = 1,
    converged = 1,
    rc = 0,
    k_autoCns = 1,
    ll = -38.45090497564205,
    k_eq_model = 1,
    df_m = 4,
    chi2 = 641.6446542589836,
    p = 1.5005477751e-137,
    r2_p = np.nan,
    cmdline = "poisson deaths smokes i.agecat, exposure(pyears) constraints(1)",
    cmd = "poisson",
    predict = "poisso_p",
    estat_cmd = "poisson_estat",
    offset = "ln(pyears)",
    gof = "poiss_g",
    chi2type = "Wald",
    opt = "moptimize",
    vce = "oim",
    title = "Poisson regression",
    user = "poiss_lf",
    crittype = "log likelihood",
    ml_method = "e2",
    singularHmethod = "m-marquardt",
    technique = "nr",
    which = "max",
    depvar = "deaths",
    properties = "b V",
    )

# 7 coefficients x 9 columns; `4.agecat` and `5.agecat` rows are identical
# because of the equality constraint.
params_table = np.array([
    .34304077058284, .1073083520206, 3.196776058186, .00138972774083,
    .13272026538212, .55336127578356, np.nan, 1.9599639845401,
    0, 0, np.nan, np.nan,
    np.nan, np.nan, np.nan, np.nan,
    1.9599639845401, 0, 1.4846230896448, .19510453584194,
    7.6093724999174, 2.754298692e-14, 1.1022252261742, 1.8670209531154,
    np.nan, 1.9599639845401, 0, 2.6284071093765,
    .18373002757074, 14.305811326156, 2.012766793e-46, 2.2683028724593,
    2.9885113462937, np.nan, 1.9599639845401, 0,
    3.4712405808805, .17983994458502, 19.301833020969, 5.183735658e-83,
    3.1187607665121, 3.8237203952488, np.nan, 1.9599639845401,
    0, 3.4712405808805, .17983994458502, 19.301833020969,
    5.183735658e-83, 3.1187607665121, 3.8237203952488, np.nan,
    1.9599639845401, 0, -7.9101515866812, .19164951521841,
    -41.274049546467, 0, -8.2857777341639, -7.5345254391986,
    np.nan, 1.9599639845401, 0]).reshape(7,9)

params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()

params_table_rownames = 'smokes 1b.agecat 2.agecat 3.agecat 4.agecat 5.agecat _cons'.split()

# oim covariance; zero row/column is the omitted base level `1b.agecat`.
cov = np.array([
    .01151508241338, 0, -.00061947268694, -.00090708285562,
    -.00074959767622, -.00074959767622, -.00917958318314, 0,
    0, 0, 0, 0,
    0, 0, -.00061947268694, 0,
    .0380657799061, .02946056271023, .0294520905375, .0294520905375,
    -.02891793401778, -.00090708285562, 0, .02946056271023,
    .03375672303114, .02947081310555, .02947081310555, -.02868865719866,
    -.00074959767622, 0, .0294520905375, .02947081310555,
    .03234240566834, .03234240566834, -.02881420109427, -.00074959767622,
    0, .0294520905375, .02947081310555, .03234240566834,
    .03234240566834, -.02881420109427, -.00917958318314, 0,
    -.02891793401778, -.02868865719866, -.02881420109427, -.02881420109427,
    .03672953668345]).reshape(7,7)

cov_colnames = 'smokes 1b.agecat 2.agecat 3.agecat 4.agecat 5.agecat _cons'.split()

cov_rownames = 'smokes 1b.agecat 2.agecat 3.agecat 4.agecat 5.agecat _cons'.split()

results_exposure_constraint = Bunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    **est
    )
# Variant: no exposure offset, with Stata constraint #2 (`constraints(2)`,
# evidently fixing the difference between `5.agecat` and `4.agecat` -- the
# two coefficients below differ by exactly 0.5; TODO confirm against the
# Stata script).  Packaged as `results_noexposure_constraint2`.
est = dict(
    rank = 6,
    N = 10,
    ic = 3,
    k = 8,
    k_eq = 1,
    k_dv = 1,
    converged = 1,
    rc = 0,
    k_autoCns = 1,
    ll = -33.78306559091298,
    k_eq_model = 1,
    df_m = 5,
    chi2 = 526.719430888018,
    p = 1.3614066522e-111,
    r2_p = np.nan,
    cmdline = "poisson deaths lnpyears smokes i.agecat, constraints(2)",
    cmd = "poisson",
    predict = "poisso_p",
    estat_cmd = "poisson_estat",
    gof = "poiss_g",
    chi2type = "Wald",
    opt = "moptimize",
    vce = "oim",
    title = "Poisson regression",
    user = "poiss_lf",
    crittype = "log likelihood",
    ml_method = "e2",
    singularHmethod = "m-marquardt",
    technique = "nr",
    which = "max",
    depvar = "deaths",
    properties = "b V",
    )

# 8 coefficients x 9 Stata ereturn columns (see colnames below).
params_table = np.array([
    1.1598786864273, .13082965708054, 8.8655639119598, 7.611783820e-19,
    .90345727043975, 1.4163001024149, np.nan, 1.9599639845401,
    0, .12111539473831, .22317899375276, .54268277090847,
    .58734823873758, -.31630739512299, .55853818459962, np.nan,
    1.9599639845401, 0, 0, np.nan,
    np.nan, np.nan, np.nan, np.nan,
    np.nan, 1.9599639845401, 0, 1.5276244194375,
    .19848759770871, 7.6963217705896, 1.400389019e-14, 1.1385958765506,
    1.9166529623245, np.nan, 1.9599639845401, 0,
    2.7415571106656, .20647039325801, 13.278209371354, 3.097119459e-40,
    2.3368825760061, 3.1462316453252, np.nan, 1.9599639845401,
    0, 3.587300073596, .30160673316211, 11.893965482753,
    1.272196529e-32, 2.9961617391034, 4.1784384080885, np.nan,
    1.9599639845401, 0, 4.087300073596, .30160673316211,
    13.551753406643, 7.735990122e-42, 3.4961617391034, 4.6784384080885,
    np.nan, 1.9599639845401, 0, -9.4376201542802,
    1.2537557101599, -7.5274792990385, 5.172920628e-14, -11.894936191605,
    -6.9803041169553, np.nan, 1.9599639845401, 0
    ]).reshape(8,9)

params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()

params_table_rownames = 'lnpyears smokes 1b.agecat 2.agecat 3.agecat 4.agecat 5.agecat _cons'.split()

# oim covariance; zero row/column is the omitted base level `1b.agecat`.
cov = np.array([
    .01711639917181, -.02559852137367, 0, .00475026273828,
    .012305588195, .03167368550108, .03167368550108, -.16210959536359,
    -.02559852137367, .0498088632525, 0, -.00783669874902,
    -.01946551099054, -.0482099128044, -.0482099128044, .23336630265161,
    0, 0, 0, 0,
    0, 0, 0, 0,
    .00475026273828, -.00783669874902, 0, .03939732644417,
    .0328943776068, .0382554606876, .0382554606876, -.07382466315002,
    .012305588195, -.01946551099054, 0, .0328943776068,
    .04263002329212, .05226051095238, .05226051095238, -.14512177326509,
    .03167368550108, -.0482099128044, 0, .0382554606876,
    .05226051095238, .09096662148872, .09096662148872, -.32873181469848,
    .03167368550108, -.0482099128044, 0, .0382554606876,
    .05226051095238, .09096662148872, .09096662148872, -.32873181469848,
    -.16210959536359, .23336630265161, 0, -.07382466315002,
    -.14512177326509, -.32873181469848, -.32873181469848, 1.5719033807586
    ]).reshape(8,8)

cov_colnames = 'lnpyears smokes 1b.agecat 2.agecat 3.agecat 4.agecat 5.agecat _cons'.split()

cov_rownames = 'lnpyears smokes 1b.agecat 2.agecat 3.agecat 4.agecat 5.agecat _cons'.split()

results_noexposure_constraint2 = Bunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    **est
    )
# Variant: exposure offset plus constraint #2; packaged as
# `results_exposure_constraint2`.
est = dict(
    rank = 5,
    N = 10,
    ic = 3,
    k = 7,
    k_eq = 1,
    k_dv = 1,
    converged = 1,
    rc = 0,
    k_autoCns = 1,
    ll = -34.5367006700131,
    k_eq_model = 1,
    df_m = 4,
    chi2 = 554.4168921897579,
    p = 1.1331093797e-118,
    r2_p = np.nan,
    cmdline = "poisson deaths smokes i.agecat, exposure(pyears) constraints(2)",
    cmd = "poisson",
    predict = "poisso_p",
    estat_cmd = "poisson_estat",
    offset = "ln(pyears)",
    gof = "poiss_g",
    chi2type = "Wald",
    opt = "moptimize",
    vce = "oim",
    title = "Poisson regression",
    user = "poiss_lf",
    crittype = "log likelihood",
    ml_method = "e2",
    singularHmethod = "m-marquardt",
    technique = "nr",
    which = "max",
    depvar = "deaths",
    properties = "b V",
    )

# 7 coefficients x 9 columns; `5.agecat` = `4.agecat` + 0.5 under the
# constraint.
params_table = np.array([
    .35978347114582, .10730668667519, 3.3528522992687, .00079983377167,
    .14946622996212, .57010071232952, np.nan, 1.9599639845401,
    0, 0, np.nan, np.nan,
    np.nan, np.nan, np.nan, np.nan,
    1.9599639845401, 0, 1.4837272702102, .19510269288329,
    7.6048528509946, 2.852282383e-14, 1.1013330188722, 1.8661215215483,
    np.nan, 1.9599639845401, 0, 2.6270956495127,
    .18372567328363, 14.299012231442, 2.219372691e-46, 2.2669999468414,
    2.987191352184, np.nan, 1.9599639845401, 0,
    3.2898291023835, .17982035319735, 18.295087535352, 9.055555257e-75,
    2.9373876864294, 3.6422705183376, np.nan, 1.9599639845401,
    0, 3.7898291023835, .17982035319735, 21.075640409983,
    1.330935038e-98, 3.4373876864294, 4.1422705183376, np.nan,
    1.9599639845401, 0, -7.9235211042587, .19177810950798,
    -41.316087245761, 0, -8.2993992919175, -7.5476429165999,
    np.nan, 1.9599639845401, 0]).reshape(7,9)

params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()

params_table_rownames = 'smokes 1b.agecat 2.agecat 3.agecat 4.agecat 5.agecat _cons'.split()

# oim covariance; zero row/column is the omitted base level `1b.agecat`.
cov = np.array([
    .01151472500521, 0, -.00061274288972, -.00089685568608,
    -.00069335681347, -.00069335681347, -.00921031399899, 0,
    0, 0, 0, 0,
    0, 0, -.00061274288972, 0,
    .03806506077031, .02945948985187, .02944866089267, .02944866089267,
    -.02892164840477, -.00089685568608, 0, .02945948985187,
    .03375512302352, .02946576868665, .02946576868665, -.0286943943397,
    -.00069335681347, 0, .02944866089267, .02946576868665,
    .03233535942402, .03233535942402, -.02885716752919, -.00069335681347,
    0, .02944866089267, .02946576868665, .03233535942402,
    .03233535942402, -.02885716752919, -.00921031399899, 0,
    -.02892164840477, -.0286943943397, -.02885716752919, -.02885716752919,
    .03677884328645]).reshape(7,7)

cov_colnames = 'smokes 1b.agecat 2.agecat 3.agecat 4.agecat 5.agecat _cons'.split()

cov_rownames = 'smokes 1b.agecat 2.agecat 3.agecat 4.agecat 5.agecat _cons'.split()

results_exposure_constraint2 = Bunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    **est
    )
# Variant: same model as `results_exposure_constraint2` (identical `ll`
# and point estimates) but with robust (sandwich) standard errors,
# `vce(robust)`.  Packaged as `results_exposure_constraint2_robust`.
est = dict(
    rank = 5,
    N = 10,
    ic = 3,
    k = 7,
    k_eq = 1,
    k_dv = 1,
    converged = 1,
    rc = 0,
    k_autoCns = 1,
    ll = -34.5367006700131,
    k_eq_model = 1,
    df_m = 4,
    chi2 = 582.5215805315736,
    p = 9.3932644024e-125,
    r2_p = np.nan,
    cmdline = "poisson deaths smokes i.agecat, exposure(pyears) constraints(2) vce(robust)",
    cmd = "poisson",
    predict = "poisso_p",
    estat_cmd = "poisson_estat",
    offset = "ln(pyears)",
    gof = "poiss_g",
    chi2type = "Wald",
    opt = "moptimize",
    vcetype = "Robust",
    vce = "robust",
    title = "Poisson regression",
    user = "poiss_lf",
    crittype = "log pseudolikelihood",
    ml_method = "e2",
    singularHmethod = "m-marquardt",
    technique = "nr",
    which = "max",
    depvar = "deaths",
    properties = "b V",
    )

# 7 coefficients x 9 columns; point estimates match the non-robust
# constraint-2 results, only se/z/pvalue/ci differ.
params_table = np.array([
    .35978347114582, .1172393358046, 3.0687948603312, .00214924117257,
    .1299985953974, .58956834689424, np.nan, 1.9599639845401,
    0, 0, np.nan, np.nan,
    np.nan, np.nan, np.nan, np.nan,
    1.9599639845401, 0, 1.4837272702102, .21969092615175,
    6.7537030145039, 1.441186055e-11, 1.0531409672225, 1.9143135731979,
    np.nan, 1.9599639845401, 0, 2.6270956495127,
    .20894895542061, 12.572906355164, 2.975796525e-36, 2.217563222281,
    3.0366280767443, np.nan, 1.9599639845401, 0,
    3.2898291023835, .2211846822073, 14.873675109654, 4.885611722e-50,
    2.8563150913252, 3.7233431134417, np.nan, 1.9599639845401,
    0, 3.7898291023835, .2211846822073, 17.134229479922,
    8.243780087e-66, 3.3563150913252, 4.2233431134417, np.nan,
    1.9599639845401, 0, -7.9235211042587, .2479876721169,
    -31.951270144281, 5.18748229e-224, -8.4095680102177, -7.4374741982996,
    np.nan, 1.9599639845401, 0]).reshape(7,9)

params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()

params_table_rownames = 'smokes 1b.agecat 2.agecat 3.agecat 4.agecat 5.agecat _cons'.split()

# Robust (sandwich) covariance; zero row/column is the omitted base level.
cov = np.array([
    .0137450618599, 0, .00249770233028, .00412347653263,
    .00486142402447, .00486142402447, -.01620342093134, 0,
    0, 0, 0, 0,
    0, 0, .00249770233028, 0,
    .04826410303341, .04389964215014, .04391744129373, .04391744129373,
    -.04609122424924, .00412347653263, 0, .04389964215014,
    .04365966597136, .04367917402468, .04367917402468, -.04726310745444,
    .00486142402447, 0, .04391744129373, .04367917402468,
    .04892266364314, .04892266364314, -.04794543190806, .00486142402447,
    0, .04391744129373, .04367917402468, .04892266364314,
    .04892266364314, -.04794543190806, -.01620342093134, 0,
    -.04609122424924, -.04726310745444, -.04794543190806, -.04794543190806,
    .06149788552196]).reshape(7,7)

cov_colnames = 'smokes 1b.agecat 2.agecat 3.agecat 4.agecat 5.agecat _cons'.split()

cov_rownames = 'smokes 1b.agecat 2.agecat 3.agecat 4.agecat 5.agecat _cons'.split()

results_exposure_constraint2_robust = Bunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    **est
    )
# --- end of file 1 (dump metadata: license bsd-3-clause) ---
# --- file 2 (from dump metadata): moutai/scikit-learn ::
#     sklearn/cluster/tests/test_k_means.py (chunk; truncated below) ---
"""Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.testing import if_not_mac_os
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.exceptions import DataConversionWarning
from sklearn.metrics.cluster import homogeneity_score
# non centered, sparse centers to check the
# (comment truncated in the export -- presumably "...handling of sparse
# centers"; TODO confirm against upstream)
centers = np.array([
    [0.0, 5.0, 0.0, 0.0, 0.0],
    [1.0, 1.0, 4.0, 0.0, 0.0],
    [1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape

# Shared fixtures: well-separated blobs around `centers`, plus a CSR copy
# so tests can exercise both the dense and the sparse code paths.
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
                            cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_kmeans_dtype():
    # uint8 input is converted by fit/predict; predict must warn about the
    # conversion and still agree with the labels found by fit.
    rnd = np.random.RandomState(0)
    X = rnd.normal(size=(40, 2))
    X = (X * 10).astype(np.uint8)
    km = KMeans(n_init=1).fit(X)
    pred_x = assert_warns(DataConversionWarning, km.predict, X)
    assert_array_equal(km.labels_, pred_x)


def test_elkan_results():
    # The 'elkan' and 'full' algorithms must find the same centers and
    # labels from identical seeds.
    rnd = np.random.RandomState(0)
    X_normal = rnd.normal(size=(50, 10))
    X_blobs, _ = make_blobs(random_state=0)
    km_full = KMeans(algorithm='full', n_clusters=5, random_state=0, n_init=1)
    km_elkan = KMeans(algorithm='elkan', n_clusters=5,
                      random_state=0, n_init=1)
    for X in [X_normal, X_blobs]:
        km_full.fit(X)
        km_elkan.fit(X)
        assert_array_almost_equal(km_elkan.cluster_centers_,
                                  km_full.cluster_centers_)
        assert_array_equal(km_elkan.labels_, km_full.labels_)
def test_labels_assignment_and_inertia():
    """Check ``_labels_inertia`` (dense and CSR) against a plain-numpy
    nearest-center assignment used as a gold reference."""
    # pure numpy implementation as easily auditable reference gold
    # implementation
    rng = np.random.RandomState(42)
    noisy_centers = centers + rng.normal(size=centers.shape)
    # Fix: `np.int` and `np.infty` are aliases removed in NumPy 1.20/2.0;
    # the builtin `int` and `np.inf` are the exact equivalents.
    labels_gold = - np.ones(n_samples, dtype=int)
    mindist = np.empty(n_samples)
    mindist.fill(np.inf)
    for center_id in range(n_clusters):
        dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
        labels_gold[dist < mindist] = center_id
        mindist = np.minimum(dist, mindist)
    inertia_gold = mindist.sum()
    assert_true((mindist >= 0.0).all())
    assert_true((labels_gold != -1).all())

    # perform label assignment using the dense array input
    x_squared_norms = (X ** 2).sum(axis=1)
    labels_array, inertia_array = _labels_inertia(
        X, x_squared_norms, noisy_centers)
    assert_array_almost_equal(inertia_array, inertia_gold)
    assert_array_equal(labels_array, labels_gold)

    # perform label assignment using the sparse CSR input
    x_squared_norms_from_csr = row_norms(X_csr, squared=True)
    labels_csr, inertia_csr = _labels_inertia(
        X_csr, x_squared_norms_from_csr, noisy_centers)
    assert_array_almost_equal(inertia_csr, inertia_gold)
    assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
    # Check that dense and sparse minibatch update give the same results
    rng = np.random.RandomState(42)
    old_centers = centers + rng.normal(size=centers.shape)

    # `_mini_batch_step` updates the center arrays in place, so keep two
    # independent copies (dense / sparse paths) plus the pristine original.
    new_centers = old_centers.copy()
    new_centers_csr = old_centers.copy()

    counts = np.zeros(new_centers.shape[0], dtype=np.int32)
    counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)

    x_squared_norms = (X ** 2).sum(axis=1)
    x_squared_norms_csr = row_norms(X_csr, squared=True)

    buffer = np.zeros(centers.shape[1], dtype=np.double)
    buffer_csr = np.zeros(centers.shape[1], dtype=np.double)

    # extract a small minibatch
    X_mb = X[:10]
    X_mb_csr = X_csr[:10]
    x_mb_squared_norms = x_squared_norms[:10]
    x_mb_squared_norms_csr = x_squared_norms_csr[:10]

    # step 1: compute the dense minibatch update
    old_inertia, incremental_diff = _mini_batch_step(
        X_mb, x_mb_squared_norms, new_centers, counts,
        buffer, 1, None, random_reassign=False)
    assert_greater(old_inertia, 0.0)

    # compute the new inertia on the same batch to check that it decreased
    labels, new_inertia = _labels_inertia(
        X_mb, x_mb_squared_norms, new_centers)
    assert_greater(new_inertia, 0.0)
    assert_less(new_inertia, old_inertia)

    # check that the incremental difference computation is matching the
    # final observed value
    effective_diff = np.sum((new_centers - old_centers) ** 2)
    assert_almost_equal(incremental_diff, effective_diff)

    # step 2: compute the sparse minibatch update
    old_inertia_csr, incremental_diff_csr = _mini_batch_step(
        X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
        buffer_csr, 1, None, random_reassign=False)
    assert_greater(old_inertia_csr, 0.0)

    # compute the new inertia on the same batch to check that it decreased
    labels_csr, new_inertia_csr = _labels_inertia(
        X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
    assert_greater(new_inertia_csr, 0.0)
    assert_less(new_inertia_csr, old_inertia_csr)

    # check that the incremental difference computation is matching the
    # final observed value
    effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
    assert_almost_equal(incremental_diff_csr, effective_diff)

    # step 3: check that sparse and dense updates lead to the same results
    assert_array_equal(labels, labels_csr)
    assert_array_almost_equal(new_centers, new_centers_csr)
    assert_almost_equal(incremental_diff, incremental_diff_csr)
    assert_almost_equal(old_inertia, old_inertia_csr)
    assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
    """Shared sanity checks for an estimator fitted on the module fixture
    ``X``/``true_labels`` (not reusable with other data)."""
    # check that the number of clusters centers and distinct labels match
    # the expectation
    centers = km.cluster_centers_
    assert_equal(centers.shape, (n_clusters, n_features))

    labels = km.labels_
    assert_equal(np.unique(labels).shape[0], n_clusters)

    # check that the labels assignment are perfect (up to a permutation)
    assert_equal(v_measure_score(true_labels, labels), 1.0)
    assert_greater(km.inertia_, 0.0)

    # check error on dataset being too small
    assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
    km = KMeans(init="k-means++", n_clusters=n_clusters,
                random_state=42).fit(X)
    _check_fitted_model(km)


def test_k_means_new_centers():
    # Explore the part of the code where a new center is reassigned
    X = np.array([[0, 0, 1, 1],
                  [0, 0, 0, 0],
                  [0, 1, 0, 0],
                  [0, 0, 0, 0],
                  [0, 0, 0, 0],
                  [0, 1, 0, 0]])
    labels = [0, 1, 2, 1, 1, 2]
    # one duplicated and one empty-ish center to force reassignment
    bad_centers = np.array([[+0, 1, 0, 0],
                            [.2, 0, .2, .2],
                            [+0, 0, 0, 0]])

    km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
                random_state=1)
    for this_X in (X, sp.coo_matrix(X)):
        km.fit(this_X)
        this_labels = km.labels_
        # Reorder the labels so that the first instance is in cluster 0,
        # the second in cluster 1, ...
        this_labels = np.unique(this_labels,
                                return_index=True)[1][this_labels]
        np.testing.assert_array_equal(this_labels, labels)


@if_safe_multiprocessing_with_blas
def test_k_means_plus_plus_init_2_jobs():
    if sys.version_info[:2] < (3, 4):
        raise SkipTest(
            "Possible multi-process bug with some BLAS under Python < 3.4")
    km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
                random_state=42).fit(X)
    _check_fitted_model(km)
def test_k_means_precompute_distances_flag():
    # check that a warning is raised if the precompute_distances flag is not
    # supported
    km = KMeans(precompute_distances="wrong")
    assert_raises(ValueError, km.fit, X)


def test_k_means_plus_plus_init_sparse():
    km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
    km.fit(X_csr)
    _check_fitted_model(km)


def test_k_means_random_init():
    km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
    km.fit(X)
    _check_fitted_model(km)


def test_k_means_random_init_sparse():
    km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
    km.fit(X_csr)
    _check_fitted_model(km)


def test_k_means_plus_plus_init_not_precomputed():
    km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
                precompute_distances=False).fit(X)
    _check_fitted_model(km)


def test_k_means_random_init_not_precomputed():
    km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
                precompute_distances=False).fit(X)
    _check_fitted_model(km)


def test_k_means_perfect_init():
    # initializing with the true centers should converge in one shot
    km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
                n_init=1)
    km.fit(X)
    _check_fitted_model(km)


def test_k_means_n_init():
    rnd = np.random.RandomState(0)
    X = rnd.normal(size=(40, 2))

    # two regression tests on bad n_init argument
    # previous bug: n_init <= 0 threw non-informative TypeError (#3858)
    assert_raises_regex(ValueError, "n_init", KMeans(n_init=0).fit, X)
    assert_raises_regex(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_explicit_init_shape():
    # test for sensible errors when giving explicit init
    # with wrong number of features or clusters
    rnd = np.random.RandomState(0)
    X = rnd.normal(size=(40, 3))
    for Class in [KMeans, MiniBatchKMeans]:
        # mismatch of number of features
        km = Class(n_init=1, init=X[:, :2], n_clusters=len(X))
        msg = "does not match the number of features of the data"
        assert_raises_regex(ValueError, msg, km.fit, X)
        # for callable init
        km = Class(n_init=1,
                   init=lambda X_, k, random_state: X_[:, :2],
                   n_clusters=len(X))
        assert_raises_regex(ValueError, msg, km.fit, X)
        # mismatch of number of clusters
        msg = "does not match the number of clusters"
        km = Class(n_init=1, init=X[:2, :], n_clusters=3)
        assert_raises_regex(ValueError, msg, km.fit, X)
        # for callable init
        km = Class(n_init=1,
                   init=lambda X_, k, random_state: X_[:2, :],
                   n_clusters=3)
        assert_raises_regex(ValueError, msg, km.fit, X)


def test_k_means_fortran_aligned_data():
    # Check the KMeans will work well, even if X is a fortran-aligned data.
    X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
    centers = np.array([[0, 0], [0, 1]])
    labels = np.array([0, 1, 1])
    km = KMeans(n_init=1, init=centers, precompute_distances=False,
                random_state=42, n_clusters=2)
    km.fit(X)
    assert_array_equal(km.cluster_centers_, centers)
    assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
    # Smoke test: fitting with verbose=1 must not crash; stdout is
    # temporarily redirected so the test output stays clean.
    mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
                                 random_state=42, verbose=1)
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        mb_k_means.fit(X)
    finally:
        sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
    # MiniBatchKMeans with k-means++ init on sparse CSR input.
    mbkm = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
                           random_state=42).fit(X_csr)
    _check_fitted_model(mbkm)
def test_minibatch_init_with_large_k():
    mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
    # Check that a warning is raised, as the number of clusters is larger
    # than the init_size
    assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
    # MiniBatchKMeans with random init on a dense array.
    # increase n_init to make random init stable enough
    mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
                                 random_state=42, n_init=10).fit(X)
    _check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
    # MiniBatchKMeans with random init on sparse CSR input.
    # increase n_init to make random init stable enough
    mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
                                 random_state=42, n_init=10).fit(X_csr)
    _check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
    # Init with the exact centers; a single init run is then sufficient.
    mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
                                 random_state=42, n_init=1).fit(X)
    _check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
    # Explicit centers combined with n_init > 1 is contradictory: the
    # estimator is expected to warn (RuntimeWarning) when fitting.
    mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
                                 random_state=42, n_init=10)
    assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
    # Init with the exact centers on sparse CSR input; n_init=1 suffices.
    mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
                                 random_state=42, n_init=1).fit(X_csr)
    _check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
    # check if identical initial clusters are reassigned
    # also a regression test for when there are more desired reassignments than
    # samples.
    zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
                                       cluster_std=1., random_state=42)
    # zero out every other sample to create many identical points
    zeroed_X[::2, :] = 0
    mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
                                 init="random")
    mb_k_means.fit(zeroed_X)
    # there should not be too many exact zero cluster centers
    assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
    # do the same with batch-size > X.shape[0] (regression test)
    mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
                                 random_state=42, init="random")
    mb_k_means.fit(zeroed_X)
    # there should not be too many exact zero cluster centers
    assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
    # Same reassignment check as above, but exercising the partial_fit API.
    zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
                                       cluster_std=1., random_state=42)
    # zero out every other sample to create many identical points
    zeroed_X[::2, :] = 0
    mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
    # many repeated partial_fit calls give the reassignment logic a chance
    # to trigger
    for i in range(100):
        mb_k_means.partial_fit(zeroed_X)
    # there should not be too many exact zero cluster centers
    assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
    # Give a perfect initialization, but a large reassignment_ratio,
    # as a result all the centers should be reassigned and the model
    # should not longer be good
    for this_X in (X, X_csr):
        mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
                                     random_state=42)
        mb_k_means.fit(this_X)
        score_before = mb_k_means.score(this_X)
        try:
            old_stdout = sys.stdout
            sys.stdout = StringIO()
            # Turn on verbosity to smoke test the display code
            # NOTE(review): the squared norms are computed from the dense X
            # even when this_X is sparse - presumably X_csr holds the same
            # data as X so the norms match; confirm against the fixtures.
            _mini_batch_step(this_X, (X ** 2).sum(axis=1),
                             mb_k_means.cluster_centers_,
                             mb_k_means.counts_,
                             np.zeros(X.shape[1], np.double),
                             False, distances=np.zeros(X.shape[0]),
                             random_reassign=True, random_state=42,
                             reassignment_ratio=1, verbose=True)
        finally:
            sys.stdout = old_stdout
        assert_greater(score_before, mb_k_means.score(this_X))
    # Give a perfect initialization, with a small reassignment_ratio,
    # no center should be reassigned
    for this_X in (X, X_csr):
        mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
                                     init=centers.copy(),
                                     random_state=42, n_init=1)
        mb_k_means.fit(this_X)
        clusters_before = mb_k_means.cluster_centers_
        # Turn on verbosity to smoke test the display code
        _mini_batch_step(this_X, (X ** 2).sum(axis=1),
                         mb_k_means.cluster_centers_,
                         mb_k_means.counts_,
                         np.zeros(X.shape[1], np.double),
                         False, distances=np.zeros(X.shape[0]),
                         random_reassign=True, random_state=42,
                         reassignment_ratio=1e-15)
        assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
    # Test for the case that the number of clusters to reassign is bigger
    # than the batch_size
    n_samples = 550
    rnd = np.random.RandomState(42)
    X = rnd.uniform(size=(n_samples, 10))
    # Check that the fit works if n_clusters is bigger than the batch_size.
    # Run the test with 550 clusters and 550 samples, because it turned out
    # that this values ensure that the number of clusters to reassign
    # is always bigger than the batch_size
    n_clusters = 550
    # only checking that fit() does not raise; no model-quality assertion
    MiniBatchKMeans(n_clusters=n_clusters,
                    batch_size=100,
                    init_size=n_samples,
                    random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
    def test_init(X, k, random_state):
        # ignores its arguments and always returns the module-level centers
        return centers
    # Small test to check that giving the wrong number of centers
    # raises a meaningful error
    msg = "does not match the number of clusters"
    assert_raises_regex(ValueError, msg, MiniBatchKMeans(init=test_init,
                                                         random_state=42).fit,
                        X_csr)
    # Now check that the fit actually works
    mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
                                 random_state=42).fit(X_csr)
    _check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
    km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
    # use the partial_fit API for online learning
    for X_minibatch in np.array_split(X, 10):
        km.partial_fit(X_minibatch)
    # compute the labeling on the complete dataset
    # v_measure == 1.0 means the clustering matches the ground truth up to
    # a permutation of the labels
    labels = km.predict(X)
    assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
    # When init_size is not given, the effective init_size_ defaults to
    # 3 * batch_size (checked by the assertion below).
    mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
                                 batch_size=10, random_state=42,
                                 n_init=1).fit(X)
    assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
    _check_fitted_model(mb_k_means)
def test_minibatch_tol():
    # Smoke test: fitting with a non-zero convergence tolerance must still
    # produce a valid model.
    mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
                                 random_state=42, tol=.01).fit(X)
    _check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
    # The requested init_size is stored unchanged, while the effective
    # init_size_ is capped at the number of samples (see assertions).
    mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
                                 init_size=666, random_state=42,
                                 n_init=1).fit(X)
    assert_equal(mb_k_means.init_size, 666)
    assert_equal(mb_k_means.init_size_, n_samples)
    _check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
    # An unknown init strategy must raise ValueError at fit time.
    bad_km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
    assert_raises(ValueError, bad_km.fit, X)
def test_mini_match_k_means_invalid_init():
    # An unknown init strategy must raise ValueError at fit time.
    bad_km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
    assert_raises(ValueError, bad_km.fit, X)
def test_k_means_copyx():
    # Check if copy_x=False returns nearly equal X after de-centering.
    # copy_x=False lets fit() center the data in place; it must restore the
    # original values afterwards (checked by the final assertion).
    my_X = X.copy()
    km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
    km.fit(my_X)
    _check_fitted_model(km)
    # check if my_X is centered
    assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
    # Check k_means with a bad initialization does not yield a singleton
    # Starting with bad centers that are quickly ignored should not
    # result in a repositioning of the centers to the center of mass that
    # would lead to collapsed centers which in turns make the clustering
    # dependent of the numerical unstabilities.
    my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
    array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
    km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
    km.fit(my_X)
    # centers must not have been collapsed
    assert_equal(len(np.unique(km.labels_)), 3)
    centers = km.cluster_centers_
    # all pairwise center distances stay clearly non-zero
    assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
    assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
    assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
    # predict() must be consistent with labels_ and with fit_predict().
    km = KMeans(n_clusters=n_clusters, random_state=42)
    km.fit(X)
    # sanity check: predict centroid labels
    pred = km.predict(km.cluster_centers_)
    assert_array_equal(pred, np.arange(n_clusters))
    # sanity check: re-predict labeling for training set samples
    pred = km.predict(X)
    assert_array_equal(pred, km.labels_)
    # re-predict labels for training set using fit_predict
    pred = km.fit_predict(X)
    assert_array_equal(pred, km.labels_)
def test_score():
    # More iterations must yield a better (higher) score.  The same check
    # is run for the default algorithm and for the 'elkan' variant - the
    # two previously duplicated stanzas are folded into one loop.
    for extra_params in ({}, {'algorithm': 'elkan'}):
        km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42,
                     n_init=1, **extra_params)
        s1 = km1.fit(X).score(X)
        km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42,
                     n_init=1, **extra_params)
        s2 = km2.fit(X).score(X)
        assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
    # MiniBatchKMeans predictions on dense input.
    mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
    # sanity check: predict centroid labels
    pred = mb_k_means.predict(mb_k_means.cluster_centers_)
    assert_array_equal(pred, np.arange(n_clusters))
    # sanity check: re-predict labeling for training set samples
    pred = mb_k_means.predict(X)
    assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
    # MiniBatchKMeans (k-means++ init) trained on sparse, predicting on both.
    mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
                                 n_init=10).fit(X_csr)
    # sanity check: re-predict labeling for training set samples
    assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
    # sanity check: predict centroid labels
    pred = mb_k_means.predict(mb_k_means.cluster_centers_)
    assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also works for dense input at
    # predict time
    assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
    # MiniBatchKMeans (random init) trained on sparse, predicting on both.
    mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
                                 n_init=10).fit(X_csr)
    # sanity check: re-predict labeling for training set samples
    assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
    # sanity check: predict centroid labels
    pred = mb_k_means.predict(mb_k_means.cluster_centers_)
    assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also works for dense input at
    # predict time
    assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_input_dtypes():
    # KMeans/MiniBatchKMeans must accept plain lists, integer arrays and
    # sparse integer input, and recover the same labeling on each.
    X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
    X_int = np.array(X_list, dtype=np.int32)
    X_int_csr = sp.csr_matrix(X_int)
    init_int = X_int[:2]
    fitted_models = [
        KMeans(n_clusters=2).fit(X_list),
        KMeans(n_clusters=2).fit(X_int),
        KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
        KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
        # mini batch kmeans is very unstable on such a small dataset hence
        # we use many inits
        MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
        MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
        MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
        MiniBatchKMeans(n_clusters=2, batch_size=2,
                        init=init_int, n_init=1).fit(X_list),
        MiniBatchKMeans(n_clusters=2, batch_size=2,
                        init=init_int, n_init=1).fit(X_int),
        MiniBatchKMeans(n_clusters=2, batch_size=2,
                        init=init_int, n_init=1).fit(X_int_csr),
    ]
    expected_labels = [0, 1, 1, 0, 0, 1]
    # v_measure_score == 1.0 for every model means each labeling matches the
    # expected one up to a permutation
    scores = np.array([v_measure_score(expected_labels, km.labels_)
                       for km in fitted_models])
    assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
    # Transforming the centers themselves gives a distance matrix whose
    # diagonal is zero and whose off-diagonal entries are strictly positive.
    model = KMeans(n_clusters=n_clusters)
    model.fit(X)
    dists = model.transform(model.cluster_centers_)
    for row in range(n_clusters):
        assert_equal(dists[row, row], 0)
        for col in range(n_clusters):
            if row != col:
                assert_greater(dists[row, col], 0)
def test_fit_transform():
    # fit(X).transform(X) and fit_transform(X) must agree exactly.
    via_fit = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
    via_fit_transform = KMeans(n_clusters=3, random_state=51).fit_transform(X)
    assert_array_equal(via_fit, via_fit_transform)
def test_predict_equal_labels():
    # predict() on the training data must agree with labels_ for both
    # algorithm variants - the two previously duplicated stanzas are
    # folded into one loop.
    for algorithm in ('full', 'elkan'):
        km = KMeans(random_state=13, n_jobs=1, n_init=1, max_iter=1,
                    algorithm=algorithm)
        km.fit(X)
        assert_array_equal(km.predict(X), km.labels_)
def test_full_vs_elkan():
    # Both algorithm variants should converge to the same labeling (up to
    # a permutation) on this data.
    km1 = KMeans(algorithm='full', random_state=13)
    km2 = KMeans(algorithm='elkan', random_state=13)
    km1.fit(X)
    km2.fit(X)
    # Previously the comparison result was computed but never asserted, so
    # this test could never fail.
    assert_equal(homogeneity_score(km1.predict(X), km2.predict(X)), 1.0)
def test_n_init():
    # Check that increasing the number of init increases the quality
    n_runs = 5
    n_init_range = [1, 5, 10]
    inertia = np.zeros((len(n_init_range), n_runs))
    for i, n_init in enumerate(n_init_range):
        for j in range(n_runs):
            km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
                        random_state=j).fit(X)
            inertia[i, j] = km.inertia_
    # average inertia over the independent runs, one value per n_init
    inertia = inertia.mean(axis=1)
    failure_msg = ("Inertia %r should be decreasing"
                   " when n_init is increasing.") % list(inertia)
    for i in range(len(n_init_range) - 1):
        assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
    # test calling the k_means function directly
    # catch output (verbose=True prints progress to stdout)
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
                                                   verbose=True)
    finally:
        sys.stdout = old_stdout
    # removed the previous no-op rebindings (`centers = cluster_centers`,
    # `labels = labels`) and use the returned values directly
    assert_equal(cluster_centers.shape, (n_clusters, n_features))
    assert_equal(np.unique(labels).shape[0], n_clusters)
    # check that the labels assignment are perfect (up to a permutation)
    assert_equal(v_measure_score(true_labels, labels), 1.0)
    assert_greater(inertia, 0.0)
    # check warning when centers are passed
    assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
                 init=cluster_centers)
    # too many clusters desired
    assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
def test_x_squared_norms_init_centroids():
    """Test that x_squared_norms can be None in _init_centroids"""
    from sklearn.cluster.k_means_ import _init_centroids
    X_norms = np.sum(X**2, axis=1)
    # with the same random_state, precomputed norms and on-the-fly norms
    # must select the exact same initial centroids
    precompute = _init_centroids(
        X, 3, "k-means++", random_state=0, x_squared_norms=X_norms)
    assert_array_equal(
        precompute,
        _init_centroids(X, 3, "k-means++", random_state=0))
def test_max_iter_error():
    # A negative max_iter must be rejected with an informative message.
    bad_km = KMeans(max_iter=-1)
    assert_raise_message(ValueError, 'Number of iterations should be',
                         bad_km.fit, X)
| bsd-3-clause |
zhushun0008/sms-tools | software/transformations/stochasticTransformations.py | 26 | 1259 | # functions that implement transformations using the hpsModel
import numpy as np
from scipy.interpolate import interp1d
def stochasticTimeScale(stocEnv, timeScaling):
    """
    Time scaling of the stochastic representation of a sound
    stocEnv: stochastic envelope (2D array, one row per analysis frame)
    timeScaling: scaling factors, in time-value pairs (even-sized array)
    returns ystocEnv: stochastic envelope
    """
    if (timeScaling.size % 2 != 0):  # raise exception if array not even length
        raise ValueError("Time scaling array does not have an even size")
    L = stocEnv[:, 0].size  # number of input frames
    # number of synthesis frames, scaled by the ratio of last pair values
    outL = int(L * timeScaling[-1] / timeScaling[-2])
    # create interpolation object between the normalized time axes of the
    # (time, value) pairs
    timeScalingEnv = interp1d(timeScaling[::2] / timeScaling[-2],
                              timeScaling[1::2] / timeScaling[-1])
    # generate output time indexes into the input envelope
    indexes = (L - 1) * timeScalingEnv(np.arange(outL) / float(outL))
    ystocEnv = stocEnv[0, :]  # first output frame is same than input
    for l in indexes[1:]:  # step through the output frames
        # explicit int() conversion: round() alone may return a float
        # (Python 2 / older numpy), which is not a valid array index
        ystocEnv = np.vstack((ystocEnv, stocEnv[int(round(l)), :]))
    return ystocEnv
| agpl-3.0 |
tedlaz/pyted | pykoinoxrista/koinoxrista/greek_str.py | 1 | 2056 | import decimal
def isNum(value):  # Einai to value arithmos, i den einai ?
    """
    use: Returns False if value is not a number, True otherwise
    input parameters:
        1.value : the value to check against.
    output: True or False
    """
    # Previously any falsy value was rejected outright, so isNum(0)
    # wrongly returned False.  Now only values that cannot be converted
    # to float (including None and '') are rejected.
    try:
        float(value)
    except (ValueError, TypeError):
        return False
    return True
def dec(poso, dekadika=2):
    """
    use : Given a number, it returns a decimal with a specific number
          of decimal digits
    input Parameters:
        1.poso : The number for conversion in any format (e.g. string or int)
        2.dekadika : The number of decimals (default 2)
    output: A decimal number
    """
    quantum = decimal.Decimal(10) ** (-1 * dekadika)
    # Anything falsy or not convertible to float collapses to zero
    # (same check that isNum() performs, inlined here).
    if poso:
        try:
            float(poso)
        except ValueError:
            value = decimal.Decimal('0')
        else:
            value = decimal.Decimal(str(poso))
    else:
        value = decimal.Decimal('0')
    return value.quantize(quantum)
# Utility Functions
def gr_num_str(number, zero2null=False, decimals=2):
    '''
    Returns Greek Decimal(2) number or
    if zero2null is True '' (empty string)
    else 0,00
    '''
    # NOTE(review): `decimals` only affects the rounding step below; the
    # output is always rendered with exactly two decimal digits ('%.2f').
    # gr_int_str() relies on the ',' always being present - confirm before
    # changing the display format.
    number = dec(number, decimals)
    if abs(number) <= 0.004:
        if zero2null:
            return ''
        else:
            return '0,00'
    s = '%.2f' % number
    # split into integer and decimal part, then insert '.' as the Greek
    # thousands separator every three digits (sign, if any, stays in `a`)
    a, d = s.split('.')
    groups = []
    while a and a[-1].isdigit():
        groups.append(a[-3:])
        a = a[:-3]
    return a + '.'.join(reversed(groups)) + ',' + d
def gr_int_str(number):
    '''Returns the integer part of a number as a Greek-formatted string
    (thousands separated by dots, no decimal part).'''
    # partition() is robust even if the decimal separator were ever absent,
    # unlike the previous two-value unpacking of split(',') which would
    # raise ValueError in that case.
    return gr_num_str(number, False, 0).partition(',')[0]
def gr_date_str(imnia):
    '''
    imnia must be iso date YYYY-MM-DD
    returns dd/mm/yyyy
    '''
    year, month, day = imnia.split('-')
    return '{}/{}/{}'.format(day, month, year)
if __name__ == '__main__':
    # Quick manual demo of the formatting helpers above.
    num = -1002536.64589
    dat = '2015-10-30'
    print(gr_int_str(num))
    print(gr_num_str(num))
    print(gr_num_str(num, True))
    print(gr_date_str(dat))
    matr = [[1, 1, 1, 1], [2, 2, 2, 6]]
    a = list(zip(*matr))  # transpose the matrix
    print([sum(b) for b in a])  # column sums of matr
| gpl-3.0 |
loco-odoo/localizacion_co | openerp/addons-extra/odoo-pruebas/odoo-server/addons/report/controllers/main.py | 40 | 6657 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.web.http import Controller, route, request
from openerp.addons.web.controllers.main import _serialize_exception
from openerp.osv import osv
import simplejson
from werkzeug import exceptions, url_decode
from werkzeug.test import Client
from werkzeug.wrappers import BaseResponse
from werkzeug.datastructures import Headers
from reportlab.graphics.barcode import createBarcodeDrawing
class ReportController(Controller):
    """HTTP controllers serving QWeb reports (HTML/PDF), barcode images and
    the report-download helper used by the web client.

    NOTE: this module is Python 2 code (see the ``except ..., e`` syntax in
    report_download below).
    """
    #------------------------------------------------------
    # Report controllers
    #------------------------------------------------------
    @route([
        '/report/<path:converter>/<reportname>',
        '/report/<path:converter>/<reportname>/<docids>',
    ], type='http', auth='user', website=True)
    def report_routes(self, reportname, docids=None, converter=None, **data):
        # Render `reportname` for the comma-separated `docids` either as
        # HTML or as PDF, depending on `converter`.
        report_obj = request.registry['report']
        cr, uid, context = request.cr, request.uid, request.context
        if docids:
            docids = [int(i) for i in docids.split(',')]
        options_data = None
        if data.get('options'):
            options_data = simplejson.loads(data['options'])
        if data.get('context'):
            # Ignore 'lang' here, because the context in data is the one from the webclient *but* if
            # the user explicitely wants to change the lang, this mechanism overwrites it.
            data_context = simplejson.loads(data['context'])
            if data_context.get('lang'):
                del data_context['lang']
            context.update(data_context)
        if converter == 'html':
            html = report_obj.get_html(cr, uid, docids, reportname, data=options_data, context=context)
            return request.make_response(html)
        elif converter == 'pdf':
            pdf = report_obj.get_pdf(cr, uid, docids, reportname, data=options_data, context=context)
            pdfhttpheaders = [('Content-Type', 'application/pdf'), ('Content-Length', len(pdf))]
            return request.make_response(pdf, headers=pdfhttpheaders)
        else:
            raise exceptions.HTTPException(description='Converter %s not implemented.' % converter)
    #------------------------------------------------------
    # Misc. route utils
    #------------------------------------------------------
    @route(['/report/barcode', '/report/barcode/<type>/<path:value>'], type='http', auth="user")
    def report_barcode(self, type, value, width=600, height=100):
        """Contoller able to render barcode images thanks to reportlab.
        Samples:
            <img t-att-src="'/report/barcode/QR/%s' % o.name"/>
            <img t-att-src="'/report/barcode/?type=%s&value=%s&width=%s&height=%s' %
                ('QR', o.name, 200, 200)"/>
        :param type: Accepted types: 'Codabar', 'Code11', 'Code128', 'EAN13', 'EAN8', 'Extended39',
        'Extended93', 'FIM', 'I2of5', 'MSI', 'POSTNET', 'QR', 'Standard39', 'Standard93',
        'UPCA', 'USPS_4State'
        """
        try:
            # width/height arrive as strings from the query string
            width, height = int(width), int(height)
            barcode = createBarcodeDrawing(
                type, value=value, format='png', width=width, height=height
            )
            barcode = barcode.asString('png')
        except (ValueError, AttributeError):
            raise exceptions.HTTPException(description='Cannot convert into barcode.')
        return request.make_response(barcode, headers=[('Content-Type', 'image/png')])
    @route(['/report/download'], type='http', auth="user")
    def report_download(self, data, token):
        """This function is used by 'qwebactionmanager.js' in order to trigger the download of
        a pdf/controller report.
        :param data: a javascript array JSON.stringified containg report internal url ([0]) and
        type [1]
        :returns: Response with a filetoken cookie and an attachment header
        """
        requestcontent = simplejson.loads(data)
        url, type = requestcontent[0], requestcontent[1]
        try:
            if type == 'qweb-pdf':
                reportname = url.split('/report/pdf/')[1].split('?')[0]
                docids = None
                if '/' in reportname:
                    reportname, docids = reportname.split('/')
                if docids:
                    # Generic report:
                    response = self.report_routes(reportname, docids=docids, converter='pdf')
                else:
                    # Particular report:
                    data = url_decode(url.split('?')[1]).items() # decoding the args represented in JSON
                    response = self.report_routes(reportname, converter='pdf', **dict(data))
                response.headers.add('Content-Disposition', 'attachment; filename=%s.pdf;' % reportname)
                response.set_cookie('fileToken', token)
                return response
            elif type =='controller':
                # Replay the request against the given controller URL and
                # stream its response back with the download cookie set.
                reqheaders = Headers(request.httprequest.headers)
                response = Client(request.httprequest.app, BaseResponse).get(url, headers=reqheaders, follow_redirects=True)
                response.set_cookie('fileToken', token)
                return response
            else:
                return
        except osv.except_osv, e:
            # Python 2 exception syntax; errors are serialized back to the
            # web client as a JSON payload.
            se = _serialize_exception(e)
            error = {
                'code': 200,
                'message': "Odoo Server Error",
                'data': se
            }
            return request.make_response(simplejson.dumps(error))
    @route(['/report/check_wkhtmltopdf'], type='json', auth="user")
    def check_wkhtmltopdf(self):
        # Report availability of the wkhtmltopdf binary to the web client.
        return request.registry['report']._check_wkhtmltopdf()
| agpl-3.0 |
numerigraphe/odoo | openerp/report/interface.py | 380 | 9868 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import os
import re
from lxml import etree
import openerp
import openerp.tools as tools
import openerp.modules
import print_xml
import render
import urllib
from openerp import SUPERUSER_ID
from openerp.report.render.rml2pdf import customfonts
#
# coerce any type to a unicode string (to preserve non-ascii characters)
# and escape XML entities
#
def toxml(value):
    """Coerce any value to a unicode string (preserving non-ascii
    characters) and escape the XML special characters."""
    unicode_value = tools.ustr(value)
    # As written, the replace() calls were no-ops (raw character replaced
    # by itself) so nothing was actually escaped.  '&' must be escaped
    # first so already-produced entities are not escaped twice.
    return (unicode_value.replace('&', '&amp;')
                         .replace('<', '&lt;')
                         .replace('>', '&gt;'))
class report_int(object):
    """Base class of the legacy report engine.

    Instances register themselves (by name) in the class-level `_reports`
    registry unless ``register=False`` is passed.
    """
    # global registry mapping report name -> report instance
    _reports = {}
    def __init__(self, name, register=True):
        if register:
            assert openerp.conf.deprecation.allow_report_int_registration
            assert name.startswith('report.'), 'Report names should start with "report.".'
            assert name not in self._reports, 'The report "%s" already exists.' % name
            self._reports[name] = self
        else:
            # The report is instanciated at each use site, which is ok.
            pass
        self.__name = name
        self.name = name
        self.id = 0
        # report name without the leading 'report.' prefix
        self.name2 = '.'.join(name.split('.')[1:])
        # TODO the reports have methods with a 'title' kwarg that is redundant with this attribute
        self.title = None
    def create(self, cr, uid, ids, datas, context=None):
        # to be overridden by subclasses; base implementation renders nothing
        return False
class report_rml(report_int):
    """
    Automatically builds a document using the transformation process:
        XML -> DATAS -> RML -> PDF -> HTML
    using a XSL:RML transformation
    """
    def __init__(self, name, table, tmpl, xsl, register=True):
        super(report_rml, self).__init__(name, register=register)
        self.table = table
        self.internal_header=False
        self.tmpl = tmpl
        self.xsl = xsl
        self.bin_datas = {}
        # dispatch table: output format -> rendering method
        self.generators = {
            'pdf': self.create_pdf,
            'html': self.create_html,
            'raw': self.create_raw,
            'sxw': self.create_sxw,
            'txt': self.create_txt,
            'odt': self.create_odt,
            'html2html' : self.create_html2html,
            'makohtml2html' :self.create_makohtml2html,
        }
    def create(self, cr, uid, ids, datas, context):
        # Entry point: build the XML data document, transform it to RML and
        # render it in the requested output format.  Returns (data, type).
        registry = openerp.registry(cr.dbname)
        xml = self.create_xml(cr, uid, ids, datas, context)
        xml = tools.ustr(xml).encode('utf8')
        report_type = datas.get('report_type', 'pdf')
        if report_type == 'raw':
            return xml, report_type
        registry['res.font'].font_scan(cr, SUPERUSER_ID, lazy=True, context=context)
        rml = self.create_rml(cr, xml, uid, context)
        ir_actions_report_xml_obj = registry['ir.actions.report.xml']
        report_xml_ids = ir_actions_report_xml_obj.search(cr, uid, [('report_name', '=', self.name[7:])], context=context)
        self.title = report_xml_ids and ir_actions_report_xml_obj.browse(cr,uid,report_xml_ids)[0].name or 'OpenERP Report'
        create_doc = self.generators[report_type]
        pdf = create_doc(rml, title=self.title)
        return pdf, report_type
    def create_xml(self, cr, uid, ids, datas, context=None):
        # Parse the report template against the records and return the
        # resulting XML data document.
        if not context:
            context={}
        doc = print_xml.document(cr, uid, datas, {})
        self.bin_datas.update( doc.bin_datas  or {})
        doc.parse(self.tmpl, ids, self.table, context)
        xml = doc.xml_get()
        doc.close()
        return self.post_process_xml_data(cr, uid, xml, context)
    def post_process_xml_data(self, cr, uid, xml, context=None):
        # Inject the corporate header (company defaults) into the generated
        # XML, right after the root tag.
        if not context:
            context={}
        # find the position of the 3rd tag
        # (skip the <?xml ...?> and the "root" tag)
        iter = re.finditer('<[^>]*>', xml)
        i = iter.next()
        i = iter.next()
        pos_xml = i.end()
        doc = print_xml.document(cr, uid, {}, {})
        tmpl_path = openerp.modules.get_module_resource('base', 'report', 'corporate_defaults.xml')
        doc.parse(tmpl_path, [uid], 'res.users', context)
        corporate_header = doc.xml_get()
        doc.close()
        # find the position of the tag after the <?xml ...?> tag
        iter = re.finditer('<[^>]*>', corporate_header)
        i = iter.next()
        pos_header = i.end()
        return xml[:pos_xml] + corporate_header[pos_header:] + xml[pos_xml:]
    #
    # TODO: The translation doesn't work for "<tag t="1">textext<tag> tex</tag>text</tag>"
    #
    def create_rml(self, cr, xml, uid, context=None):
        # Apply the XSL:RML stylesheet (with inline translation of marked
        # nodes) to the XML data document and return the RML string.
        if self.tmpl=='' and not self.internal_header:
            self.internal_header=True
        if not context:
            context={}
        registry = openerp.registry(cr.dbname)
        ir_translation_obj = registry['ir.translation']
        # In some case we might not use xsl ...
        if not self.xsl:
            return xml
        stylesheet_file = tools.file_open(self.xsl)
        try:
            stylesheet = etree.parse(stylesheet_file)
            xsl_path, _ = os.path.split(self.xsl)
            # resolve <import href="..."> references relative to the
            # stylesheet location
            for import_child in stylesheet.findall('./import'):
                if 'href' in import_child.attrib:
                    imp_file = import_child.get('href')
                    _, imp_file = tools.file_open(imp_file, subdir=xsl_path, pathinfo=True)
                    import_child.set('href', urllib.quote(str(imp_file)))
                    imp_file.close()
        finally:
            stylesheet_file.close()
        #TODO: get all the translation in one query. That means we have to:
        # * build a list of items to translate,
        # * issue the query to translate them,
        # * (re)build/update the stylesheet with the translated items
        def translate(doc, lang):
            translate_aux(doc, lang, False)
        def translate_aux(doc, lang, t):
            # Recursively translate text/tail of nodes marked with t="1"
            # (the flag propagates to descendants).
            for node in doc:
                t = t or node.get("t")
                if t:
                    text = None
                    tail = None
                    if node.text:
                        text = node.text.strip().replace('\n',' ')
                    if node.tail:
                        tail = node.tail.strip().replace('\n',' ')
                    if text:
                        translation1 = ir_translation_obj._get_source(cr, uid, self.name2, 'xsl', lang, text)
                        if translation1:
                            node.text = node.text.replace(text, translation1)
                    if tail:
                        translation2 = ir_translation_obj._get_source(cr, uid, self.name2, 'xsl', lang, tail)
                        if translation2:
                            node.tail = node.tail.replace(tail, translation2)
                translate_aux(node, lang, t)
        if context.get('lang', False):
            translate(stylesheet.iter(), context['lang'])
        transform = etree.XSLT(stylesheet)
        xml = etree.tostring(
            transform(etree.fromstring(xml)))
        return xml
    def create_pdf(self, rml, localcontext = None, logo=None, title=None):
        # Render RML to PDF; an optional logo is passed through bin_datas.
        if not localcontext:
            localcontext = {}
        localcontext.update({'internal_header':self.internal_header})
        if logo:
            self.bin_datas['logo'] = logo
        else:
            if 'logo' in self.bin_datas:
                del self.bin_datas['logo']
        obj = render.rml(rml, localcontext, self.bin_datas, self._get_path(), title)
        obj.render()
        return obj.get()
    def create_html(self, rml, localcontext = None, logo=None, title=None):
        obj = render.rml2html(rml, localcontext, self.bin_datas)
        obj.render()
        return obj.get()
    def create_txt(self, rml,localcontext, logo=None, title=None):
        obj = render.rml2txt(rml, localcontext, self.bin_datas)
        obj.render()
        return obj.get().encode('utf-8')
    def create_html2html(self, rml, localcontext = None, logo=None, title=None):
        obj = render.html2html(rml, localcontext, self.bin_datas)
        obj.render()
        return obj.get()
    def create_raw(self,rml, localcontext = None, logo=None, title=None):
        obj = render.odt2odt(etree.XML(rml),localcontext)
        obj.render()
        return etree.tostring(obj.get())
    def create_sxw(self,rml,localcontext = None):
        obj = render.odt2odt(rml,localcontext)
        obj.render()
        return obj.get()
    def create_odt(self,rml,localcontext = None):
        obj = render.odt2odt(rml,localcontext)
        obj.render()
        return obj.get()
    def create_makohtml2html(self,html,localcontext = None):
        obj = render.makohtml2html(html,localcontext)
        obj.render()
        return obj.get()
    def _get_path(self):
        # search path used by the renderer to locate resources (images,
        # fonts, ...) referenced by the template
        return [
            self.tmpl.replace(os.path.sep, '/').rsplit('/', 1)[0],
            'addons',
            tools.config['root_path']
        ]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jkburges/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/port/qt_unittest.py | 113 | 6923 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
import os
from copy import deepcopy
from webkitpy.common.system.executive_mock import MockExecutive, MockExecutive2
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.port import port_testcase
from webkitpy.port.qt import QtPort
from webkitpy.tool.mocktool import MockOptions
class QtPortTest(port_testcase.PortTestCase):
    """Port tests for the Qt port across mac/win/linux in WebKit1 and WebKit2 modes."""
    port_name = 'qt-mac'
    port_maker = QtPort
    # Expected baseline search paths (most specific platform directory first)
    # for each (os, webkit2, qt version) combination.
    search_paths_cases = [
        {'search_paths': ['qt-mac-wk2', 'qt-wk2', 'qt-5.0', 'qt-mac', 'qt'], 'os_name': 'mac', 'use_webkit2': True, 'qt_version': '5.0'},
        {'search_paths': ['qt-wk2', 'qt-5.0', 'qt-win', 'qt'], 'os_name': 'win', 'use_webkit2': True, 'qt_version': '5.0'},
        {'search_paths': ['qt-wk2', 'qt-5.0', 'qt-linux', 'qt'], 'os_name': 'linux', 'use_webkit2': True, 'qt_version': '5.0'},
        {'search_paths': ['qt-wk1', 'qt-5.0', 'qt-mac', 'qt'], 'os_name': 'mac', 'use_webkit2': False, 'qt_version': '5.0'},
        {'search_paths': ['qt-wk1', 'qt-5.0', 'qt-win', 'qt'], 'os_name': 'win', 'use_webkit2': False, 'qt_version': '5.0'},
        {'search_paths': ['qt-wk1', 'qt-5.0', 'qt-linux', 'qt'], 'os_name': 'linux', 'use_webkit2': False, 'qt_version': '5.0'},
        {'search_paths': ['qt-wk1', 'qt-5.1', 'qt-linux', 'qt'], 'os_name': 'linux', 'use_webkit2': False, 'qt_version': '5.1'},
    ]

    def _create_port(self, os_name, use_webkit2, qt_version):
        # Shared port construction for the two assert helpers below.
        # FIXME: Port constructors should not "parse" the port name, but
        # rather be passed components (directly or via setters). Once
        # we fix that, this helper will need a re-write.
        host = MockSystemHost(os_name=os_name)
        host.executive = MockExecutive2(self._qt_version(qt_version))
        return self.make_port(host=host, qt_version=qt_version,
                              port_name='qt-' + os_name,
                              options=MockOptions(webkit_test_runner=use_webkit2, platform='qt'))

    def _assert_search_path(self, search_paths, os_name, use_webkit2=False, qt_version='5.0'):
        """Assert the port's baseline search path matches *search_paths* (made absolute)."""
        port = self._create_port(os_name, use_webkit2, qt_version)
        # list(...) keeps the comparison valid on Python 3 where map() is lazy.
        absolute_search_paths = list(map(port._webkit_baseline_path, search_paths))
        self.assertEqual(port.baseline_search_path(), absolute_search_paths)

    def _assert_expectations_files(self, search_paths, os_name, use_webkit2=False, qt_version='5.0'):
        """Assert the port's TestExpectations file list matches *search_paths*."""
        port = self._create_port(os_name, use_webkit2, qt_version)
        self.assertEqual(port.expectations_files(), search_paths)

    def _qt_version(self, qt_version):
        # Canned `qmake -v` output for the requested version.
        # BUG FIX: the original used `qt_version in '5.0'` — a substring test
        # with the operands reversed (so e.g. '5' or '.0' would also match).
        # Equality is what was intended; behavior for '5.0'/'5.1' is unchanged.
        if qt_version == '5.0':
            return 'QMake version 2.01a\nUsing Qt version 5.0.0 in /usr/local/Trolltech/Qt-5.0.0/lib'
        if qt_version == '5.1':
            return 'QMake version 3.0\nUsing Qt version 5.1.1 in /usr/local/Qt-5.1/lib'

    def test_baseline_search_path(self):
        for case in self.search_paths_cases:
            self._assert_search_path(**case)

    def test_expectations_files(self):
        for case in self.search_paths_cases:
            expectations_case = deepcopy(case)
            if expectations_case['use_webkit2']:
                expectations_case['search_paths'].append("wk2")
            # The generic (no-platform) TestExpectations file comes first.
            expectations_case['search_paths'].append('')
            expectations_case['search_paths'].reverse()
            expectations_case['search_paths'] = list(map(
                lambda path: '/mock-checkout/LayoutTests/TestExpectations' if not path
                else '/mock-checkout/LayoutTests/platform/%s/TestExpectations' % (path),
                expectations_case['search_paths']))
            self._assert_expectations_files(**expectations_case)

    def test_show_results_html_file(self):
        port = self.make_port()
        port._executive = MockExecutive(should_log=True)
        expected_logs = "MOCK run_command: ['Tools/Scripts/run-launcher', '--release', '--qt', 'file://test.html'], cwd=/mock-checkout\n"
        OutputCapture().assert_outputs(self, port.show_results_html_file, ["test.html"], expected_logs=expected_logs)

    def test_setup_environ_for_server(self):
        port = self.make_port()
        env = port.setup_environ_for_server(port.driver_name())
        self.assertEqual(env['QTWEBKIT_PLUGIN_PATH'], '/mock-build/lib/plugins')

    def test_operating_system(self):
        self.assertEqual('linux', self.make_port(port_name='qt-linux', os_name='linux').operating_system())
        self.assertEqual('mac', self.make_port(os_name='mac').operating_system())
        self.assertEqual('win', self.make_port(port_name='qt-win', os_name='win').operating_system())

    def test_check_sys_deps(self):
        port = self.make_port()
        # Success
        os.environ['WEBKIT_TESTFONTS'] = '/tmp/foo'
        port._executive = MockExecutive2(exit_code=0)
        self.assertTrue(port.check_sys_deps(needs_http=False))
        # Failure
        del os.environ['WEBKIT_TESTFONTS']
        port._executive = MockExecutive2(exit_code=1,
                                         output='testing output failure')
        self.assertFalse(port.check_sys_deps(needs_http=False))
| bsd-3-clause |
leppa/home-assistant | tests/components/heos/test_config_flow.py | 3 | 4330 | """Tests for the Heos config flow module."""
from urllib.parse import urlparse
from pyheos import HeosError
from homeassistant import data_entry_flow
from homeassistant.components import ssdp
from homeassistant.components.heos.config_flow import HeosFlowHandler
from homeassistant.components.heos.const import DATA_DISCOVERED_HOSTS, DOMAIN
from homeassistant.const import CONF_HOST
async def test_flow_aborts_already_setup(hass, config_entry):
    """Test flow aborts when entry already setup."""
    config_entry.add_to_hass(hass)
    handler = HeosFlowHandler()
    handler.hass = hass
    outcome = await handler.async_step_user()
    assert outcome["reason"] == "already_setup"
    assert outcome["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_no_host_shows_form(hass):
    """Test form is shown when host not provided."""
    handler = HeosFlowHandler()
    handler.hass = hass
    outcome = await handler.async_step_user()
    assert outcome["step_id"] == "user"
    assert outcome["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert outcome["errors"] == {}
async def test_cannot_connect_shows_error_form(hass, controller):
    """Test form is shown with error when cannot connect."""
    controller.connect.side_effect = HeosError()
    handler = HeosFlowHandler()
    handler.hass = hass
    outcome = await handler.async_step_user({CONF_HOST: "127.0.0.1"})
    assert outcome["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert outcome["step_id"] == "user"
    assert outcome["errors"][CONF_HOST] == "connection_failure"
    # The flow probes the controller exactly once and always disconnects.
    assert controller.connect.call_count == 1
    assert controller.disconnect.call_count == 1
    controller.connect.reset_mock()
    controller.disconnect.reset_mock()
async def test_create_entry_when_host_valid(hass, controller):
    """Test result type is create entry when host is valid."""
    handler = HeosFlowHandler()
    handler.hass = hass
    user_input = {CONF_HOST: "127.0.0.1"}
    outcome = await handler.async_step_user(user_input)
    assert outcome["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert outcome["title"] == "Controller (127.0.0.1)"
    assert outcome["data"] == user_input
    assert controller.connect.call_count == 1
    assert controller.disconnect.call_count == 1
async def test_create_entry_when_friendly_name_valid(hass, controller):
    """Test result type is create entry when friendly name is valid."""
    hass.data[DATA_DISCOVERED_HOSTS] = {"Office (127.0.0.1)": "127.0.0.1"}
    handler = HeosFlowHandler()
    handler.hass = hass
    outcome = await handler.async_step_user({CONF_HOST: "Office (127.0.0.1)"})
    assert outcome["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert outcome["title"] == "Controller (127.0.0.1)"
    # The friendly name is resolved back to the raw host address.
    assert outcome["data"] == {CONF_HOST: "127.0.0.1"}
    assert controller.connect.call_count == 1
    assert controller.disconnect.call_count == 1
    # The discovery cache is consumed once an entry is created.
    assert DATA_DISCOVERED_HOSTS not in hass.data
async def test_discovery_shows_create_form(hass, controller, discovery_data):
    """Test discovery shows form to confirm setup and subsequent abort."""
    await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "ssdp"}, data=discovery_data
    )
    await hass.async_block_till_done()
    assert len(hass.config_entries.flow.async_progress()) == 1
    assert hass.data[DATA_DISCOVERED_HOSTS] == {"Office (127.0.0.1)": "127.0.0.1"}

    # A second discovery from a different host is added to the cache but does
    # not start an additional flow.
    ssdp_port = urlparse(discovery_data[ssdp.ATTR_SSDP_LOCATION]).port
    discovery_data[ssdp.ATTR_SSDP_LOCATION] = f"http://127.0.0.2:{ssdp_port}/"
    discovery_data[ssdp.ATTR_UPNP_FRIENDLY_NAME] = "Bedroom"
    await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "ssdp"}, data=discovery_data
    )
    await hass.async_block_till_done()
    assert len(hass.config_entries.flow.async_progress()) == 1
    expected_hosts = {
        "Office (127.0.0.1)": "127.0.0.1",
        "Bedroom (127.0.0.2)": "127.0.0.2",
    }
    assert hass.data[DATA_DISCOVERED_HOSTS] == expected_hosts
async def test_disovery_flow_aborts_already_setup(
    hass, controller, discovery_data, config_entry
):
    """Test discovery flow aborts when entry already setup."""
    # NOTE(review): "disovery" typo kept in the name — renaming would change
    # the pytest test id.
    config_entry.add_to_hass(hass)
    handler = HeosFlowHandler()
    handler.hass = hass
    outcome = await handler.async_step_ssdp(discovery_data)
    assert outcome["reason"] == "already_setup"
    assert outcome["type"] == data_entry_flow.RESULT_TYPE_ABORT
| apache-2.0 |
rezasafi/spark | python/pyspark/ml/linalg/__init__.py | 20 | 39556 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
MLlib utilities for linear algebra. For dense vectors, MLlib
uses the NumPy `array` type, so you can simply pass NumPy arrays
around. For sparse vectors, users can construct a :class:`SparseVector`
object from MLlib or pass SciPy `scipy.sparse` column vectors if
SciPy is available in their environment.
"""
import sys
import array
import struct
if sys.version >= '3':
basestring = str
xrange = range
import copyreg as copy_reg
long = int
else:
from itertools import izip as zip
import copy_reg
import numpy as np
from pyspark import since
from pyspark.sql.types import UserDefinedType, StructField, StructType, ArrayType, DoubleType, \
IntegerType, ByteType, BooleanType
__all__ = ['Vector', 'DenseVector', 'SparseVector', 'Vectors',
'Matrix', 'DenseMatrix', 'SparseMatrix', 'Matrices']
if sys.version_info[:2] == (2, 7):
# speed up pickling array in Python 2.7
def fast_pickle_array(ar):
return array.array, (ar.typecode, ar.tostring())
copy_reg.pickle(array.array, fast_pickle_array)
# Check whether we have SciPy. MLlib works without it too, but if we have it, some methods,
# such as _dot and _serialize_double_vector, start to support scipy.sparse matrices.
try:
import scipy.sparse
_have_scipy = True
except:
# No SciPy in environment, but that's okay
_have_scipy = False
def _convert_to_vector(l):
    """Coerce *l* (Vector, array-like, or SciPy sparse column) to an MLlib Vector."""
    if isinstance(l, Vector):
        return l
    if type(l) in (array.array, np.array, np.ndarray, list, tuple, xrange):
        return DenseVector(l)
    if _have_scipy and scipy.sparse.issparse(l):
        assert l.shape[1] == 1, "Expected column vector"
        # Make sure the converted csc_matrix has sorted indices.
        csc = l.tocsc()
        if not csc.has_sorted_indices:
            csc.sort_indices()
        return SparseVector(l.shape[0], csc.indices, csc.data)
    raise TypeError("Cannot convert type %s into Vector" % type(l))
def _vector_size(v):
    """
    Returns the size of the vector.

    >>> _vector_size([1., 2., 3.])
    3
    >>> _vector_size((1., 2., 3.))
    3
    >>> _vector_size(array.array('d', [1., 2., 3.]))
    3
    >>> _vector_size(np.zeros(3))
    3
    >>> _vector_size(np.zeros((3, 1)))
    3
    >>> _vector_size(np.zeros((1, 3)))
    Traceback (most recent call last):
        ...
    ValueError: Cannot treat an ndarray of shape (1, 3) as a vector
    """
    if isinstance(v, Vector):
        return len(v)
    if type(v) in (array.array, list, tuple, xrange):
        return len(v)
    if type(v) == np.ndarray:
        # Accept only 1-d arrays and (n, 1) column vectors.
        if v.ndim == 1 or (v.ndim == 2 and v.shape[1] == 1):
            return len(v)
        raise ValueError("Cannot treat an ndarray of shape %s as a vector" % str(v.shape))
    if _have_scipy and scipy.sparse.issparse(v):
        assert v.shape[1] == 1, "Expected column vector"
        return v.shape[0]
    raise TypeError("Cannot treat type %s as a vector" % type(v))
def _format_float(f, digits=4):
s = str(round(f, digits))
if '.' in s:
s = s[:s.index('.') + 1 + digits]
return s
def _format_float_list(l):
    """Apply :func:`_format_float` to every element of *l* and return a list."""
    return list(map(_format_float, l))
def _double_to_long_bits(value):
if np.isnan(value):
value = float('nan')
# pack double into 64 bits, then unpack as long int
return struct.unpack('Q', struct.pack('d', value))[0]
class VectorUDT(UserDefinedType):
    """
    SQL user-defined type (UDT) for Vector.
    """

    @classmethod
    def sqlType(cls):
        # type: 0 = sparse, 1 = dense; unused fields are nullable.
        return StructType([
            StructField("type", ByteType(), False),
            StructField("size", IntegerType(), True),
            StructField("indices", ArrayType(IntegerType(), False), True),
            StructField("values", ArrayType(DoubleType(), False), True)])

    @classmethod
    def module(cls):
        return "pyspark.ml.linalg"

    @classmethod
    def scalaUDT(cls):
        return "org.apache.spark.ml.linalg.VectorUDT"

    def serialize(self, obj):
        if isinstance(obj, SparseVector):
            # Coerce numpy scalars to plain Python ints/floats for the row.
            return (0, obj.size,
                    [int(i) for i in obj.indices],
                    [float(v) for v in obj.values])
        if isinstance(obj, DenseVector):
            return (1, None, None, [float(v) for v in obj])
        raise TypeError("cannot serialize %r of type %r" % (obj, type(obj)))

    def deserialize(self, datum):
        assert len(datum) == 4, \
            "VectorUDT.deserialize given row with length %d but requires 4" % len(datum)
        tpe = datum[0]
        if tpe == 0:
            return SparseVector(datum[1], datum[2], datum[3])
        if tpe == 1:
            return DenseVector(datum[3])
        raise ValueError("do not recognize type %r" % tpe)

    def simpleString(self):
        return "vector"
class MatrixUDT(UserDefinedType):
    """
    SQL user-defined type (UDT) for Matrix.
    """

    @classmethod
    def sqlType(cls):
        # type: 0 = sparse, 1 = dense; CSC-only fields are nullable for dense.
        return StructType([
            StructField("type", ByteType(), False),
            StructField("numRows", IntegerType(), False),
            StructField("numCols", IntegerType(), False),
            StructField("colPtrs", ArrayType(IntegerType(), False), True),
            StructField("rowIndices", ArrayType(IntegerType(), False), True),
            StructField("values", ArrayType(DoubleType(), False), True),
            StructField("isTransposed", BooleanType(), False)])

    @classmethod
    def module(cls):
        return "pyspark.ml.linalg"

    @classmethod
    def scalaUDT(cls):
        return "org.apache.spark.ml.linalg.MatrixUDT"

    def serialize(self, obj):
        if isinstance(obj, SparseMatrix):
            # Coerce numpy scalars to plain Python ints/floats for the row.
            return (0, obj.numRows, obj.numCols,
                    [int(i) for i in obj.colPtrs],
                    [int(i) for i in obj.rowIndices],
                    [float(v) for v in obj.values],
                    bool(obj.isTransposed))
        if isinstance(obj, DenseMatrix):
            return (1, obj.numRows, obj.numCols, None, None,
                    [float(v) for v in obj.values],
                    bool(obj.isTransposed))
        raise TypeError("cannot serialize type %r" % (type(obj)))

    def deserialize(self, datum):
        assert len(datum) == 7, \
            "MatrixUDT.deserialize given row with length %d but requires 7" % len(datum)
        tpe = datum[0]
        if tpe == 0:
            return SparseMatrix(*datum[1:])
        if tpe == 1:
            return DenseMatrix(datum[1], datum[2], datum[5], datum[6])
        raise ValueError("do not recognize type %r" % tpe)

    def simpleString(self):
        return "matrix"
class Vector(object):
    """
    Abstract class for DenseVector and SparseVector
    """
    # FIX: the docstring above originally appeared AFTER this assignment, which
    # made it a no-op string statement rather than the class docstring
    # (Vector.__doc__ was None). Moving it first makes it the real docstring.
    __UDT__ = VectorUDT()

    def toArray(self):
        """
        Convert the vector into an numpy.ndarray

        :return: numpy.ndarray
        """
        raise NotImplementedError
class DenseVector(Vector):
"""
A dense vector represented by a value array. We use numpy array for
storage and arithmetics will be delegated to the underlying numpy
array.
>>> v = Vectors.dense([1.0, 2.0])
>>> u = Vectors.dense([3.0, 4.0])
>>> v + u
DenseVector([4.0, 6.0])
>>> 2 - v
DenseVector([1.0, 0.0])
>>> v / 2
DenseVector([0.5, 1.0])
>>> v * u
DenseVector([3.0, 8.0])
>>> u / v
DenseVector([3.0, 2.0])
>>> u % 2
DenseVector([1.0, 0.0])
>>> -v
DenseVector([-1.0, -2.0])
"""
def __init__(self, ar):
if isinstance(ar, bytes):
ar = np.frombuffer(ar, dtype=np.float64)
elif not isinstance(ar, np.ndarray):
ar = np.array(ar, dtype=np.float64)
if ar.dtype != np.float64:
ar = ar.astype(np.float64)
self.array = ar
def __reduce__(self):
return DenseVector, (self.array.tostring(),)
def numNonzeros(self):
"""
Number of nonzero elements. This scans all active values and count non zeros
"""
return np.count_nonzero(self.array)
def norm(self, p):
"""
Calculates the norm of a DenseVector.
>>> a = DenseVector([0, -1, 2, -3])
>>> a.norm(2)
3.7...
>>> a.norm(1)
6.0
"""
return np.linalg.norm(self.array, p)
def dot(self, other):
"""
Compute the dot product of two Vectors. We support
(Numpy array, list, SparseVector, or SciPy sparse)
and a target NumPy array that is either 1- or 2-dimensional.
Equivalent to calling numpy.dot of the two vectors.
>>> dense = DenseVector(array.array('d', [1., 2.]))
>>> dense.dot(dense)
5.0
>>> dense.dot(SparseVector(2, [0, 1], [2., 1.]))
4.0
>>> dense.dot(range(1, 3))
5.0
>>> dense.dot(np.array(range(1, 3)))
5.0
>>> dense.dot([1.,])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> dense.dot(np.reshape([1., 2., 3., 4.], (2, 2), order='F'))
array([ 5., 11.])
>>> dense.dot(np.reshape([1., 2., 3.], (3, 1), order='F'))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
"""
if type(other) == np.ndarray:
if other.ndim > 1:
assert len(self) == other.shape[0], "dimension mismatch"
return np.dot(self.array, other)
elif _have_scipy and scipy.sparse.issparse(other):
assert len(self) == other.shape[0], "dimension mismatch"
return other.transpose().dot(self.toArray())
else:
assert len(self) == _vector_size(other), "dimension mismatch"
if isinstance(other, SparseVector):
return other.dot(self)
elif isinstance(other, Vector):
return np.dot(self.toArray(), other.toArray())
else:
return np.dot(self.toArray(), other)
def squared_distance(self, other):
"""
Squared distance of two Vectors.
>>> dense1 = DenseVector(array.array('d', [1., 2.]))
>>> dense1.squared_distance(dense1)
0.0
>>> dense2 = np.array([2., 1.])
>>> dense1.squared_distance(dense2)
2.0
>>> dense3 = [2., 1.]
>>> dense1.squared_distance(dense3)
2.0
>>> sparse1 = SparseVector(2, [0, 1], [2., 1.])
>>> dense1.squared_distance(sparse1)
2.0
>>> dense1.squared_distance([1.,])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> dense1.squared_distance(SparseVector(1, [0,], [1.,]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
"""
assert len(self) == _vector_size(other), "dimension mismatch"
if isinstance(other, SparseVector):
return other.squared_distance(self)
elif _have_scipy and scipy.sparse.issparse(other):
return _convert_to_vector(other).squared_distance(self)
if isinstance(other, Vector):
other = other.toArray()
elif not isinstance(other, np.ndarray):
other = np.array(other)
diff = self.toArray() - other
return np.dot(diff, diff)
def toArray(self):
"""
Returns the underlying numpy.ndarray
"""
return self.array
@property
def values(self):
"""
Returns the underlying numpy.ndarray
"""
return self.array
def __getitem__(self, item):
return self.array[item]
def __len__(self):
return len(self.array)
def __str__(self):
return "[" + ",".join([str(v) for v in self.array]) + "]"
def __repr__(self):
return "DenseVector([%s])" % (', '.join(_format_float(i) for i in self.array))
def __eq__(self, other):
if isinstance(other, DenseVector):
return np.array_equal(self.array, other.array)
elif isinstance(other, SparseVector):
if len(self) != other.size:
return False
return Vectors._equals(list(xrange(len(self))), self.array, other.indices, other.values)
return False
def __ne__(self, other):
return not self == other
def __hash__(self):
    """Hash folding in index and bit pattern of the first 128 nonzero entries."""
    n = len(self)
    result = 31 + n
    nonzeros_seen = 0
    idx = 0
    # Cap the scan at 128 nonzeros so hashing large vectors stays cheap.
    while idx < n and nonzeros_seen < 128:
        value = self.array[idx]
        if value != 0:
            result = 31 * result + idx
            bits = _double_to_long_bits(value)
            result = 31 * result + (bits ^ (bits >> 32))
            nonzeros_seen += 1
        idx += 1
    return result
def __getattr__(self, item):
return getattr(self.array, item)
def __neg__(self):
return DenseVector(-self.array)
def _delegate(op):
def func(self, other):
if isinstance(other, DenseVector):
other = other.array
return DenseVector(getattr(self.array, op)(other))
return func
__add__ = _delegate("__add__")
__sub__ = _delegate("__sub__")
__mul__ = _delegate("__mul__")
__div__ = _delegate("__div__")
__truediv__ = _delegate("__truediv__")
__mod__ = _delegate("__mod__")
__radd__ = _delegate("__radd__")
__rsub__ = _delegate("__rsub__")
__rmul__ = _delegate("__rmul__")
__rdiv__ = _delegate("__rdiv__")
__rtruediv__ = _delegate("__rtruediv__")
__rmod__ = _delegate("__rmod__")
class SparseVector(Vector):
"""
A simple sparse vector class for passing data to MLlib. Users may
alternatively pass SciPy's {scipy.sparse} data types.
"""
def __init__(self, size, *args):
"""
Create a sparse vector, using either a dictionary, a list of
(index, value) pairs, or two separate arrays of indices and
values (sorted by index).
:param size: Size of the vector.
:param args: Active entries, as a dictionary {index: value, ...},
a list of tuples [(index, value), ...], or a list of strictly
increasing indices and a list of corresponding values [index, ...],
[value, ...]. Inactive entries are treated as zeros.
>>> SparseVector(4, {1: 1.0, 3: 5.5})
SparseVector(4, {1: 1.0, 3: 5.5})
>>> SparseVector(4, [(1, 1.0), (3, 5.5)])
SparseVector(4, {1: 1.0, 3: 5.5})
>>> SparseVector(4, [1, 3], [1.0, 5.5])
SparseVector(4, {1: 1.0, 3: 5.5})
>>> SparseVector(4, {1:1.0, 6:2.0})
Traceback (most recent call last):
...
AssertionError: Index 6 is out of the size of vector with size=4
>>> SparseVector(4, {-1:1.0})
Traceback (most recent call last):
...
AssertionError: Contains negative index -1
"""
self.size = int(size)
""" Size of the vector. """
assert 1 <= len(args) <= 2, "must pass either 2 or 3 arguments"
if len(args) == 1:
pairs = args[0]
if type(pairs) == dict:
pairs = pairs.items()
pairs = sorted(pairs)
self.indices = np.array([p[0] for p in pairs], dtype=np.int32)
""" A list of indices corresponding to active entries. """
self.values = np.array([p[1] for p in pairs], dtype=np.float64)
""" A list of values corresponding to active entries. """
else:
if isinstance(args[0], bytes):
assert isinstance(args[1], bytes), "values should be string too"
if args[0]:
self.indices = np.frombuffer(args[0], np.int32)
self.values = np.frombuffer(args[1], np.float64)
else:
# np.frombuffer() doesn't work well with empty string in older version
self.indices = np.array([], dtype=np.int32)
self.values = np.array([], dtype=np.float64)
else:
self.indices = np.array(args[0], dtype=np.int32)
self.values = np.array(args[1], dtype=np.float64)
assert len(self.indices) == len(self.values), "index and value arrays not same length"
for i in xrange(len(self.indices) - 1):
if self.indices[i] >= self.indices[i + 1]:
raise TypeError(
"Indices %s and %s are not strictly increasing"
% (self.indices[i], self.indices[i + 1]))
if self.indices.size > 0:
assert np.max(self.indices) < self.size, \
"Index %d is out of the size of vector with size=%d" \
% (np.max(self.indices), self.size)
assert np.min(self.indices) >= 0, \
"Contains negative index %d" % (np.min(self.indices))
def numNonzeros(self):
"""
Number of nonzero elements. This scans all active values and count non zeros.
"""
return np.count_nonzero(self.values)
def norm(self, p):
"""
Calculates the norm of a SparseVector.
>>> a = SparseVector(4, [0, 1], [3., -4.])
>>> a.norm(1)
7.0
>>> a.norm(2)
5.0
"""
return np.linalg.norm(self.values, p)
def __reduce__(self):
return (
SparseVector,
(self.size, self.indices.tostring(), self.values.tostring()))
def dot(self, other):
"""
Dot product with a SparseVector or 1- or 2-dimensional Numpy array.
>>> a = SparseVector(4, [1, 3], [3.0, 4.0])
>>> a.dot(a)
25.0
>>> a.dot(array.array('d', [1., 2., 3., 4.]))
22.0
>>> b = SparseVector(4, [2], [1.0])
>>> a.dot(b)
0.0
>>> a.dot(np.array([[1, 1], [2, 2], [3, 3], [4, 4]]))
array([ 22., 22.])
>>> a.dot([1., 2., 3.])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> a.dot(np.array([1., 2.]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> a.dot(DenseVector([1., 2.]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> a.dot(np.zeros((3, 2)))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
"""
if isinstance(other, np.ndarray):
if other.ndim not in [2, 1]:
raise ValueError("Cannot call dot with %d-dimensional array" % other.ndim)
assert len(self) == other.shape[0], "dimension mismatch"
return np.dot(self.values, other[self.indices])
assert len(self) == _vector_size(other), "dimension mismatch"
if isinstance(other, DenseVector):
return np.dot(other.array[self.indices], self.values)
elif isinstance(other, SparseVector):
# Find out common indices.
self_cmind = np.in1d(self.indices, other.indices, assume_unique=True)
self_values = self.values[self_cmind]
if self_values.size == 0:
return 0.0
else:
other_cmind = np.in1d(other.indices, self.indices, assume_unique=True)
return np.dot(self_values, other.values[other_cmind])
else:
return self.dot(_convert_to_vector(other))
def squared_distance(self, other):
"""
Squared distance from a SparseVector or 1-dimensional NumPy array.
>>> a = SparseVector(4, [1, 3], [3.0, 4.0])
>>> a.squared_distance(a)
0.0
>>> a.squared_distance(array.array('d', [1., 2., 3., 4.]))
11.0
>>> a.squared_distance(np.array([1., 2., 3., 4.]))
11.0
>>> b = SparseVector(4, [2], [1.0])
>>> a.squared_distance(b)
26.0
>>> b.squared_distance(a)
26.0
>>> b.squared_distance([1., 2.])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> b.squared_distance(SparseVector(3, [1,], [1.0,]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
"""
assert len(self) == _vector_size(other), "dimension mismatch"
if isinstance(other, np.ndarray) or isinstance(other, DenseVector):
if isinstance(other, np.ndarray) and other.ndim != 1:
raise Exception("Cannot call squared_distance with %d-dimensional array" %
other.ndim)
if isinstance(other, DenseVector):
other = other.array
sparse_ind = np.zeros(other.size, dtype=bool)
sparse_ind[self.indices] = True
dist = other[sparse_ind] - self.values
result = np.dot(dist, dist)
other_ind = other[~sparse_ind]
result += np.dot(other_ind, other_ind)
return result
elif isinstance(other, SparseVector):
result = 0.0
i, j = 0, 0
while i < len(self.indices) and j < len(other.indices):
if self.indices[i] == other.indices[j]:
diff = self.values[i] - other.values[j]
result += diff * diff
i += 1
j += 1
elif self.indices[i] < other.indices[j]:
result += self.values[i] * self.values[i]
i += 1
else:
result += other.values[j] * other.values[j]
j += 1
while i < len(self.indices):
result += self.values[i] * self.values[i]
i += 1
while j < len(other.indices):
result += other.values[j] * other.values[j]
j += 1
return result
else:
return self.squared_distance(_convert_to_vector(other))
def toArray(self):
"""
Returns a copy of this SparseVector as a 1-dimensional numpy.ndarray.
"""
arr = np.zeros((self.size,), dtype=np.float64)
arr[self.indices] = self.values
return arr
def __len__(self):
return self.size
def __str__(self):
inds = "[" + ",".join([str(i) for i in self.indices]) + "]"
vals = "[" + ",".join([str(v) for v in self.values]) + "]"
return "(" + ",".join((str(self.size), inds, vals)) + ")"
def __repr__(self):
inds = self.indices
vals = self.values
entries = ", ".join(["{0}: {1}".format(inds[i], _format_float(vals[i]))
for i in xrange(len(inds))])
return "SparseVector({0}, {{{1}}})".format(self.size, entries)
def __eq__(self, other):
if isinstance(other, SparseVector):
return other.size == self.size and np.array_equal(other.indices, self.indices) \
and np.array_equal(other.values, self.values)
elif isinstance(other, DenseVector):
if self.size != len(other):
return False
return Vectors._equals(self.indices, self.values, list(xrange(len(other))), other.array)
return False
def __getitem__(self, index):
    """Return the value at *index* (negative indices allowed); 0. for inactive slots."""
    if not isinstance(index, int):
        raise TypeError(
            "Indices must be of type integer, got type %s" % type(index))
    if index >= self.size or index < -self.size:
        raise IndexError("Index %d out of bounds." % index)
    if index < 0:
        index += self.size
    inds = self.indices
    # No active entries at or above this position -> implicit zero.
    if (inds.size == 0) or (index > inds.item(-1)):
        return 0.
    pos = np.searchsorted(inds, index)
    if inds[pos] == index:
        return self.values[pos]
    return 0.
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
result = 31 + self.size
nnz = 0
i = 0
while i < len(self.values) and nnz < 128:
if self.values[i] != 0:
result = 31 * result + int(self.indices[i])
bits = _double_to_long_bits(self.values[i])
result = 31 * result + (bits ^ (bits >> 32))
nnz += 1
i += 1
return result
class Vectors(object):

    """
    Factory methods for working with vectors.

    .. note:: Dense vectors are simply represented as NumPy array objects,
        so there is no need to convert them for use in MLlib. For sparse vectors,
        the factory methods in this class create an MLlib-compatible type, or users
        can pass in SciPy's `scipy.sparse` column vectors.
    """

    @staticmethod
    def sparse(size, *args):
        """
        Create a sparse vector, using either a dictionary, a list of
        (index, value) pairs, or two separate arrays of indices and
        values (sorted by index).

        :param size: Size of the vector.
        :param args: Non-zero entries, as a dictionary, list of tuples,
                     or two sorted lists containing indices and values.

        >>> Vectors.sparse(4, {1: 1.0, 3: 5.5})
        SparseVector(4, {1: 1.0, 3: 5.5})
        >>> Vectors.sparse(4, [(1, 1.0), (3, 5.5)])
        SparseVector(4, {1: 1.0, 3: 5.5})
        >>> Vectors.sparse(4, [1, 3], [1.0, 5.5])
        SparseVector(4, {1: 1.0, 3: 5.5})
        """
        return SparseVector(size, *args)

    @staticmethod
    def dense(*elements):
        """
        Create a dense vector of 64-bit floats from a Python list or numbers.

        >>> Vectors.dense([1, 2, 3])
        DenseVector([1.0, 2.0, 3.0])
        >>> Vectors.dense(1.0, 2.0)
        DenseVector([1.0, 2.0])
        """
        if len(elements) == 1 and not isinstance(elements[0], (float, int, long)):
            # A single non-numeric argument is a list/ndarray/iterable of values.
            elements = elements[0]
        return DenseVector(elements)

    @staticmethod
    def squared_distance(v1, v2):
        """
        Squared distance between two vectors.
        a and b can be of type SparseVector, DenseVector, np.ndarray
        or array.array.

        >>> a = Vectors.sparse(4, [(0, 1), (3, 4)])
        >>> b = Vectors.dense([2, 5, 4, 1])
        >>> a.squared_distance(b)
        51.0
        """
        left, right = _convert_to_vector(v1), _convert_to_vector(v2)
        return left.squared_distance(right)

    @staticmethod
    def norm(vector, p):
        """
        Find norm of the given vector.
        """
        return _convert_to_vector(vector).norm(p)

    @staticmethod
    def zeros(size):
        return DenseVector(np.zeros(size))

    @staticmethod
    def _equals(v1_indices, v1_values, v2_indices, v2_values):
        """
        Check equality between sparse/dense vectors,
        v1_indices and v2_indices assume to be strictly increasing.
        """
        n1 = len(v1_values)
        n2 = len(v2_values)
        k1 = k2 = 0
        all_equal = True
        while all_equal:
            # Explicitly-stored zeros do not affect equality; skip them.
            while k1 < n1 and v1_values[k1] == 0:
                k1 += 1
            while k2 < n2 and v2_values[k2] == 0:
                k2 += 1
            if k1 >= n1 or k2 >= n2:
                # Equal iff both sides are exhausted simultaneously.
                return k1 >= n1 and k2 >= n2
            all_equal = v1_indices[k1] == v2_indices[k2] and v1_values[k1] == v2_values[k2]
            k1 += 1
            k2 += 1
        return all_equal
class Matrix(object):
    """
    Represents a local matrix.
    """
    # FIX: the docstring above originally appeared AFTER this assignment, which
    # made it a no-op string statement rather than the class docstring.
    __UDT__ = MatrixUDT()

    def __init__(self, numRows, numCols, isTransposed=False):
        self.numRows = numRows
        self.numCols = numCols
        self.isTransposed = isTransposed

    def toArray(self):
        """
        Returns its elements in a numpy.ndarray.
        """
        raise NotImplementedError

    @staticmethod
    def _convert_to_array(array_like, dtype):
        """
        Convert Matrix attributes which are array-like or buffer to array.
        """
        if isinstance(array_like, bytes):
            return np.frombuffer(array_like, dtype=dtype)
        return np.asarray(array_like, dtype=dtype)
class DenseMatrix(Matrix):
    """
    Column-major dense matrix.
    """
    def __init__(self, numRows, numCols, values, isTransposed=False):
        Matrix.__init__(self, numRows, numCols, isTransposed)
        # Accepts bytes, array-like or buffer; normalised to a float64 array.
        values = self._convert_to_array(values, np.float64)
        assert len(values) == numRows * numCols
        self.values = values

    def __reduce__(self):
        # Pickle support: serialise the element array as raw bytes.
        # NOTE(review): ndarray.tostring() is deprecated in newer NumPy in
        # favour of tobytes() -- fine for the versions this module targets.
        return DenseMatrix, (
            self.numRows, self.numCols, self.values.tostring(),
            int(self.isTransposed))

    def __str__(self):
        """
        Pretty printing of a DenseMatrix

        >>> dm = DenseMatrix(2, 2, range(4))
        >>> print(dm)
        DenseMatrix([[ 0.,  2.],
                     [ 1.,  3.]])
        >>> dm = DenseMatrix(2, 2, range(4), isTransposed=True)
        >>> print(dm)
        DenseMatrix([[ 0.,  1.],
                     [ 2.,  3.]])
        """
        # Inspired by __repr__ in scipy matrices.
        array_lines = repr(self.toArray()).splitlines()

        # We need to adjust six spaces which is the difference in number
        # of letters between "DenseMatrix" and "array"
        x = '\n'.join([(" " * 6 + line) for line in array_lines[1:]])
        return array_lines[0].replace("array", "DenseMatrix") + "\n" + x

    def __repr__(self):
        """
        Representation of a DenseMatrix

        >>> dm = DenseMatrix(2, 2, range(4))
        >>> dm
        DenseMatrix(2, 2, [0.0, 1.0, 2.0, 3.0], False)
        """
        # If the number of values are less than seventeen then return as it is.
        # Else return first eight values and last eight values.
        if len(self.values) < 17:
            entries = _format_float_list(self.values)
        else:
            entries = (
                _format_float_list(self.values[:8]) +
                ["..."] +
                _format_float_list(self.values[-8:])
            )

        entries = ", ".join(entries)
        return "DenseMatrix({0}, {1}, [{2}], {3})".format(
            self.numRows, self.numCols, entries, self.isTransposed)

    def toArray(self):
        """
        Return a numpy.ndarray

        >>> m = DenseMatrix(2, 2, range(4))
        >>> m.toArray()
        array([[ 0.,  2.],
               [ 1.,  3.]])
        """
        if self.isTransposed:
            # Data is row-major; reshape gives the logical matrix, then
            # convert to column-major (Fortran) layout for consistency.
            return np.asfortranarray(
                self.values.reshape((self.numRows, self.numCols)))
        else:
            return self.values.reshape((self.numRows, self.numCols), order='F')

    def toSparse(self):
        """Convert to SparseMatrix"""
        if self.isTransposed:
            # Re-linearise row-major data into column-major order first.
            values = np.ravel(self.toArray(), order='F')
        else:
            values = self.values
        indices = np.nonzero(values)[0]
        colCounts = np.bincount(indices // self.numRows)
        # Pad trailing all-zero columns so colPtrs ends up with
        # numCols + 1 entries.
        colPtrs = np.cumsum(np.hstack(
            (0, colCounts, np.zeros(self.numCols - colCounts.size))))
        values = values[indices]
        rowIndices = indices % self.numRows

        return SparseMatrix(self.numRows, self.numCols, colPtrs, rowIndices, values)

    def __getitem__(self, indices):
        i, j = indices
        if i < 0 or i >= self.numRows:
            raise IndexError("Row index %d is out of range [0, %d)"
                             % (i, self.numRows))
        if j >= self.numCols or j < 0:
            raise IndexError("Column index %d is out of range [0, %d)"
                             % (j, self.numCols))

        if self.isTransposed:
            # Row-major storage.
            return self.values[i * self.numCols + j]
        else:
            # Column-major storage.
            return self.values[i + j * self.numRows]

    def __eq__(self, other):
        # NOTE(review): assumes *other* is Matrix-like (has numRows, numCols
        # and toArray); a non-matrix argument raises AttributeError instead
        # of returning NotImplemented -- confirm callers never rely on that.
        if (self.numRows != other.numRows or self.numCols != other.numCols):
            return False
        if isinstance(other, SparseMatrix):
            return np.all(self.toArray() == other.toArray())

        # Compare both in the same (column-major) linearisation so the
        # isTransposed flag does not affect equality.
        self_values = np.ravel(self.toArray(), order='F')
        other_values = np.ravel(other.toArray(), order='F')
        return np.all(self_values == other_values)
class SparseMatrix(Matrix):
    """
    Sparse Matrix stored in CSC format.

    colPtrs gives, for each column (row when isTransposed is True, i.e. CSR
    layout), the offset of its first entry in rowIndices/values, with a
    final terminating entry equal to the number of stored values.
    """
    def __init__(self, numRows, numCols, colPtrs, rowIndices, values,
                 isTransposed=False):
        Matrix.__init__(self, numRows, numCols, isTransposed)
        self.colPtrs = self._convert_to_array(colPtrs, np.int32)
        self.rowIndices = self._convert_to_array(rowIndices, np.int32)
        self.values = self._convert_to_array(values, np.float64)

        if self.isTransposed:
            # CSR layout: one pointer per row plus the terminator.
            if self.colPtrs.size != numRows + 1:
                raise ValueError("Expected colPtrs of size %d, got %d."
                                 % (numRows + 1, self.colPtrs.size))
        else:
            # CSC layout: one pointer per column plus the terminator.
            if self.colPtrs.size != numCols + 1:
                raise ValueError("Expected colPtrs of size %d, got %d."
                                 % (numCols + 1, self.colPtrs.size))
        if self.rowIndices.size != self.values.size:
            # BUG FIX: the expected length is values.size and the actual
            # length is rowIndices.size; the original message interpolated
            # the two in the opposite order.
            raise ValueError("Expected rowIndices of length %d, got %d."
                             % (self.values.size, self.rowIndices.size))

    def __str__(self):
        """
        Pretty printing of a SparseMatrix

        >>> sm1 = SparseMatrix(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4])
        >>> print(sm1)
        2 X 2 CSCMatrix
        (0,0) 2.0
        (1,0) 3.0
        (1,1) 4.0
        >>> sm1 = SparseMatrix(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4], True)
        >>> print(sm1)
        2 X 2 CSRMatrix
        (0,0) 2.0
        (0,1) 3.0
        (1,1) 4.0
        """
        spstr = "{0} X {1} ".format(self.numRows, self.numCols)
        if self.isTransposed:
            spstr += "CSRMatrix\n"
        else:
            spstr += "CSCMatrix\n"

        cur_col = 0
        smlist = []

        # Display first 16 values.
        if len(self.values) <= 16:
            zipindval = zip(self.rowIndices, self.values)
        else:
            zipindval = zip(self.rowIndices[:16], self.values[:16])
        for i, (rowInd, value) in enumerate(zipindval):
            # Advance to the column whose pointer range contains entry i.
            if self.colPtrs[cur_col + 1] <= i:
                cur_col += 1
            if self.isTransposed:
                # CSR: stored index is the column, cur_col is the row.
                smlist.append('({0},{1}) {2}'.format(
                    cur_col, rowInd, _format_float(value)))
            else:
                smlist.append('({0},{1}) {2}'.format(
                    rowInd, cur_col, _format_float(value)))
        spstr += "\n".join(smlist)

        if len(self.values) > 16:
            spstr += "\n.." * 2
        return spstr

    def __repr__(self):
        """
        Representation of a SparseMatrix

        >>> sm1 = SparseMatrix(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4])
        >>> sm1
        SparseMatrix(2, 2, [0, 2, 3], [0, 1, 1], [2.0, 3.0, 4.0], False)
        """
        rowIndices = list(self.rowIndices)
        colPtrs = list(self.colPtrs)

        # Truncate long arrays to first/last eight entries.
        if len(self.values) <= 16:
            values = _format_float_list(self.values)
        else:
            values = (
                _format_float_list(self.values[:8]) +
                ["..."] +
                _format_float_list(self.values[-8:])
            )
            rowIndices = rowIndices[:8] + ["..."] + rowIndices[-8:]

        if len(self.colPtrs) > 16:
            colPtrs = colPtrs[:8] + ["..."] + colPtrs[-8:]

        values = ", ".join(values)
        rowIndices = ", ".join([str(ind) for ind in rowIndices])
        colPtrs = ", ".join([str(ptr) for ptr in colPtrs])
        return "SparseMatrix({0}, {1}, [{2}], [{3}], [{4}], {5})".format(
            self.numRows, self.numCols, colPtrs, rowIndices,
            values, self.isTransposed)

    def __reduce__(self):
        # Pickle support: serialise the index/value arrays as raw bytes.
        # NOTE(review): ndarray.tostring() is deprecated in newer NumPy in
        # favour of tobytes() -- fine for the versions this module targets.
        return SparseMatrix, (
            self.numRows, self.numCols, self.colPtrs.tostring(),
            self.rowIndices.tostring(), self.values.tostring(),
            int(self.isTransposed))

    def __getitem__(self, indices):
        i, j = indices
        if i < 0 or i >= self.numRows:
            raise IndexError("Row index %d is out of range [0, %d)"
                             % (i, self.numRows))
        if j < 0 or j >= self.numCols:
            raise IndexError("Column index %d is out of range [0, %d)"
                             % (j, self.numCols))

        # If a CSR matrix is given, then the row index should be searched
        # for in ColPtrs, and the column index should be searched for in the
        # corresponding slice obtained from rowIndices.
        if self.isTransposed:
            j, i = i, j

        colStart = self.colPtrs[j]
        colEnd = self.colPtrs[j + 1]
        nz = self.rowIndices[colStart: colEnd]
        # Binary search for i within the (sorted) indices of column j.
        ind = np.searchsorted(nz, i) + colStart
        if ind < colEnd and self.rowIndices[ind] == i:
            return self.values[ind]
        else:
            return 0.0

    def toArray(self):
        """
        Return a numpy.ndarray
        """
        A = np.zeros((self.numRows, self.numCols), dtype=np.float64, order='F')
        # NOTE(review): xrange is Python 2; presumably aliased to range by
        # this module's compatibility shim on Python 3 -- confirm.
        for k in xrange(self.colPtrs.size - 1):
            startptr = self.colPtrs[k]
            endptr = self.colPtrs[k + 1]
            if self.isTransposed:
                # CSR: the k-th pointer range describes row k.
                A[k, self.rowIndices[startptr:endptr]] = self.values[startptr:endptr]
            else:
                A[self.rowIndices[startptr:endptr], k] = self.values[startptr:endptr]
        return A

    def toDense(self):
        """Convert to DenseMatrix (column-major)."""
        densevals = np.ravel(self.toArray(), order='F')
        return DenseMatrix(self.numRows, self.numCols, densevals)

    # TODO: More efficient implementation:
    def __eq__(self, other):
        # Guard against shape mismatch before the elementwise comparison,
        # mirroring DenseMatrix.__eq__.
        if self.numRows != other.numRows or self.numCols != other.numCols:
            return False
        return np.all(self.toArray() == other.toArray())
class Matrices(object):
    """Factory methods for local matrices."""

    @staticmethod
    def dense(numRows, numCols, values):
        """
        Create a DenseMatrix
        """
        return DenseMatrix(numRows, numCols, values)

    @staticmethod
    def sparse(numRows, numCols, colPtrs, rowIndices, values):
        """
        Create a SparseMatrix
        """
        return SparseMatrix(numRows, numCols, colPtrs, rowIndices, values)
def _test():
    """Run this module's doctests; exit non-zero on any failure."""
    import doctest
    try:
        # NumPy 1.14+ changed its repr formatting; pin the legacy style so
        # the doctests keep matching.
        np.set_printoptions(legacy='1.13')
    except TypeError:
        # Older NumPy has no 'legacy' option and needs no pinning.
        pass
    failures, _total = doctest.testmod(optionflags=doctest.ELLIPSIS)
    if failures:
        sys.exit(-1)
# Run the doctest suite when executed directly as a script.
if __name__ == "__main__":
    _test()
| apache-2.0 |
Imaginashion/cloud-vision | .fr-d0BNfn/django-jquery-file-upload/venv/lib/python3.5/site-packages/requests/packages/chardet/__init__.py | 1778 | 1295 | ######################## BEGIN LICENSE BLOCK ########################
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
__version__ = "2.3.0"
from sys import version_info
def detect(aBuf):
    """Run the universal detector over *aBuf* and return its result dict."""
    # Reject text input up front: the detector operates on raw bytes only.
    if version_info >= (3, 0):
        if not isinstance(aBuf, bytes):
            raise ValueError('Expected a bytes object, not a unicode object')
    elif isinstance(aBuf, unicode):
        raise ValueError('Expected a bytes object, not a unicode object')

    # Imported lazily to keep module import cheap.
    from . import universaldetector
    detector = universaldetector.UniversalDetector()
    detector.reset()
    detector.feed(aBuf)
    detector.close()
    return detector.result
| mit |
def main(request, response):
    # wptserve handler: serve an HTML page whose response carries a
    # Content-Security-Policy of "img-src *" (plus the legacy vendor-prefixed
    # header variants), so images from any origin are allowed.
    import simplejson as json
    # NOTE(review): file() is Python 2 only and the handle is never closed.
    f = file('config.json')
    source = f.read()
    s = json.JSONDecoder().decode(source)
    # Candidate origins built from the suite config. NOTE(review): url1 and
    # url2 are computed but never interpolated into the page returned below.
    url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
    url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
    response.headers.set("Content-Security-Policy", "img-src *")
    response.headers.set("X-Content-Security-Policy", "img-src *")
    response.headers.set("X-WebKit-CSP", "img-src *")
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <yunfeix.hao@intel.com>
-->
<html>
<head>
<title>CSP Test: csp_img-src_asterisk_allowed_int</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#img-src"/>
<meta name="flags" content=""/>
<meta name="assert" content="img-src *"/>
<meta charset="utf-8"/>
</head>
<body>
<p>Test passes if there is a filled blue square.</p>
<img src="support/blue-100x100.png"/>
</body>
</html> """
| bsd-3-clause |
pgleeson/TempRepo3 | lib/jython/Lib/site-packages/xlrd/__init__.py | 64 | 72055 | # -*- coding: cp1252 -*-
__VERSION__ = "0.7.1" # 2009-05-31
# <p>Copyright © 2005-2009 Stephen John Machin, Lingfo Pty Ltd</p>
# <p>This module is part of the xlrd package, which is released under a
# BSD-style licence.</p>
import licences
##
# <p><b>A Python module for extracting data from MS Excel spreadsheet files.
# <br /><br />
# Version 0.7.1 -- 2009-05-31
# </b></p>
#
# <h2>General information</h2>
#
# <h3>Acknowledgements</h3>
#
# <p>
# Development of this module would not have been possible without the document
# "OpenOffice.org's Documentation of the Microsoft Excel File Format"
# ("OOo docs" for short).
# The latest version is available from OpenOffice.org in
# <a href=http://sc.openoffice.org/excelfileformat.pdf> PDF format</a>
# and
# <a href=http://sc.openoffice.org/excelfileformat.odt> ODT format.</a>
# Small portions of the OOo docs are reproduced in this
# document. A study of the OOo docs is recommended for those who wish a
# deeper understanding of the Excel file layout than the xlrd docs can provide.
# </p>
#
# <p>Backporting to Python 2.1 was partially funded by
# <a href=http://journyx.com/>
# Journyx - provider of timesheet and project accounting solutions.
# </a>
# </p>
#
# <p>Provision of formatting information in version 0.6.1 was funded by
# <a href=http://www.simplistix.co.uk>
# Simplistix Ltd.
# </a>
# </p>
#
# <h3>Unicode</h3>
#
# <p>This module presents all text strings as Python unicode objects.
# From Excel 97 onwards, text in Excel spreadsheets has been stored as Unicode.
# Older files (Excel 95 and earlier) don't keep strings in Unicode;
# a CODEPAGE record provides a codepage number (for example, 1252) which is
# used by xlrd to derive the encoding (for same example: "cp1252") which is
# used to translate to Unicode.</p>
# <small>
# <p>If the CODEPAGE record is missing (possible if the file was created
# by third-party software), xlrd will assume that the encoding is ascii, and keep going.
# If the actual encoding is not ascii, a UnicodeDecodeError exception will be raised and
# you will need to determine the encoding yourself, and tell xlrd:
# <pre>
# book = xlrd.open_workbook(..., encoding_override="cp1252")
# </pre></p>
# <p>If the CODEPAGE record exists but is wrong (for example, the codepage
# number is 1251, but the strings are actually encoded in koi8_r),
# it can be overridden using the same mechanism.
# The supplied runxlrd.py has a corresponding command-line argument, which
# may be used for experimentation:
# <pre>
# runxlrd.py -e koi8_r 3rows myfile.xls
# </pre></p>
# <p>The first place to look for an encoding ("codec name") is
# <a href=http://docs.python.org/lib/standard-encodings.html>
# the Python documentation</a>.
# </p>
# </small>
#
# <h3>Dates in Excel spreadsheets</h3>
#
# <p>In reality, there are no such things. What you have are floating point
# numbers and pious hope.
# There are several problems with Excel dates:</p>
#
# <p>(1) Dates are not stored as a separate data type; they are stored as
# floating point numbers and you have to rely on
# (a) the "number format" applied to them in Excel and/or
# (b) knowing which cells are supposed to have dates in them.
# This module helps with (a) by inspecting the
# format that has been applied to each number cell;
# if it appears to be a date format, the cell
# is classified as a date rather than a number. Feedback on this feature,
# especially from non-English-speaking locales, would be appreciated.</p>
#
# <p>(2) Excel for Windows stores dates by default as the number of
# days (or fraction thereof) since 1899-12-31T00:00:00. Excel for
# Macintosh uses a default start date of 1904-01-01T00:00:00. The date
# system can be changed in Excel on a per-workbook basis (for example:
# Tools -> Options -> Calculation, tick the "1904 date system" box).
# This is of course a bad idea if there are already dates in the
# workbook. There is no good reason to change it even if there are no
# dates in the workbook. Which date system is in use is recorded in the
# workbook. A workbook transported from Windows to Macintosh (or vice
# versa) will work correctly with the host Excel. When using this
# module's xldate_as_tuple function to convert numbers from a workbook,
# you must use the datemode attribute of the Book object. If you guess,
# or make a judgement depending on where you believe the workbook was
# created, you run the risk of being 1462 days out of kilter.</p>
#
# <p>Reference:
# http://support.microsoft.com/default.aspx?scid=KB;EN-US;q180162</p>
#
#
# <p>(3) The Excel implementation of the Windows-default 1900-based date system works on the
# incorrect premise that 1900 was a leap year. It interprets the number 60 as meaning 1900-02-29,
# which is not a valid date. Consequently any number less than 61 is ambiguous. Example: is 59 the
# result of 1900-02-28 entered directly, or is it 1900-03-01 minus 2 days? The OpenOffice.org Calc
# program "corrects" the Microsoft problem; entering 1900-02-27 causes the number 59 to be stored.
# Save as an XLS file, then open the file with Excel -- you'll see 1900-02-28 displayed.</p>
#
# <p>Reference: http://support.microsoft.com/default.aspx?scid=kb;en-us;214326</p>
#
# <p>(4) The Macintosh-default 1904-based date system counts 1904-01-02 as day 1 and 1904-01-01 as day zero.
# Thus any number such that (0.0 <= number < 1.0) is ambiguous. Is 0.625 a time of day (15:00:00),
# independent of the calendar,
# or should it be interpreted as an instant on a particular day (1904-01-01T15:00:00)?
# The xldate_* functions in this module
# take the view that such a number is a calendar-independent time of day (like Python's datetime.time type) for both
# date systems. This is consistent with more recent Microsoft documentation
# (for example, the help file for Excel 2002 which says that the first day
# in the 1904 date system is 1904-01-02).
#
# <p>(5) Usage of the Excel DATE() function may leave strange dates in a spreadsheet. Quoting the help file,
# in respect of the 1900 date system: "If year is between 0 (zero) and 1899 (inclusive),
# Excel adds that value to 1900 to calculate the year. For example, DATE(108,1,2) returns January 2, 2008 (1900+108)."
# This gimmick, semi-defensible only for arguments up to 99 and only in the pre-Y2K-awareness era,
# means that DATE(1899, 12, 31) is interpreted as 3799-12-31.</p>
#
# <p>For further information, please refer to the documentation for the xldate_* functions.</p>
#
# <h3> Named references, constants, formulas, and macros</h3>
#
# <p>
# A name is used to refer to a cell, a group of cells, a constant
# value, a formula, or a macro. Usually the scope of a name is global
# across the whole workbook. However it can be local to a worksheet.
# For example, if the sales figures are in different cells in
# different sheets, the user may define the name "Sales" in each
# sheet. There are built-in names, like "Print_Area" and
# "Print_Titles"; these two are naturally local to a sheet.
# </p><p>
# To inspect the names with a user interface like MS Excel, OOo Calc,
# or Gnumeric, click on Insert/Names/Define. This will show the global
# names, plus those local to the currently selected sheet.
# </p><p>
# A Book object provides two dictionaries (name_map and
# name_and_scope_map) and a list (name_obj_list) which allow various
# ways of accessing the Name objects. There is one Name object for
# each NAME record found in the workbook. Name objects have many
# attributes, several of which are relevant only when obj.macro is 1.
# </p><p>
# In the examples directory you will find namesdemo.xls which
# showcases the many different ways that names can be used, and
# xlrdnamesAPIdemo.py which offers 3 different queries for inspecting
# the names in your files, and shows how to extract whatever a name is
# referring to. There is currently one "convenience method",
# Name.cell(), which extracts the value in the case where the name
# refers to a single cell. More convenience methods are planned. The
# source code for Name.cell (in __init__.py) is an extra source of
# information on how the Name attributes hang together.
# </p>
#
# <p><i>Name information is <b>not</b> extracted from files older than
# Excel 5.0 (Book.biff_version < 50)</i></p>
#
# <h3>Formatting</h3>
#
# <h4>Introduction</h4>
#
# <p>This collection of features, new in xlrd version 0.6.1, is intended
# to provide the information needed to (1) display/render spreadsheet contents
# (say) on a screen or in a PDF file, and (2) copy spreadsheet data to another
# file without losing the ability to display/render it.</p>
#
# <h4>The Palette; Colour Indexes</h4>
#
# <p>A colour is represented in Excel as a (red, green, blue) ("RGB") tuple
# with each component in range(256). However it is not possible to access an
# unlimited number of colours; each spreadsheet is limited to a palette of 64 different
# colours (24 in Excel 3.0 and 4.0, 8 in Excel 2.0). Colours are referenced by an index
# ("colour index") into this palette.
#
# Colour indexes 0 to 7 represent 8 fixed built-in colours: black, white, red, green, blue,
# yellow, magenta, and cyan.<p>
#
# The remaining colours in the palette (8 to 63 in Excel 5.0 and later)
# can be changed by the user. In the Excel 2003 UI, Tools/Options/Color presents a palette
# of 7 rows of 8 colours. The last two rows are reserved for use in charts.<br />
# The correspondence between this grid and the assigned
# colour indexes is NOT left-to-right top-to-bottom.<br />
# Indexes 8 to 15 correspond to changeable
# parallels of the 8 fixed colours -- for example, index 7 is forever cyan;
# index 15 starts off being cyan but can be changed by the user.<br />
#
# The default colour for each index depends on the file version; tables of the defaults
# are available in the source code. If the user changes one or more colours,
# a PALETTE record appears in the XLS file -- it gives the RGB values for *all* changeable
# indexes.<br />
# Note that colours can be used in "number formats": "[CYAN]...." and "[COLOR8]...." refer
# to colour index 7; "[COLOR16]...." will produce cyan
# unless the user changes colour index 15 to something else.<br />
#
# <p>In addition, there are several "magic" colour indexes used by Excel:<br />
# 0x18 (BIFF3-BIFF4), 0x40 (BIFF5-BIFF8): System window text colour for border lines
# (used in XF, CF, and WINDOW2 records)<br />
# 0x19 (BIFF3-BIFF4), 0x41 (BIFF5-BIFF8): System window background colour for pattern background
# (used in XF and CF records )<br />
# 0x43: System face colour (dialogue background colour)<br />
# 0x4D: System window text colour for chart border lines<br />
# 0x4E: System window background colour for chart areas<br />
# 0x4F: Automatic colour for chart border lines (seems to be always Black)<br />
# 0x50: System ToolTip background colour (used in note objects)<br />
# 0x51: System ToolTip text colour (used in note objects)<br />
# 0x7FFF: System window text colour for fonts (used in FONT and CF records)<br />
# Note 0x7FFF appears to be the *default* colour index. It appears quite often in FONT
# records.<br />
#
# <h4>Default Formatting</h4>
#
# Default formatting is applied to all empty cells (those not described by a cell record).
# Firstly row default information (ROW record, Rowinfo class) is used if available.
# Failing that, column default information (COLINFO record, Colinfo class) is used if available.
# As a last resort the worksheet/workbook default cell format will be used; this
# should always be present in an Excel file,
# described by the XF record with the fixed index 15 (0-based). By default, it uses the
# worksheet/workbook default cell style, described by the very first XF record (index 0).
#
# <h4> Formatting features not included in xlrd version 0.6.1</h4>
# <ul>
# <li>Rich text i.e. strings containing partial <b>bold</b> <i>italic</i>
# and <u>underlined</u> text, change of font inside a string, etc.
# See OOo docs s3.4 and s3.2</li>
# <li>Asian phonetic text (known as "ruby"), used for Japanese furigana. See OOo docs
# s3.4.2 (p15)</li>
# <li>Conditional formatting. See OOo docs
# s5.12, s6.21 (CONDFMT record), s6.16 (CF record)</li>
# <li>Miscellaneous sheet-level and book-level items e.g. printing layout, screen panes. </li>
# <li>Modern Excel file versions don't keep most of the built-in
# "number formats" in the file; Excel loads formats according to the
# user's locale. Currently xlrd's emulation of this is limited to
# a hard-wired table that applies to the US English locale. This may mean
# that currency symbols, date order, thousands separator, decimals separator, etc
# are inappropriate. Note that this does not affect users who are copying XLS
# files, only those who are visually rendering cells.</li>
# </ul>
#
# <h3>Loading worksheets on demand</h3>
#
# <p>This feature, new in version 0.7.1, is governed by the on_demand argument
# to the open_workbook() function and allows saving memory and time by loading
# only those sheets that the caller is interested in, and releasing sheets
# when no longer required.</p>
#
# <p>on_demand=False (default): No change. open_workbook() loads global data
# and all sheets, releases resources no longer required (principally the
# str or mmap object containing the Workbook stream), and returns.</p>
#
# <p>on_demand=True and BIFF version < 5.0: A warning message is emitted,
# on_demand is recorded as False, and the old process is followed.</p>
#
# <p>on_demand=True and BIFF version >= 5.0: open_workbook() loads global
# data and returns without releasing resources. At this stage, the only
# information available about sheets is Book.nsheets and Book.sheet_names().</p>
#
# <p>Book.sheet_by_name() and Book.sheet_by_index() will load the requested
# sheet if it is not already loaded.</p>
#
# <p>Book.sheets() will load all/any unloaded sheets.</p>
#
# <p>The caller may save memory by calling
# Book.unload_sheet(sheet_name_or_index) when finished with the sheet.
# This applies irrespective of the state of on_demand.</p>
#
# <p>The caller may re-load an unloaded sheet by calling Book.sheet_by_xxxx()
# -- except if those required resources have been released (which will
# have happened automatically when on_demand is false). This is the only
# case where an exception will be raised.</p>
#
# <p>The caller may query the state of a sheet:
# Book.sheet_loaded(sheet_name_or_index) -> a bool</p>
#
##
# 2009-04-27 SJM Integrated on_demand patch by Armando Serrano Lombillo
# 2008-11-23 SJM Support dumping FILEPASS and EXTERNNAME records; extra info from SUPBOOK records
# 2008-11-23 SJM colname utility function now supports more than 256 columns
# 2008-04-24 SJM Recovery code for file with out-of-order/missing/wrong CODEPAGE record needed to be called for EXTERNSHEET/BOUNDSHEET/NAME/SHEETHDR records.
# 2008-02-08 SJM Preparation for Excel 2.0 support
# 2008-02-03 SJM Minor tweaks for IronPython support
# 2008-02-02 SJM Previous change stopped dump() and count_records() ... fixed
# 2007-12-25 SJM Decouple Book initialisation & loading -- to allow for multiple loaders.
# 2007-12-20 SJM Better error message for unsupported file format.
# 2007-12-04 SJM Added support for Excel 2.x (BIFF2) files.
# 2007-11-20 SJM Wasn't handling EXTERNSHEET record that needed CONTINUE record(s)
# 2007-07-07 SJM Version changed to 0.7.0 (alpha 1)
# 2007-07-07 SJM Logfile arg wasn't being passed from open_workbook to compdoc.CompDoc
# 2007-05-21 SJM If no CODEPAGE record in pre-8.0 file, assume ascii and keep going.
# 2007-04-22 SJM Removed antique undocumented Book.get_name_dict method.
from timemachine import *
from biffh import *
from struct import unpack
import sys
import time
import sheet
import compdoc
from xldate import xldate_as_tuple, XLDateError
from formula import *
import formatting
if sys.version.startswith("IronPython"):
    # print >> sys.stderr, "...importing encodings"
    # IronPython needs the codecs package imported explicitly.
    import encodings

empty_cell = sheet.empty_cell # for exposure to the world ...

DEBUG = 0

USE_FANCY_CD = 1

TOGGLE_GC = 0
import gc
# gc.set_debug(gc.DEBUG_STATS)

# mmap is optional; fall back to plain reads where it is unavailable.
try:
    import mmap
    MMAP_AVAILABLE = 1
except ImportError:
    MMAP_AVAILABLE = 0
USE_MMAP = MMAP_AVAILABLE

# Sentinel deliberately wider than any 16-bit BIFF opcode.
MY_EOF = 0xF00BAAA # not a 16-bit number

# SUPBOOK record subtypes.
SUPBOOK_UNK, SUPBOOK_INTERNAL, SUPBOOK_EXTERNAL, SUPBOOK_ADDIN, SUPBOOK_DDEOLE = range(5)

# BIFF versions (x10) that this module can parse, 8.0 down to 2.0.
SUPPORTED_VERSIONS = (80, 70, 50, 45, 40, 30, 21, 20)

# Built-in defined-name strings mapped to their 1-byte BIFF codes.
code_from_builtin_name = {
    u"Consolidate_Area": u"\x00",
    u"Auto_Open":        u"\x01",
    u"Auto_Close":       u"\x02",
    u"Extract":          u"\x03",
    u"Database":         u"\x04",
    u"Criteria":         u"\x05",
    u"Print_Area":       u"\x06",
    u"Print_Titles":     u"\x07",
    u"Recorder":         u"\x08",
    u"Data_Form":        u"\x09",
    u"Auto_Activate":    u"\x0A",
    u"Auto_Deactivate":  u"\x0B",
    u"Sheet_Title":      u"\x0C",
    u"_FilterDatabase":  u"\x0D",
    }
# Inverse mapping, derived once at import time.
builtin_name_from_code = {}
for _bin, _bic in code_from_builtin_name.items():
    builtin_name_from_code[_bic] = _bin
del _bin, _bic  # keep the module namespace clean
##
#
# Open a spreadsheet file for data extraction.
#
# @param filename The path to the spreadsheet file to be opened.
#
# @param logfile An open file to which messages and diagnostics are written.
#
# @param verbosity Increases the volume of trace material written to the logfile.
#
# @param pickleable Default is true. In Python 2.4 or earlier, setting to false
# will cause use of array.array objects which save some memory but can't be pickled.
# In Python 2.5, array.arrays are used unconditionally. Note: if you have large files that
# you need to read multiple times, it can be much faster to cPickle.dump() the xlrd.Book object
# once, and use cPickle.load() multiple times.
# @param use_mmap Whether to use the mmap module is determined heuristically.
# Use this arg to override the result. Current heuristic: mmap is used if it exists.
#
# @param file_contents ... as a string or an mmap.mmap object or some other behave-alike object.
# If file_contents is supplied, filename will not be used, except (possibly) in messages.
#
# @param encoding_override Used to overcome missing or bad codepage information
# in older-version files. Refer to discussion in the <b>Unicode</b> section above.
# <br /> -- New in version 0.6.0
#
# @param formatting_info Governs provision of a reference to an XF (eXtended Format) object
# for each cell in the worksheet.
# <br /> Default is <i>False</i>. This is backwards compatible and saves memory.
# "Blank" cells (those with their own formatting information but no data) are treated as empty
# (by ignoring the file's BLANK and MULBLANK records).
# It cuts off any bottom "margin" of rows of empty (and blank) cells and
# any right "margin" of columns of empty (and blank) cells.
# Only cell_value and cell_type are available.
# <br /> <i>True</i> provides all cells, including empty and blank cells.
# XF information is available for each cell.
# <br /> -- New in version 0.6.1
#
# @param on_demand Governs whether sheets are all loaded initially or when demanded
# by the caller. Please refer back to the section "Loading worksheets on demand" for details.
# -- New in version 0.7.1
#
# @return An instance of the Book class.
def open_workbook(filename=None,
    logfile=sys.stdout, verbosity=0, pickleable=True, use_mmap=USE_MMAP,
    file_contents=None,
    encoding_override=None,
    formatting_info=False, on_demand=False,
    ):
    # Open a spreadsheet file for data extraction and return a Book.
    # Full parameter documentation is in the comment block above.
    # NOTE(review): time.clock() was removed in Python 3.8; acceptable for
    # the Python 2 era this module targets.
    t0 = time.clock()
    if TOGGLE_GC:
        # Loading creates very many small objects; optionally disable the
        # cyclic GC for the duration to speed things up.
        orig_gc_enabled = gc.isenabled()
        if orig_gc_enabled:
            gc.disable()
    bk = Book()
    # Stage 1: slurp the raw BIFF stream (from file or file_contents).
    bk.biff2_8_load(
        filename=filename, file_contents=file_contents,
        logfile=logfile, verbosity=verbosity, pickleable=pickleable, use_mmap=use_mmap,
        encoding_override=encoding_override,
        formatting_info=formatting_info,
        on_demand=on_demand,
        )
    t1 = time.clock()
    bk.load_time_stage_1 = t1 - t0
    # The BOF record of the workbook-globals substream identifies the version.
    biff_version = bk.getbof(XL_WORKBOOK_GLOBALS)
    if not biff_version:
        raise XLRDError("Can't determine file's BIFF version")
    if biff_version not in SUPPORTED_VERSIONS:
        raise XLRDError(
            "BIFF version %s is not supported"
            % biff_text_from_num[biff_version]
            )
    bk.biff_version = biff_version
    if biff_version <= 40:
        # no workbook globals, only 1 worksheet
        if on_demand:
            fprintf(bk.logfile,
                "*** WARNING: on_demand is not supported for this Excel version.\n"
                "*** Setting on_demand to False.\n")
            bk.on_demand = on_demand = False
        bk.fake_globals_get_sheet()
    elif biff_version == 45:
        # worksheet(s) embedded in global stream
        bk.parse_globals()
        if on_demand:
            fprintf(bk.logfile, "*** WARNING: on_demand is not supported for this Excel version.\n"
                "*** Setting on_demand to False.\n")
            bk.on_demand = on_demand = False
    else:
        # BIFF5-8: parse globals, then (unless on_demand) load every sheet.
        bk.parse_globals()
        bk._sheet_list = [None for sh in bk._sheet_names]
        if not on_demand:
            bk.get_sheets()
    bk.nsheets = len(bk._sheet_list)
    if biff_version == 45 and bk.nsheets > 1:
        fprintf(bk.logfile,
            "*** WARNING: Excel 4.0 workbook (.XLW) file contains %d worksheets.\n"
            "*** Book-level data will be that of the last worksheet.\n",
            bk.nsheets
            )
    if not on_demand:
        # Everything is loaded; drop the raw stream to free memory.
        bk.release_resources()
    if TOGGLE_GC:
        if orig_gc_enabled:
            gc.enable()
    t2 = time.clock()
    bk.load_time_stage_2 = t2 - t1
    return bk
##
# For debugging: dump the file's BIFF records in char & hex.
# @param filename The path to the file to be dumped.
# @param outfile An open file, to which the dump is written.
# @param unnumbered If true, omit offsets (for meaningful diffs).
def dump(filename, outfile=sys.stdout, unnumbered=False):
    # Load the raw BIFF stream into a Book, then hand the memory image to
    # the record dumper (char & hex output, offsets suppressed if unnumbered).
    book = Book()
    book.biff2_8_load(filename=filename, logfile=outfile)
    biff_dump(book.mem, book.base, book.stream_len, 0, outfile, unnumbered)
##
# For debugging and analysis: summarise the file's BIFF records.
# I.e. produce a sorted file of (record_name, count).
# @param filename The path to the file to be summarised.
# @param outfile An open file, to which the summary is written.
def count_records(filename, outfile=sys.stdout):
    # Load the raw BIFF stream into a Book, then tally its records by type,
    # writing a sorted (record_name, count) summary to outfile.
    book = Book()
    book.biff2_8_load(filename=filename, logfile=outfile)
    biff_count_records(book.mem, book.base, book.stream_len, outfile)
##
# Information relating to a named reference, formula, macro, etc.
# <br /> -- New in version 0.6.0
# <br /> -- <i>Name information is <b>not</b> extracted from files older than
# Excel 5.0 (Book.biff_version < 50)</i>
class Name(BaseObject):
    _repr_these = ['stack']
    book = None # parent
    ##
    # Visibility flag: 0 = Visible; 1 = Hidden
    hidden = 0
    ##
    # Macro kind: 0 = Command macro; 1 = Function macro. Meaningful only when macro == 1
    func = 0
    ##
    # Macro language: 0 = Sheet macro; 1 = VisualBasic macro. Meaningful only when macro == 1
    vbasic = 0
    ##
    # 0 = Standard name; 1 = Macro name
    macro = 0
    ##
    # 0 = Simple formula; 1 = Complex formula (array formula or user defined)<br />
    # <i>No examples have been sighted.</i>
    complex = 0
    ##
    # 0 = User-defined name; 1 = Built-in name
    # (common examples: Print_Area, Print_Titles; see OOo docs for full list)
    builtin = 0
    ##
    # Function group. Meaningful only when macro == 1; see OOo docs for values.
    funcgroup = 0
    ##
    # 0 = Formula definition; 1 = Binary data<br /> <i>No examples have been sighted.</i>
    binary = 0
    ##
    # The index of this object in book.name_obj_list
    name_index = 0
    ##
    # A Unicode string. If builtin, decoded as per OOo docs.
    name = u""
    ##
    # An 8-bit string.
    raw_formula = ""
    ##
    # -1: The name is global (visible in all calculation sheets).<br />
    # -2: The name belongs to a macro sheet or VBA sheet.<br />
    # -3: The name is invalid.<br />
    # 0 <= scope < book.nsheets: The name is local to the sheet whose index is scope.
    scope = -1
    ##
    # The result of evaluating the formula, if any.
    # None when there is no formula, or when evaluating it ran into trouble;
    # otherwise a single instance of the Operand class.
    #
    result = None
    ##
    # Convenience method for the frequent use case where the name refers to
    # exactly one cell.
    # @return An instance of the Cell class.
    # @throws XLRDError The name is not a constant absolute reference
    # to a single cell.
    def cell(self):
        res = self.result
        # The evaluated result must be a single 3-D reference spanning
        # exactly one sheet, one row and one column.
        if res and res.kind == oREF and len(res.value) == 1:
            ref3d = res.value[0]
            if (0 <= ref3d.shtxlo
                    and ref3d.shtxhi == ref3d.shtxlo + 1
                    and ref3d.rowxhi == ref3d.rowxlo + 1
                    and ref3d.colxhi == ref3d.colxlo + 1):
                sh = self.book.sheet_by_index(ref3d.shtxlo)
                return sh.cell(ref3d.rowxlo, ref3d.colxlo)
        self.dump(self.book.logfile,
            header="=== Dump of Name object ===",
            footer="======= End of dump =======",
            )
        raise XLRDError("Not a constant absolute reference to a single cell")
    ##
    # Convenience method for the use case where the name refers to one
    # rectangular area in one worksheet.
    # @param clipped If true (the default), the returned rectangle is clipped
    # to fit in (0, sheet.nrows, 0, sheet.ncols) -- it is guaranteed that
    # 0 <= rowxlo <= rowxhi <= sheet.nrows and that the number of usable rows
    # in the area (which may be zero) is rowxhi - rowxlo; likewise for columns.
    # @return a tuple (sheet_object, rowxlo, rowxhi, colxlo, colxhi).
    # @throws XLRDError The name is not a constant absolute reference
    # to a single area in a single sheet.
    def area2d(self, clipped=True):
        res = self.result
        if res and res.kind == oREF and len(res.value) == 1: # one reference only
            ref3d = res.value[0]
            if 0 <= ref3d.shtxlo and ref3d.shtxhi == ref3d.shtxlo + 1: # one sheet only
                sh = self.book.sheet_by_index(ref3d.shtxlo)
                if not clipped:
                    return sh, ref3d.rowxlo, ref3d.rowxhi, ref3d.colxlo, ref3d.colxhi
                # Clip the rectangle against the sheet's actual extent.
                rowxlo = min(ref3d.rowxlo, sh.nrows)
                rowxhi = max(rowxlo, min(ref3d.rowxhi, sh.nrows))
                colxlo = min(ref3d.colxlo, sh.ncols)
                colxhi = max(colxlo, min(ref3d.colxhi, sh.ncols))
                assert 0 <= rowxlo <= rowxhi <= sh.nrows
                assert 0 <= colxlo <= colxhi <= sh.ncols
                return sh, rowxlo, rowxhi, colxlo, colxhi
        self.dump(self.book.logfile,
            header="=== Dump of Name object ===",
            footer="======= End of dump =======",
            )
        raise XLRDError("Not a constant absolute reference to a single area in a single sheet")
##
# Contents of a "workbook".
# <p>WARNING: You don't call this class yourself. You use the Book object that
# was returned when you called xlrd.open_workbook("myfile.xls").</p>
class Book(BaseObject):
    """
    Contents of a "workbook".

    WARNING: Don't instantiate this class yourself; use the Book object
    returned by xlrd.open_workbook("myfile.xls").
    """
    ##
    # The number of worksheets present in the workbook file.
    # This information is available even when no sheets have yet been loaded.
    nsheets = 0
    ##
    # Which date system was in force when this file was last saved.<br />
    # 0 => 1900 system (the Excel for Windows default).<br />
    # 1 => 1904 system (the Excel for Macintosh default).<br />
    datemode = 0 # In case it's not specified in the file.
    ##
    # Version of BIFF (Binary Interchange File Format) used to create the file.
    # Latest is 8.0 (represented here as 80), introduced with Excel 97.
    # Earliest supported by this module: 2.0 (represented as 20).
    biff_version = 0
    ##
    # List containing a Name object for each NAME record in the workbook.
    # <br /> -- New in version 0.6.0
    name_obj_list = []
    ##
    # An integer denoting the character set used for strings in this file.
    # For BIFF 8 and later, this will be 1200, meaning Unicode; more precisely, UTF_16_LE.
    # For earlier versions, this is used to derive the appropriate Python encoding
    # to be used to convert to Unicode.
    # Examples: 1252 -> 'cp1252', 10000 -> 'mac_roman'
    codepage = None
    ##
    # The encoding that was derived from the codepage.
    encoding = None
    ##
    # A tuple containing the (telephone system) country code for:<br />
    # [0]: the user-interface setting when the file was created.<br />
    # [1]: the regional settings.<br />
    # Example: (1, 61) meaning (USA, Australia).
    # This information may give a clue to the correct encoding for an unknown codepage.
    # For a long list of observed values, refer to the OpenOffice.org documentation for
    # the COUNTRY record.
    countries = (0, 0)
    ##
    # What (if anything) is recorded as the name of the last user to save the file.
    user_name = u''
    ##
    # A list of Font class instances, each corresponding to a FONT record.
    # <br /> -- New in version 0.6.1
    font_list = []
    ##
    # A list of XF class instances, each corresponding to an XF record.
    # <br /> -- New in version 0.6.1
    xf_list = []
    ##
    # A list of Format objects, each corresponding to a FORMAT record, in
    # the order that they appear in the input file.
    # It does <i>not</i> contain builtin formats.
    # If you are creating an output file using (for example) pyExcelerator,
    # use this list.
    # The collection to be used for all visual rendering purposes is format_map.
    # <br /> -- New in version 0.6.1
    format_list = []
    ##
    # The mapping from XF.format_key to Format object.
    # <br /> -- New in version 0.6.1
    format_map = {}
    ##
    # This provides access via name to the extended format information for
    # both built-in styles and user-defined styles.<br />
    # It maps <i>name</i> to (<i>built_in</i>, <i>xf_index</i>), where:<br />
    # <i>name</i> is either the name of a user-defined style,
    # or the name of one of the built-in styles. Known built-in names are
    # Normal, RowLevel_1 to RowLevel_7,
    # ColLevel_1 to ColLevel_7, Comma, Currency, Percent, "Comma [0]",
    # "Currency [0]", Hyperlink, and "Followed Hyperlink".<br />
    # <i>built_in</i> 1 = built-in style, 0 = user-defined<br />
    # <i>xf_index</i> is an index into Book.xf_list.<br />
    # References: OOo docs s6.99 (STYLE record); Excel UI Format/Style
    # <br /> -- New in version 0.6.1
    style_name_map = {}
    ##
    # This provides definitions for colour indexes. Please refer to the
    # above section "The Palette; Colour Indexes" for an explanation
    # of how colours are represented in Excel.<br />
    # Colour indexes into the palette map into (red, green, blue) tuples.
    # "Magic" indexes e.g. 0x7FFF map to None.
    # <i>colour_map</i> is what you need if you want to render cells on screen or in a PDF
    # file. If you are writing an output XLS file, use <i>palette_record</i>.
    # <br /> -- New in version 0.6.1. Extracted only if open_workbook(..., formatting_info=True)
    colour_map = {}
    ##
    # If the user has changed any of the colours in the standard palette, the XLS
    # file will contain a PALETTE record with 56 (16 for Excel 4.0 and earlier)
    # RGB values in it, and this list will be e.g. [(r0, b0, g0), ..., (r55, b55, g55)].
    # Otherwise this list will be empty. This is what you need if you are
    # writing an output XLS file. If you want to render cells on screen or in a PDF
    # file, use colour_map.
    # <br /> -- New in version 0.6.1. Extracted only if open_workbook(..., formatting_info=True)
    palette_record = []
    ##
    # Time in seconds to extract the XLS image as a contiguous string (or mmap equivalent).
    load_time_stage_1 = -1.0
    ##
    # Time in seconds to parse the data from the contiguous string (or mmap equivalent).
    load_time_stage_2 = -1.0
##
# @return A list of all sheets in the book.
# All sheets not already loaded will be loaded.
def sheets(self):
for sheetx in xrange(self.nsheets):
if not self._sheet_list[sheetx]:
self.get_sheet(sheetx)
return self._sheet_list[:]
##
# @param sheetx Sheet index in range(nsheets)
# @return An object of the Sheet class
def sheet_by_index(self, sheetx):
return self._sheet_list[sheetx] or self.get_sheet(sheetx)
##
# @param sheet_name Name of sheet required
# @return An object of the Sheet class
def sheet_by_name(self, sheet_name):
try:
sheetx = self._sheet_names.index(sheet_name)
except ValueError:
raise XLRDError('No sheet named <%r>' % sheet_name)
return self.sheet_by_index(sheetx)
##
# @return A list of the names of all the worksheets in the workbook file.
# This information is available even when no sheets have yet been loaded.
def sheet_names(self):
return self._sheet_names[:]
##
# @param sheet_name_or_index Name or index of sheet enquired upon
# @return true if sheet is loaded, false otherwise
# <br /> -- New in version 0.7.1
def sheet_loaded(self, sheet_name_or_index):
# using type(1) because int won't work with Python 2.1
if isinstance(sheet_name_or_index, type(1)):
sheetx = sheet_name_or_index
else:
try:
sheetx = self._sheet_names.index(sheet_name_or_index)
except ValueError:
raise XLRDError('No sheet named <%r>' % sheet_name_or_index)
return self._sheet_list[sheetx] and True or False # Python 2.1 again
##
# @param sheet_name_or_index Name or index of sheet to be unloaded.
# <br /> -- New in version 0.7.1
def unload_sheet(self, sheet_name_or_index):
# using type(1) because int won't work with Python 2.1
if isinstance(sheet_name_or_index, type(1)):
sheetx = sheet_name_or_index
else:
try:
sheetx = self._sheet_names.index(sheet_name_or_index)
except ValueError:
raise XLRDError('No sheet named <%r>' % sheet_name_or_index)
self._sheet_list[sheetx] = None
##
# A mapping from (lower_case_name, scope) to a single Name object.
# <br /> -- New in version 0.6.0
name_and_scope_map = {}
##
# A mapping from lower_case_name to a list of Name objects. The list is
# sorted in scope order. Typically there will be one item (of global scope)
# in the list.
# <br /> -- New in version 0.6.0
name_map = {}
    def __init__(self):
        """Initialise per-workbook mutable state; the actual parsing is done
        later by biff2_8_load() and parse_globals()/get_sheets()."""
        self._sheet_list = []
        self._sheet_names = []
        self._sheet_visibility = [] # from BOUNDSHEET record
        self.nsheets = 0
        self._sh_abs_posn = [] # sheet's absolute position in the stream
        self._sharedstrings = []
        self.raw_user_name = False
        self._sheethdr_count = 0 # BIFF 4W only
        self.builtinfmtcount = -1 # unknown as yet. BIFF 3, 4S, 4W
        self.initialise_format_info()
        self._all_sheets_count = 0 # includes macro & VBA sheets
        self._supbook_count = 0
        self._supbook_locals_inx = None
        self._supbook_addins_inx = None
        self._all_sheets_map = [] # maps an all_sheets index to a calc-sheets index (or -1)
        self._externsheet_info = []
        self._externsheet_type_b57 = []
        self._extnsht_name_from_num = {}
        self._sheet_num_from_name = {}
        self._extnsht_count = 0
        self._supbook_types = []
        self._resources_released = 0
        self.addin_func_names = []
        # Instance-level copies shadow the (shared) class-level defaults.
        self.name_obj_list = []
        self.colour_map = {}
        self.palette_record = []
        self.xf_list = []
        self.style_name_map = {}
    def biff2_8_load(self, filename=None, file_contents=None,
        logfile=sys.stdout, verbosity=0, pickleable=True, use_mmap=USE_MMAP,
        encoding_override=None,
        formatting_info=False,
        on_demand=False,
        ):
        """
        Get the workbook image into memory: read the file (or use the
        supplied file_contents), and if it is an OLE2 compound document,
        locate the BIFF stream inside it. Sets self.mem, self.base and
        self.stream_len; record parsing happens elsewhere.
        """
        # DEBUG = 0
        self.logfile = logfile
        self.verbosity = verbosity
        self.pickleable = pickleable
        self.use_mmap = use_mmap and MMAP_AVAILABLE
        self.encoding_override = encoding_override
        self.formatting_info = formatting_info
        self.on_demand = on_demand
        need_close_filestr = 0
        if not file_contents:
            # Reading from a file on disk, optionally via mmap.
            if python_version < (2, 2) and self.use_mmap:
                # need to open for update
                open_mode = "r+b"
            else:
                open_mode = "rb"
            retry = False
            try:
                f = open(filename, open_mode)
            except IOError:
                e, v = sys.exc_info()[:2]
                if open_mode == "r+b" \
                and (v.errno == 13 or v.strerror == "Permission denied"):
                    # Maybe the file is read-only
                    retry = True
                    self.use_mmap = False
                else:
                    raise
            if retry:
                f = open(filename, "rb")
            if self.use_mmap:
                f.seek(0, 2) # EOF
                size = f.tell()
                f.seek(0, 0) # BOF
                if python_version < (2, 2):
                    filestr = mmap.mmap(f.fileno(), size)
                else:
                    filestr = mmap.mmap(f.fileno(), size, access=mmap.ACCESS_READ)
                need_close_filestr = 1
                self.stream_len = size
            else:
                filestr = f.read()
                self.stream_len = len(filestr)
            f.close()
        else:
            filestr = file_contents
            self.stream_len = len(file_contents)
        self.base = 0
        if filestr[:8] != compdoc.SIGNATURE:
            # Not an OLE2 compound document: treat the whole image as the
            # BIFF stream (very old files).
            # got this one at the antique store
            self.mem = filestr
        else:
            # OLE2 compound document: dig the workbook stream out of it.
            cd = compdoc.CompDoc(filestr, logfile=self.logfile)
            if USE_FANCY_CD:
                for qname in [u'Workbook', u'Book']:
                    self.mem, self.base, self.stream_len = cd.locate_named_stream(qname)
                    if self.mem: break
                else:
                    raise XLRDError("Can't find workbook in OLE2 compound document")
            else:
                for qname in [u'Workbook', u'Book']:
                    self.mem = cd.get_named_stream(qname)
                    if self.mem: break
                else:
                    raise XLRDError("Can't find workbook in OLE2 compound document")
                self.stream_len = len(self.mem)
            del cd
            if self.mem is not filestr:
                # The workbook stream was copied out of the compound
                # document, so the original file image can be discarded.
                if need_close_filestr:
                    filestr.close()
                del filestr
        self._position = self.base
        if DEBUG:
            print >> self.logfile, "mem: %s, base: %d, len: %d" % (type(self.mem), self.base, self.stream_len)
    def initialise_format_info(self):
        """Reset all formatting-related state. Called from __init__, and
        again per sheet for BIFF 4W files (see handle_sheethdr)."""
        # needs to be done once per sheet for BIFF 4W :-(
        self.format_map = {}
        self.format_list = []
        self.xfcount = 0
        self.actualfmtcount = 0 # number of FORMAT records seen so far
        self._xf_index_to_xl_type_map = {}
        self._xf_epilogue_done = 0
        self.xf_list = []
        self.font_list = []
    def release_resources(self):
        # Free the big file image and the shared-string table. Once the
        # flag is set, get_sheet() refuses to load any more sheets.
        self._resources_released = 1
        del self.mem
        del self._sharedstrings
    def get2bytes(self):
        # Read a little-endian unsigned 16-bit value from the stream,
        # advancing the position by the number of bytes actually available.
        # Returns MY_EOF if fewer than 2 bytes remain.
        pos = self._position
        buff_two = self.mem[pos:pos+2]
        lenbuff = len(buff_two)
        self._position += lenbuff
        if lenbuff < 2:
            return MY_EOF
        lo, hi = buff_two # two 1-char strings (Python 2 str slicing)
        return (ord(hi) << 8) | ord(lo)
def get_record_parts(self):
pos = self._position
mem = self.mem
code, length = unpack('<HH', mem[pos:pos+4])
pos += 4
data = mem[pos:pos+length]
self._position = pos + length
return (code, length, data)
def get_record_parts_conditional(self, reqd_record):
pos = self._position
mem = self.mem
code, length = unpack('<HH', mem[pos:pos+4])
if code != reqd_record:
return (None, 0, '')
pos += 4
data = mem[pos:pos+length]
self._position = pos + length
return (code, length, data)
    def get_sheet(self, sh_number, update_pos=True):
        """Parse one worksheet substream, cache the Sheet object in
        self._sheet_list and return it."""
        if self._resources_released:
            raise XLRDError("Can't load sheets after releasing resources.")
        if update_pos:
            self._position = self._sh_abs_posn[sh_number]
        _unused_biff_version = self.getbof(XL_WORKSHEET)
        # assert biff_version == self.biff_version ### FAILS
        # Have an example where book is v7 but sheet reports v8!!!
        # It appears to work OK if the sheet version is ignored.
        # Confirmed by Daniel Rentz: happens when Excel does "save as"
        # creating an old version file; ignore version details on sheet BOF.
        sh = sheet.Sheet(self,
                self._position,
                self._sheet_names[sh_number],
                sh_number,
                )
        sh.read(self)
        self._sheet_list[sh_number] = sh
        return sh
    def get_sheets(self):
        # Eagerly load every worksheet named in the BOUNDSHEET records.
        # DEBUG = 0
        if DEBUG: print >> self.logfile, "GET_SHEETS:", self._sheet_names, self._sh_abs_posn
        for sheetno in xrange(len(self._sheet_names)):
            if DEBUG: print >> self.logfile, "GET_SHEETS: sheetno =", sheetno, self._sheet_names, self._sh_abs_posn
            self.get_sheet(sheetno)
    def fake_globals_get_sheet(self): # for BIFF 4.0 and earlier
        # Such files have no workbook-globals section and exactly one
        # worksheet, so synthesise the global info and load that sheet.
        formatting.initialise_book(self)
        fake_sheet_name = u'Sheet 1'
        self._sheet_names = [fake_sheet_name]
        self._sh_abs_posn = [0]
        self._sheet_visibility = [0] # one sheet, visible
        self._sheet_list.append(None) # get_sheet updates _sheet_list but needs a None beforehand
        self.get_sheets()
    def handle_boundsheet(self, data):
        """Process one BOUNDSHEET record: record the sheet's name,
        visibility and absolute stream position; non-worksheet substreams
        (macro sheets, charts, VB modules) are noted and ignored."""
        # DEBUG = 1
        bv = self.biff_version
        self.derive_encoding()
        if DEBUG:
            fprintf(self.logfile, "BOUNDSHEET: bv=%d data %r\n", bv, data);
        if bv == 45: # BIFF4W
            #### Not documented in OOo docs ...
            # In fact, the *only* data is the name of the sheet.
            sheet_name = unpack_string(data, 0, self.encoding, lenlen=1)
            visibility = 0
            sheet_type = XL_BOUNDSHEET_WORKSHEET # guess, patch later
            if len(self._sh_abs_posn) == 0:
                abs_posn = self._sheetsoffset + self.base
                # Note (a) this won't be used
                # (b) it's the position of the SHEETHDR record
                # (c) add 11 to get to the worksheet BOF record
            else:
                abs_posn = -1 # unknown
        else:
            offset, visibility, sheet_type = unpack('<iBB', data[0:6])
            abs_posn = offset + self.base # because global BOF is always at posn 0 in the stream
            if bv < BIFF_FIRST_UNICODE:
                sheet_name = unpack_string(data, 6, self.encoding, lenlen=1)
            else:
                sheet_name = unpack_unicode(data, 6, lenlen=1)
        if DEBUG or self.verbosity >= 2:
            fprintf(self.logfile,
                "BOUNDSHEET: inx=%d vis=%r sheet_name=%r abs_posn=%d sheet_type=0x%02x\n",
                self._all_sheets_count, visibility, sheet_name, abs_posn, sheet_type)
        self._all_sheets_count += 1
        if sheet_type != XL_BOUNDSHEET_WORKSHEET:
            # Not a calculation sheet: map it to -1 and skip it.
            self._all_sheets_map.append(-1)
            descr = {
                1: 'Macro sheet',
                2: 'Chart',
                6: 'Visual Basic module',
                }.get(sheet_type, 'UNKNOWN')
            fprintf(self.logfile,
                "NOTE *** Ignoring non-worksheet data named %r (type 0x%02x = %s)\n",
                sheet_name, sheet_type, descr)
        else:
            snum = len(self._sheet_names)
            self._all_sheets_map.append(snum)
            self._sheet_names.append(sheet_name)
            self._sh_abs_posn.append(abs_posn)
            self._sheet_visibility.append(visibility)
            self._sheet_num_from_name[sheet_name] = snum
def handle_builtinfmtcount(self, data):
### N.B. This count appears to be utterly useless.
# DEBUG = 1
builtinfmtcount = unpack('<H', data[0:2])[0]
if DEBUG: fprintf(self.logfile, "BUILTINFMTCOUNT: %r\n", builtinfmtcount)
self.builtinfmtcount = builtinfmtcount
    def derive_encoding(self):
        """Work out self.encoding from the override / the CODEPAGE record /
        the BIFF version, then decode any pending raw user name. Returns
        the derived encoding."""
        if self.encoding_override:
            self.encoding = self.encoding_override
        elif self.codepage is None:
            # No CODEPAGE record seen (yet).
            if self.biff_version < 80:
                fprintf(self.logfile,
                    "*** No CODEPAGE record, no encoding_override: will use 'ascii'\n")
                self.encoding = 'ascii'
            else:
                self.codepage = 1200 # utf16le
                if self.verbosity >= 2:
                    fprintf(self.logfile, "*** No CODEPAGE record; assuming 1200 (utf_16_le)\n")
        else:
            codepage = self.codepage
            if encoding_from_codepage.has_key(codepage):
                encoding = encoding_from_codepage[codepage]
            elif 300 <= codepage <= 1999:
                # Windows codepages map directly to Python 'cpNNNN' codecs.
                encoding = 'cp' + str(codepage)
            else:
                encoding = 'unknown_codepage_' + str(codepage)
            if DEBUG or (self.verbosity and encoding != self.encoding) :
                fprintf(self.logfile, "CODEPAGE: codepage %r -> encoding %r\n", codepage, encoding)
            self.encoding = encoding
        if self.codepage != 1200: # utf_16_le
            # If we don't have a codec that can decode ASCII into Unicode,
            # we're well & truly stuffed -- let the punter know ASAP.
            try:
                _unused = unicode('trial', self.encoding)
            except:
                ei = sys.exc_info()[:2]
                fprintf(self.logfile,
                    "ERROR *** codepage %r -> encoding %r -> %s: %s\n",
                    self.codepage, self.encoding, ei[0].__name__.split(".")[-1], ei[1])
                raise
        if self.raw_user_name:
            # A WRITEACCESS record arrived before the encoding was known;
            # decode it now (see handle_writeaccess).
            strg = unpack_string(self.user_name, 0, self.encoding, lenlen=1)
            strg = strg.rstrip()
            # if DEBUG:
            #     print "CODEPAGE: user name decoded from %r to %r" % (self.user_name, strg)
            self.user_name = strg
            self.raw_user_name = False
        return self.encoding
def handle_codepage(self, data):
# DEBUG = 0
codepage = unpack('<H', data[0:2])[0]
self.codepage = codepage
self.derive_encoding()
    def handle_country(self, data):
        # COUNTRY record: (user-interface country, regional-settings country).
        countries = unpack('<HH', data[0:4])
        if self.verbosity: print >> self.logfile, "Countries:", countries
        # Note: in BIFF7 and earlier, country record was put (redundantly?) in each worksheet.
        assert self.countries == (0, 0) or self.countries == countries
        self.countries = countries
def handle_datemode(self, data):
datemode = unpack('<H', data[0:2])[0]
if DEBUG or self.verbosity:
fprintf(self.logfile, "DATEMODE: datemode %r\n", datemode)
assert datemode in (0, 1)
self.datemode = datemode
def handle_externname(self, data):
blah = DEBUG or self.verbosity >= 2
if self.biff_version >= 80:
option_flags, other_info =unpack("<HI", data[:6])
pos = 6
name, pos = unpack_unicode_update_pos(data, pos, lenlen=1)
extra = data[pos:]
if self._supbook_types[-1] == SUPBOOK_ADDIN:
self.addin_func_names.append(name)
if blah:
fprintf(self.logfile,
"EXTERNNAME: sbktype=%d oflags=0x%04x oinfo=0x%08x name=%r extra=%r\n",
self._supbook_types[-1], option_flags, other_info, name, extra)
    def handle_externsheet(self, data):
        """Process an EXTERNSHEET record (BIFF8: table of 3-D reference
        entries, possibly split over CONTINUE records; BIFF7-: a single
        encoded external-sheet entry)."""
        self.derive_encoding() # in case CODEPAGE record missing/out of order/wrong
        self._extnsht_count += 1 # for use as a 1-based index
        blah1 = DEBUG or self.verbosity >= 1
        blah2 = DEBUG or self.verbosity >= 2
        if self.biff_version >= 80:
            num_refs = unpack("<H", data[0:2])[0]
            bytes_reqd = num_refs * 6 + 2
            # Glue on CONTINUE records until the whole table is present.
            while len(data) < bytes_reqd:
                if blah1:
                    fprintf(
                        self.logfile,
                        "INFO: EXTERNSHEET needs %d bytes, have %d\n",
                        bytes_reqd, len(data),
                        )
                code2, length2, data2 = self.get_record_parts()
                if code2 != XL_CONTINUE:
                    raise XLRDError("Missing CONTINUE after EXTERNSHEET record")
                data += data2
            pos = 2
            for k in xrange(num_refs):
                info = unpack("<HHH", data[pos:pos+6])
                ref_recordx, ref_first_sheetx, ref_last_sheetx = info
                self._externsheet_info.append(info)
                pos += 6
                if blah2:
                    fprintf(
                        self.logfile,
                        "EXTERNSHEET(b8): k = %2d, record = %2d, first_sheet = %5d, last sheet = %5d\n",
                        k, ref_recordx, ref_first_sheetx, ref_last_sheetx,
                        )
        else:
            nc, ty = unpack("<BB", data[:2])
            if blah2:
                print "EXTERNSHEET(b7-):"
                hex_char_dump(data, 0, len(data))
                msg = {
                    1: "Encoded URL",
                    2: "Current sheet!!",
                    3: "Specific sheet in own doc't",
                    4: "Nonspecific sheet in own doc't!!",
                    }.get(ty, "Not encoded")
                print "   %3d chars, type is %d (%s)" % (nc, ty, msg)
            if ty == 3:
                # Type 3 carries a sheet name in the file's 8-bit encoding.
                sheet_name = unicode(data[2:nc+2], self.encoding)
                self._extnsht_name_from_num[self._extnsht_count] = sheet_name
                if blah2: print self._extnsht_name_from_num
            if not (1 <= ty <= 4):
                ty = 0
            self._externsheet_type_b57.append(ty)
def handle_filepass(self, data):
if self.verbosity >= 2:
logf = self.logfile
fprintf(logf, "FILEPASS:\n")
hex_char_dump(data, 0, len(data), base=0, fout=logf)
if self.biff_version >= 80:
kind1, = unpack('<H', data[:2])
if kind1 == 0: # weak XOR encryption
key, hash_value = unpack('<HH', data[2:])
fprintf(logf,
'weak XOR: key=0x%04x hash=0x%04x\n',
key, hash_value)
elif kind1 == 1:
kind2, = unpack('<H', data[4:6])
if kind2 == 1: # BIFF8 standard encryption
caption = "BIFF8 std"
elif kind2 == 2:
caption = "BIFF8 strong"
else:
caption = "** UNKNOWN ENCRYPTION METHOD **"
fprintf(logf, "%s\n", caption)
raise XLRDError("Workbook is encrypted")
    def handle_name(self, data):
        """Process a NAME record (BIFF >= 5.0 only): build a Name object,
        decode its flags and identifier, and stash the raw formula for
        later evaluation in names_epilogue()."""
        blah = DEBUG or self.verbosity >= 2
        bv = self.biff_version
        if bv < 50:
            return
        self.derive_encoding()
        # print
        # hex_char_dump(data, 0, len(data))
        (
        option_flags, kb_shortcut, name_len, fmla_len, extsht_index, sheet_index,
        menu_text_len, description_text_len, help_topic_text_len, status_bar_text_len,
        ) = unpack("<HBBHHH4B", data[0:14])
        nobj = Name()
        nobj.book = self ### CIRCULAR ###
        name_index = len(self.name_obj_list)
        nobj.name_index = name_index
        self.name_obj_list.append(nobj)
        nobj.option_flags = option_flags
        # Spread the option_flags bits over the documented Name attributes.
        for attr, mask, nshift in (
            ('hidden', 1, 0),
            ('func', 2, 1),
            ('vbasic', 4, 2),
            ('macro', 8, 3),
            ('complex', 0x10, 4),
            ('builtin', 0x20, 5),
            ('funcgroup', 0xFC0, 6),
            ('binary', 0x1000, 12),
            ):
            setattr(nobj, attr, (option_flags & mask) >> nshift)
        macro_flag = " M"[nobj.macro]
        if bv < 80:
            internal_name, pos = unpack_string_update_pos(data, 14, self.encoding, known_len=name_len)
        else:
            internal_name, pos = unpack_unicode_update_pos(data, 14, known_len=name_len)
        nobj.extn_sheet_num = extsht_index
        nobj.excel_sheet_index = sheet_index
        nobj.scope = None # patched up in the names_epilogue() method
        if blah:
            print "NAME[%d]:%s oflags=%d, name_len=%d, fmla_len=%d, extsht_index=%d, sheet_index=%d, name=%r" \
                % (name_index, macro_flag, option_flags, name_len,
                fmla_len, extsht_index, sheet_index, internal_name)
        name = internal_name
        if nobj.builtin:
            # Built-in names are stored as a 1-byte code; translate it.
            name = builtin_name_from_code.get(name, "??Unknown??")
            if blah: print "    builtin: %s" % name
        nobj.name = name
        nobj.raw_formula = data[pos:]
        nobj.basic_formula_len = fmla_len
        nobj.evaluated = 0
        if blah:
            nobj.dump(
                self.logfile,
                header="--- handle_name: name[%d] ---" % name_index,
                footer="-------------------",
                )
    def names_epilogue(self):
        """Finish off NAME processing at global EOF: convert each Name's
        sheet index to a scope, evaluate the name formulas, and build the
        name_and_scope_map / name_map lookup dicts."""
        blah = self.verbosity >= 2
        f = self.logfile
        if blah:
            print >> f, "+++++ names_epilogue +++++"
            print >> f, "_all_sheets_map", self._all_sheets_map
            print >> f, "_extnsht_name_from_num", self._extnsht_name_from_num
            print >> f, "_sheet_num_from_name", self._sheet_num_from_name
        num_names = len(self.name_obj_list)
        for namex in range(num_names):
            nobj = self.name_obj_list[namex]
            # Convert from excel_sheet_index to scope.
            # This is done here because in BIFF7 and earlier, the
            # BOUNDSHEET records (from which _all_sheets_map is derived)
            # come after the NAME records.
            if self.biff_version >= 80:
                sheet_index = nobj.excel_sheet_index
                if sheet_index == 0:
                    intl_sheet_index = -1 # global
                elif 1 <= sheet_index <= len(self._all_sheets_map):
                    intl_sheet_index = self._all_sheets_map[sheet_index-1]
                    if intl_sheet_index == -1: # maps to a macro or VBA sheet
                        intl_sheet_index = -2 # valid sheet reference but not useful
                else:
                    # huh?
                    intl_sheet_index = -3 # invalid
            elif 50 <= self.biff_version <= 70:
                sheet_index = nobj.extn_sheet_num
                if sheet_index == 0:
                    intl_sheet_index = -1 # global
                else:
                    sheet_name = self._extnsht_name_from_num[sheet_index]
                    intl_sheet_index = self._sheet_num_from_name.get(sheet_name, -2)
            nobj.scope = intl_sheet_index
        for namex in range(num_names):
            nobj = self.name_obj_list[namex]
            # Parse the formula ...
            if nobj.macro or nobj.binary: continue
            if nobj.evaluated: continue
            evaluate_name_formula(self, nobj, namex, blah=blah)
        if self.verbosity >= 2:
            print >> f, "---------- name object dump ----------"
            for namex in range(num_names):
                nobj = self.name_obj_list[namex]
                nobj.dump(f, header="--- name[%d] ---" % namex)
            print >> f, "--------------------------------------"
        #
        # Build some dicts for access to the name objects
        #
        name_and_scope_map = {} # (name.lower(), scope): Name_object
        name_map = {}           # name.lower() : list of Name_objects (sorted in scope order)
        for namex in range(num_names):
            nobj = self.name_obj_list[namex]
            name_lcase = nobj.name.lower()
            key = (name_lcase, nobj.scope)
            if name_and_scope_map.has_key(key):
                msg = 'Duplicate entry %r in name_and_scope_map' % (key, )
                if 0:
                    raise XLRDError(msg)
                else:
                    if self.verbosity:
                        print >> f, msg
            name_and_scope_map[key] = nobj
            if name_map.has_key(name_lcase):
                name_map[name_lcase].append((nobj.scope, nobj))
            else:
                name_map[name_lcase] = [(nobj.scope, nobj)]
        # Sort each name's list by scope, then strip the sort keys.
        for key in name_map.keys():
            alist = name_map[key]
            alist.sort()
            name_map[key] = [x[1] for x in alist]
        self.name_and_scope_map = name_and_scope_map
        self.name_map = name_map
def handle_obj(self, data):
# Not doing much handling at all.
# Worrying about embedded (BOF ... EOF) substreams is done elsewhere.
# DEBUG = 1
obj_type, obj_id = unpack('<HI', data[4:10])
# if DEBUG: print "---> handle_obj type=%d id=0x%08x" % (obj_type, obj_id)
    def handle_supbook(self, data):
        """Process a SUPBOOK record, classifying it as internal 3-D refs,
        add-in functions, DDE/OLE document, or external workbook."""
        self._supbook_types.append(None)
        blah = DEBUG or self.verbosity >= 2
        if 0:
            print "SUPBOOK:"
            hex_char_dump(data, 0, len(data))
        num_sheets = unpack("<H", data[0:2])[0]
        sbn = self._supbook_count
        self._supbook_count += 1
        if data[2:4] == "\x01\x04":
            # Magic marker: references into this same workbook.
            self._supbook_types[-1] = SUPBOOK_INTERNAL
            self._supbook_locals_inx = self._supbook_count - 1
            if blah:
                print "SUPBOOK[%d]: internal 3D refs; %d sheets" % (sbn, num_sheets)
                print "    _all_sheets_map", self._all_sheets_map
            return
        if data[0:4] == "\x01\x00\x01\x3A":
            # Magic marker: add-in function library.
            self._supbook_types[-1] = SUPBOOK_ADDIN
            self._supbook_addins_inx = self._supbook_count - 1
            if blah: print "SUPBOOK[%d]: add-in functions" % sbn
            return
        url, pos = unpack_unicode_update_pos(data, 2, lenlen=2)
        if num_sheets == 0:
            self._supbook_types[-1] = SUPBOOK_DDEOLE
            if blah: print "SUPBOOK[%d]: DDE/OLE document = %r" % (sbn, url)
            return
        self._supbook_types[-1] = SUPBOOK_EXTERNAL
        if blah: print "SUPBOOK[%d]: url = %r" % (sbn, url)
        sheet_names = []
        for x in range(num_sheets):
            shname, pos = unpack_unicode_update_pos(data, pos, lenlen=2)
            sheet_names.append(shname)
            if blah: print "  sheet %d: %r" % (x, shname)
    def handle_sheethdr(self, data):
        """BIFF 4W only: parse the worksheet substream that follows the
        SHEETHDR record, then reposition the stream past it."""
        # This a BIFF 4W special.
        # The SHEETHDR record is followed by a (BOF ... EOF) substream containing
        # a worksheet.
        # DEBUG = 1
        self.derive_encoding()
        sheet_len = unpack('<i', data[:4])[0]
        sheet_name = unpack_string(data, 4, self.encoding, lenlen=1)
        sheetno = self._sheethdr_count
        assert sheet_name == self._sheet_names[sheetno]
        self._sheethdr_count += 1
        BOF_posn = self._position
        posn = BOF_posn - 4 - len(data)
        if DEBUG: print >> self.logfile, 'SHEETHDR %d at posn %d: len=%d name=%r' % (sheetno, posn, sheet_len, sheet_name)
        # Formats are per-sheet in BIFF 4W, so reset them for this sheet.
        self.initialise_format_info()
        if DEBUG: print >> self.logfile, 'SHEETHDR: xf epilogue flag is %d' % self._xf_epilogue_done
        self._sheet_list.append(None) # get_sheet updates _sheet_list but needs a None beforehand
        self.get_sheet(sheetno, update_pos=False)
        if DEBUG: print >> self.logfile, 'SHEETHDR: posn after get_sheet() =', self._position
        self._position = BOF_posn + sheet_len
    def handle_sheetsoffset(self, data):
        # BIFF 4W only: stream offset of the sheet data, used by
        # handle_boundsheet to compute the first sheet's position.
        # DEBUG = 0
        posn = unpack('<i', data)[0]
        if DEBUG: print >> self.logfile, 'SHEETSOFFSET:', posn
        self._sheetsoffset = posn
    def handle_sst(self, data):
        """Read the SST (shared strings table), gluing on any following
        CONTINUE records before handing the pieces to unpack_SST_table()."""
        # DEBUG = 1
        if DEBUG:
            print >> self.logfile, "SST Processing"
            t0 = time.time()
        nbt = len(data)
        strlist = [data]
        uniquestrings = unpack('<i', data[4:8])[0]
        if DEBUG  or self.verbosity >= 2:
            fprintf(self.logfile, "SST: unique strings: %d\n", uniquestrings)
        while 1:
            code, nb, data = self.get_record_parts_conditional(XL_CONTINUE)
            if code is None:
                break
            nbt += nb
            if DEBUG >= 2:
                fprintf(self.logfile, "CONTINUE: adding %d bytes to SST -> %d\n", nb, nbt)
            strlist.append(data)
        self._sharedstrings = unpack_SST_table(strlist, uniquestrings)
        if DEBUG:
            t1 = time.time()
            print >> self.logfile, "SST processing took %.2f seconds" % (t1 - t0, )
    def handle_writeaccess(self, data):
        """WRITEACCESS record: the name of the last user to save the file.
        If the encoding isn't known yet (BIFF < 8), stash the raw bytes for
        later decoding by derive_encoding()."""
        # DEBUG = 0
        if self.biff_version < 80:
            if not self.encoding:
                self.raw_user_name = True
                self.user_name = data
                return
            strg = unpack_string(data, 0, self.encoding, lenlen=1)
        else:
            strg = unpack_unicode(data, 0, lenlen=2)
        if DEBUG: print >> self.logfile, "WRITEACCESS: %d bytes; raw=%d %r" % (len(data), self.raw_user_name, strg)
        strg = strg.rstrip()
        self.user_name = strg
def parse_globals(self):
    """Dispatch loop for the workbook-globals substream.

    Reads records following the global BOF and routes each one to its
    handler until the global EOF record is met.
    """
    # DEBUG = 0
    # no need to position, just start reading (after the BOF)
    formatting.initialise_book(self)
    while 1:
        rc, length, data = self.get_record_parts()
        if DEBUG: print "parse_globals: record code is 0x%04x" % rc
        if rc == XL_SST:
            self.handle_sst(data)
        elif rc == XL_FONT or rc == XL_FONT_B3B4:
            self.handle_font(data)
        elif rc == XL_FORMAT: # XL_FORMAT2 is BIFF <= 3.0, can't appear in globals
            self.handle_format(data)
        elif rc == XL_XF:
            self.handle_xf(data)
        elif rc == XL_BOUNDSHEET:
            self.handle_boundsheet(data)
        elif rc == XL_DATEMODE:
            self.handle_datemode(data)
        elif rc == XL_CODEPAGE:
            self.handle_codepage(data)
        elif rc == XL_COUNTRY:
            self.handle_country(data)
        elif rc == XL_EXTERNNAME:
            self.handle_externname(data)
        elif rc == XL_EXTERNSHEET:
            self.handle_externsheet(data)
        elif rc == XL_FILEPASS:
            self.handle_filepass(data)
        elif rc == XL_WRITEACCESS:
            self.handle_writeaccess(data)
        elif rc == XL_SHEETSOFFSET:
            self.handle_sheetsoffset(data)
        elif rc == XL_SHEETHDR:
            self.handle_sheethdr(data)
        elif rc == XL_SUPBOOK:
            self.handle_supbook(data)
        elif rc == XL_NAME:
            self.handle_name(data)
        elif rc == XL_PALETTE:
            self.handle_palette(data)
        elif rc == XL_STYLE:
            self.handle_style(data)
        elif rc & 0xff == 9:
            # low byte 9 marks a BOF record, which shouldn't occur here
            print >> self.logfile, "*** Unexpected BOF at posn %d: 0x%04x len=%d data=%r" \
                % (self._position - length - 4, rc, length, data)
        elif rc == XL_EOF:
            # finalise cross-record state before leaving the globals section
            self.xf_epilogue()
            self.names_epilogue()
            self.palette_epilogue()
            if not self.encoding:
                self.derive_encoding()
            if self.biff_version == 45:
                # DEBUG = 0
                if DEBUG: print "global EOF: position", self._position
                # if DEBUG:
                #     pos = self._position - 4
                #     print repr(self.mem[pos:pos+40])
            return
        else:
            # if DEBUG:
            #     print "parse_globals: ignoring record code 0x%04x" % rc
            pass
def read(self, pos, length):
    """Return up to `length` bytes of the workbook stream starting at `pos`,
    and advance the stream position past what was actually available."""
    data = self.mem[pos:pos+length]
    # len(data) may be < length near end of stream; position reflects reality
    self._position = pos + len(data)
    return data
def getbof(self, rqd_stream):
    """Read and validate a BOF (Beginning Of File) record.

    Returns the BIFF version number announced by the record (21/30/40/45/
    50/70/80), or raises XLRDError via bof_error() for unusable streams.
    `rqd_stream` is the stream type the caller expects (globals/worksheet).
    """
    # DEBUG = 1
    # if DEBUG: print >> self.logfile, "getbof(): position", self._position
    if DEBUG: print >> self.logfile, "reqd: 0x%04x" % rqd_stream
    def bof_error(msg):
        # single exit path for all failure modes
        raise XLRDError('Unsupported format, or corrupt file: ' + msg)
    savpos = self._position
    opcode = self.get2bytes()
    if opcode == MY_EOF:
        bof_error('Expected BOF record; met end of file')
    if opcode not in bofcodes:
        bof_error('Expected BOF record; found %r' % self.mem[savpos:savpos+8])
    length = self.get2bytes()
    if length == MY_EOF:
        bof_error('Incomplete BOF record[1]; met end of file')
    if length < boflen[opcode] or length > 20:
        bof_error(
            'Invalid length (%d) for BOF record type 0x%04x'
            % (length, opcode))
    data = self.read(self._position, length);
    if DEBUG: print >> self.logfile, "\ngetbof(): data=%r" % data
    if len(data) < length:
        bof_error('Incomplete BOF record[2]; met end of file')
    # high byte of the opcode encodes the record-format generation
    version1 = opcode >> 8
    version2, streamtype = unpack('<HH', data[0:4])
    if DEBUG:
        print >> self.logfile, "getbof(): op=0x%04x version2=0x%04x streamtype=0x%04x" \
            % (opcode, version2, streamtype)
    bof_offset = self._position - 4 - length
    if DEBUG:
        print >> self.logfile, "getbof(): BOF found at offset %d; savpos=%d" \
            % (bof_offset, savpos)
    version = build = year = 0
    if version1 == 0x08:
        build, year = unpack('<HH', data[4:8])
        if version2 == 0x0600:
            version = 80
        elif version2 == 0x0500:
            # Excel 5 and 7 share version2 == 0x0500; tell them apart by
            # the build id / build year fields
            if year < 1994 or build in (2412, 3218, 3321):
                version = 50
            else:
                version = 70
        else:
            # dodgy one, created by a 3rd-party tool
            version = {
                0x0000: 21,
                0x0007: 21,
                0x0200: 21,
                0x0300: 30,
                0x0400: 40,
                }.get(version2, 0)
    elif version1 in (0x04, 0x02, 0x00):
        version = {0x04: 40, 0x02: 30, 0x00: 21}[version1]
    if version == 40 and streamtype == XL_WORKBOOK_GLOBALS_4W:
        version = 45 # i.e. 4W
    if DEBUG or self.verbosity >= 2:
        print >> self.logfile, \
            "BOF: op=0x%04x vers=0x%04x stream=0x%04x buildid=%d buildyr=%d -> BIFF%d" \
            % (opcode, version2, streamtype, build, year, version)
    got_globals = streamtype == XL_WORKBOOK_GLOBALS or (
        version == 45 and streamtype == XL_WORKBOOK_GLOBALS_4W)
    if (rqd_stream == XL_WORKBOOK_GLOBALS and got_globals) or streamtype == rqd_stream:
        return version
    if version < 50 and streamtype == XL_WORKSHEET:
        return version
    if version >= 50 and streamtype == 0x0100:
        bof_error("Workspace file -- no spreadsheet data")
    bof_error(
        'BOF not workbook/worksheet: op=0x%04x vers=0x%04x strm=0x%04x build=%d year=%d -> BIFF%d' \
        % (opcode, version2, streamtype, build, year, version)
        )
# === helper functions
def expand_cell_address(inrow, incol):
    """Decode a BIFF8 packed cell address into (row, col, relrow, relcol).

    Ref: OOo docs, "4.3.4 Cell Addresses in BIFF8". Bit 15 of `incol` marks
    a relative row, bit 14 a relative column; relative components are
    sign-extended (row from 16 bits, column from 8 bits).
    """
    relrow = (incol >> 15) & 1
    relcol = (incol >> 14) & 1
    outrow = inrow
    if relrow and outrow >= 32768:
        outrow -= 65536
    outcol = incol & 0xFF
    if relcol and outcol >= 128:
        outcol -= 256
    return outrow, outcol, relrow, relcol
def colname(colx, _A2Z="ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
    """Convert a 0-based column index into the spreadsheet column name
    (0 -> 'A', 25 -> 'Z', 26 -> 'AA', ...). This is a bijective base-26
    encoding, hence the extra -1 between digits."""
    assert colx >= 0
    letters = []
    while True:
        colx, rem = divmod(colx, 26)
        letters.append(_A2Z[rem])
        if not colx:
            break
        colx -= 1
    return ''.join(reversed(letters))
def display_cell_address(rowx, colx, relrow, relcol):
    """Format a cell address for display: absolute parts are $-prefixed
    ($A$1 style), relative parts are rendered as (*+n) / (*-n) offsets."""
    if relrow:
        sign = "-" if rowx < 0 else "+"
        rowpart = "(*%s%d)" % (sign, abs(rowx))
    else:
        rowpart = "$%d" % (rowx + 1,)
    if relcol:
        sign = "-" if colx < 0 else "+"
        colpart = "(*%s%d)" % (sign, abs(colx))
    else:
        colpart = "$" + colname(colx)
    return colpart + rowpart
def unpack_SST_table(datatab, nstrings):
    "Return list of strings"
    # datatab is the list of raw payloads: the SST record followed by its
    # CONTINUE records. A single string may straddle record boundaries,
    # in which case the first byte of the next record re-states the
    # compression flag for the remainder of the string.
    datainx = 0
    ndatas = len(datatab)
    data = datatab[0]
    datalen = len(data)
    pos = 8            # skip the total-count and unique-count int32 fields
    strings = []
    # bind hot names to locals: this loop runs once per shared string
    strappend = strings.append
    local_unpack = unpack
    local_min = min
    local_ord = ord
    latin_1 = "latin_1"
    for _unused_i in xrange(nstrings):
        nchars = local_unpack('<H', data[pos:pos+2])[0]
        pos += 2
        options = local_ord(data[pos])
        pos += 1
        rtsz = 0
        if options & 0x08: # richtext
            # each rich-text run descriptor is 4 bytes
            rtsz = 4 * local_unpack('<H', data[pos:pos+2])[0]
            pos += 2
        if options & 0x04: # phonetic
            rtsz += local_unpack('<i', data[pos:pos+4])[0]
            pos += 4
        accstrg = u''
        charsgot = 0
        while 1:
            charsneed = nchars - charsgot
            if options & 0x01:
                # Uncompressed UTF-16
                charsavail = local_min((datalen - pos) >> 1, charsneed)
                rawstrg = data[pos:pos+2*charsavail]
                # if DEBUG: print "SST U16: nchars=%d pos=%d rawstrg=%r" % (nchars, pos, rawstrg)
                try:
                    accstrg += unicode(rawstrg, "utf_16_le")
                except:
                    # print "SST U16: nchars=%d pos=%d rawstrg=%r" % (nchars, pos, rawstrg)
                    # Probable cause: dodgy data e.g. unfinished surrogate pair.
                    # E.g. file unicode2.xls in pyExcelerator's examples has cells containing
                    # unichr(i) for i in range(0x100000)
                    # so this will include 0xD800 etc
                    raise
                pos += 2*charsavail
            else:
                # Note: this is COMPRESSED (not ASCII!) encoding!!!
                charsavail = local_min(datalen - pos, charsneed)
                rawstrg = data[pos:pos+charsavail]
                # if DEBUG: print "SST CMPRSD: nchars=%d pos=%d rawstrg=%r" % (nchars, pos, rawstrg)
                accstrg += unicode(rawstrg, latin_1)
                pos += charsavail
            charsgot += charsavail
            if charsgot == nchars:
                break
            # string continues in the next CONTINUE record
            datainx += 1
            data = datatab[datainx]
            datalen = len(data)
            options = local_ord(data[0])
            pos = 1
        pos += rtsz # size of richtext & phonetic stuff to skip
        # also allow for the rich text etc being split ...
        if pos >= datalen:
            # adjust to correct position in next record
            pos = pos - datalen
            datainx += 1
            if datainx < ndatas:
                data = datatab[datainx]
                datalen = len(data)
            else:
                # only legal to run off the end after the final string
                assert _unused_i == nstrings - 1
        strappend(accstrg)
    return strings
| gpl-2.0 |
sryza/spark | python/examples/kmeans.py | 10 | 2247 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This example requires numpy (http://www.numpy.org/)
"""
import sys
import numpy as np
from pyspark import SparkContext
def parseVector(line):
    """Parse one space-separated line of numbers into a NumPy float array."""
    return np.array(list(map(float, line.split(' '))))
def closestPoint(p, centers):
    """Return the index of the center nearest to point `p`, measured by
    squared Euclidean distance (ties keep the earliest index)."""
    best_index = 0
    best_dist = float("+inf")
    for idx, center in enumerate(centers):
        dist = np.sum((p - center) ** 2)
        if dist < best_dist:
            best_dist = dist
            best_index = idx
    return best_index
if __name__ == "__main__":
    # Usage: kmeans <master> <file> <k> <convergeDist>
    if len(sys.argv) < 5:
        print >> sys.stderr, "Usage: kmeans <master> <file> <k> <convergeDist>"
        exit(-1)
    sc = SparkContext(sys.argv[1], "PythonKMeans")
    lines = sc.textFile(sys.argv[2])
    data = lines.map(parseVector).cache()
    K = int(sys.argv[3])
    convergeDist = float(sys.argv[4])
    # TODO: change this after we port takeSample()
    #kPoints = data.takeSample(False, K, 34)
    # initial centers: simply the first K points of the dataset
    kPoints = data.take(K)
    tempDist = 1.0
    # Lloyd's algorithm: re-assign points and re-average centers until the
    # total squared movement of the centers drops below convergeDist.
    while tempDist > convergeDist:
        closest = data.map(
            lambda p : (closestPoint(p, kPoints), (p, 1)))
        # per-center (sum of points, count)
        pointStats = closest.reduceByKey(
            lambda (x1, y1), (x2, y2): (x1 + x2, y1 + y2))
        # new center = mean of assigned points
        newPoints = pointStats.map(
            lambda (x, (y, z)): (x, y / z)).collect()
        tempDist = sum(np.sum((kPoints[x] - y) ** 2) for (x, y) in newPoints)
        for (x, y) in newPoints:
            kPoints[x] = y
    print "Final centers: " + str(kPoints)
| apache-2.0 |
dongjoon-hyun/spark | python/pyspark/serializers.py | 13 | 20602 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
PySpark supports custom serializers for transferring data; this can improve
performance.
By default, PySpark uses :class:`PickleSerializer` to serialize objects using Python's
`cPickle` serializer, which can serialize nearly any Python object.
Other serializers, like :class:`MarshalSerializer`, support fewer datatypes but can be
faster.
Examples
--------
The serializer is chosen when creating :class:`SparkContext`:
>>> from pyspark.context import SparkContext
>>> from pyspark.serializers import MarshalSerializer
>>> sc = SparkContext('local', 'test', serializer=MarshalSerializer())
>>> sc.parallelize(list(range(1000))).map(lambda x: 2 * x).take(10)
[0, 2, 4, 6, 8, 10, 12, 14, 16, 18]
>>> sc.stop()
PySpark serializes objects in batches; by default, the batch size is chosen based
on the size of objects and is also configurable by SparkContext's `batchSize`
parameter:
>>> sc = SparkContext('local', 'test', batchSize=2)
>>> rdd = sc.parallelize(range(16), 4).map(lambda x: x)
Behind the scenes, this creates a JavaRDD with four partitions, each of
which contains two batches of two objects:
>>> rdd.glom().collect()
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]
>>> int(rdd._jrdd.count())
8
>>> sc.stop()
"""
import sys
from itertools import chain, product
import marshal
import struct
import types
import collections
import zlib
import itertools
import pickle
pickle_protocol = pickle.HIGHEST_PROTOCOL
from pyspark import cloudpickle
from pyspark.util import print_exec # type: ignore
__all__ = ["PickleSerializer", "MarshalSerializer", "UTF8Deserializer"]
class SpecialLengths(object):
    # Negative "length" values used as in-band sentinels in the framed wire
    # protocol between the JVM and Python workers; a real payload length is
    # always >= 0, so these can never collide with data frames.
    END_OF_DATA_SECTION = -1
    PYTHON_EXCEPTION_THROWN = -2
    TIMING_DATA = -3
    END_OF_STREAM = -4
    NULL = -5
    START_ARROW_STREAM = -6
class Serializer(object):
    """Abstract base class for PySpark serializers.

    Subclasses implement dump_stream()/load_stream(); two serializer
    instances compare equal exactly when data written by one can be read
    back by the other.
    """

    def dump_stream(self, iterator, stream):
        """
        Serialize an iterator of objects to the output stream.
        """
        raise NotImplementedError

    def load_stream(self, stream):
        """
        Return an iterator of deserialized objects from the input stream.
        """
        raise NotImplementedError

    def _load_stream_without_unbatching(self, stream):
        """
        Return an iterator of deserialized batches (iterable) of objects.

        Serializers that do not operate on batches fall back to wrapping
        each object in a single-element list.
        """
        return ([obj] for obj in self.load_stream(stream))

    # Note: our notion of "equality" is that output generated by
    # equal serializers can be deserialized using the same serializer.
    # This default implementation handles the simple cases;
    # subclasses should override __eq__ as appropriate.
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "%s()" % type(self).__name__

    def __hash__(self):
        return hash(str(self))
class FramedSerializer(Serializer):

    """
    Serializer that writes objects as a stream of (length, data) pairs,
    where `length` is a 32-bit integer and data is `length` bytes.
    """

    def dump_stream(self, iterator, stream):
        for obj in iterator:
            self._write_with_length(obj, stream)

    def load_stream(self, stream):
        while True:
            try:
                yield self._read_with_length(stream)
            except EOFError:
                return

    def _write_with_length(self, obj, stream):
        serialized = self.dumps(obj)
        if serialized is None:
            raise ValueError("serialized value should not be None")
        if len(serialized) > (1 << 31):
            # the frame length prefix is a signed 32-bit int
            raise ValueError("can not serialize object larger than 2G")
        write_int(len(serialized), stream)
        stream.write(serialized)

    def _read_with_length(self, stream):
        length = read_int(stream)
        if length == SpecialLengths.END_OF_DATA_SECTION:
            # sentinel: no more framed objects in this section
            raise EOFError
        elif length == SpecialLengths.NULL:
            return None
        obj = stream.read(length)
        if len(obj) < length:
            # stream was truncated mid-frame
            raise EOFError
        return self.loads(obj)

    def dumps(self, obj):
        """
        Serialize an object into a byte array.
        When batching is used, this will be called with an array of objects.
        """
        raise NotImplementedError

    def loads(self, obj):
        """
        Deserialize an object from a byte array.
        """
        raise NotImplementedError
class BatchedSerializer(Serializer):

    """
    Serializes a stream of objects in batches by calling its wrapped
    Serializer with streams of objects.
    """

    UNLIMITED_BATCH_SIZE = -1
    UNKNOWN_BATCH_SIZE = 0

    def __init__(self, serializer, batchSize=UNLIMITED_BATCH_SIZE):
        self.serializer = serializer
        self.batchSize = batchSize

    def _batched(self, iterator):
        """Yield batches of at most `batchSize` items from `iterator`.

        UNLIMITED_BATCH_SIZE produces a single batch with everything.
        """
        if self.batchSize == self.UNLIMITED_BATCH_SIZE:
            yield list(iterator)
        elif hasattr(iterator, "__len__") and hasattr(iterator, "__getitem__"):
            # Sized sequences can be sliced directly, avoiding the
            # item-by-item accumulation below.
            # BUGFIX: this used to test for __getslice__, which was removed
            # in Python 3, so the fast path was unreachable dead code.
            n = len(iterator)
            for i in range(0, n, self.batchSize):
                yield iterator[i: i + self.batchSize]
        else:
            items = []
            count = 0
            for item in iterator:
                items.append(item)
                count += 1
                if count == self.batchSize:
                    yield items
                    items = []
                    count = 0
            if items:
                yield items

    def dump_stream(self, iterator, stream):
        self.serializer.dump_stream(self._batched(iterator), stream)

    def load_stream(self, stream):
        return chain.from_iterable(self._load_stream_without_unbatching(stream))

    def _load_stream_without_unbatching(self, stream):
        return self.serializer.load_stream(stream)

    def __repr__(self):
        return "BatchedSerializer(%s, %d)" % (str(self.serializer), self.batchSize)
class FlattenedValuesSerializer(BatchedSerializer):

    """
    Serializes a stream of list of pairs, split the list of values
    which contain more than a certain number of objects to make them
    have similar sizes.
    """

    def __init__(self, serializer, batchSize=10):
        BatchedSerializer.__init__(self, serializer, batchSize)

    def _batched(self, iterator):
        """Split each (key, values) pair into chunks of at most batchSize values."""
        limit = self.batchSize
        for key, values in iterator:
            for start in range(0, len(values), limit):
                yield key, values[start:start + limit]

    def load_stream(self, stream):
        # values were flattened on write, so no unbatching is needed here
        return self.serializer.load_stream(stream)

    def __repr__(self):
        return "FlattenedValuesSerializer(%s, %d)" % (self.serializer, self.batchSize)
class AutoBatchedSerializer(BatchedSerializer):

    """
    Choose the size of batch automatically based on the size of object
    """

    def __init__(self, serializer, bestSize=1 << 16):
        BatchedSerializer.__init__(self, serializer, self.UNKNOWN_BATCH_SIZE)
        # target serialized size per batch, in bytes (default 64 KiB)
        self.bestSize = bestSize

    def dump_stream(self, iterator, stream):
        # Adaptive batching: double the batch while serialized output stays
        # under bestSize; halve it when a batch comes out more than 10x too
        # large. Each batch is written as a length-prefixed frame.
        batch, best = 1, self.bestSize
        iterator = iter(iterator)
        while True:
            vs = list(itertools.islice(iterator, batch))
            if not vs:
                break
            bytes = self.serializer.dumps(vs)
            write_int(len(bytes), stream)
            stream.write(bytes)
            size = len(bytes)
            if size < best:
                batch *= 2
            elif size > best * 10 and batch > 1:
                batch //= 2

    def __repr__(self):
        return "AutoBatchedSerializer(%s)" % self.serializer
class CartesianDeserializer(Serializer):

    """
    Deserializes the JavaRDD cartesian() of two PythonRDDs.
    Due to pyspark batching we cannot simply use the result of the Java RDD cartesian,
    we additionally need to do the cartesian within each pair of batches.
    """

    def __init__(self, key_ser, val_ser):
        self.key_ser = key_ser
        self.val_ser = val_ser

    def _load_stream_without_unbatching(self, stream):
        keys = self.key_ser._load_stream_without_unbatching(stream)
        vals = self.val_ser._load_stream_without_unbatching(stream)
        for key_batch, val_batch in zip(keys, vals):
            # for correctness with repeated cartesian/zip this must be returned as one batch
            yield product(key_batch, val_batch)

    def load_stream(self, stream):
        return chain.from_iterable(self._load_stream_without_unbatching(stream))

    def __repr__(self):
        return "CartesianDeserializer(%s, %s)" % \
            (str(self.key_ser), str(self.val_ser))
class PairDeserializer(Serializer):

    """
    Deserializes the JavaRDD zip() of two PythonRDDs.
    Due to pyspark batching we cannot simply use the result of the Java RDD zip,
    we additionally need to do the zip within each pair of batches.
    """

    def __init__(self, key_ser, val_ser):
        self.key_ser = key_ser
        self.val_ser = val_ser

    def _load_stream_without_unbatching(self, stream):
        key_batch_stream = self.key_ser._load_stream_without_unbatching(stream)
        val_batch_stream = self.val_ser._load_stream_without_unbatching(stream)
        for (key_batch, val_batch) in zip(key_batch_stream, val_batch_stream):
            # For double-zipped RDDs, the batches can be iterators from other PairDeserializer,
            # instead of lists. We need to convert them to lists if needed.
            key_batch = key_batch if hasattr(key_batch, '__len__') else list(key_batch)
            val_batch = val_batch if hasattr(val_batch, '__len__') else list(val_batch)
            # a faithful element-wise zip requires equal batch lengths
            if len(key_batch) != len(val_batch):
                raise ValueError("Can not deserialize PairRDD with different number of items"
                                 " in batches: (%d, %d)" % (len(key_batch), len(val_batch)))
            # for correctness with repeated cartesian/zip this must be returned as one batch
            yield zip(key_batch, val_batch)

    def load_stream(self, stream):
        return chain.from_iterable(self._load_stream_without_unbatching(stream))

    def __repr__(self):
        return "PairDeserializer(%s, %s)" % (str(self.key_ser), str(self.val_ser))
class NoOpSerializer(FramedSerializer):
    # Pass-through payloads: the bytes are used as-is in both directions;
    # only the length framing from FramedSerializer is added/removed.

    def loads(self, obj):
        return obj

    def dumps(self, obj):
        return obj
# Hack namedtuple, make it picklable
__cls = {} # type: ignore
def _restore(name, fields, value):
""" Restore an object of namedtuple"""
k = (name, fields)
cls = __cls.get(k)
if cls is None:
cls = collections.namedtuple(name, fields)
__cls[k] = cls
return cls(*value)
def _hack_namedtuple(cls):
""" Make class generated by namedtuple picklable """
name = cls.__name__
fields = cls._fields
def __reduce__(self):
return (_restore, (name, fields, tuple(self)))
cls.__reduce__ = __reduce__
cls._is_namedtuple_ = True
return cls
def _hijack_namedtuple():
    """ Hack namedtuple() to make it picklable """
    # hijack only one time
    if hasattr(collections.namedtuple, "__hijack"):
        return

    global _old_namedtuple  # or it will put in closure
    global _old_namedtuple_kwdefaults  # or it will put in closure too

    def _copy_func(f):
        # clone the function object so the original factory survives the
        # __code__ swap performed below
        return types.FunctionType(f.__code__, f.__globals__, f.__name__,
                                  f.__defaults__, f.__closure__)

    _old_namedtuple = _copy_func(collections.namedtuple)
    _old_namedtuple_kwdefaults = collections.namedtuple.__kwdefaults__

    def namedtuple(*args, **kwargs):
        # forward keyword-only defaults explicitly, then hack the result
        for k, v in _old_namedtuple_kwdefaults.items():
            kwargs[k] = kwargs.get(k, v)
        cls = _old_namedtuple(*args, **kwargs)
        return _hack_namedtuple(cls)

    # replace namedtuple with the new one
    collections.namedtuple.__globals__["_old_namedtuple_kwdefaults"] = _old_namedtuple_kwdefaults
    collections.namedtuple.__globals__["_old_namedtuple"] = _old_namedtuple
    collections.namedtuple.__globals__["_hack_namedtuple"] = _hack_namedtuple
    collections.namedtuple.__code__ = namedtuple.__code__
    collections.namedtuple.__hijack = 1

    # hack the cls already generated by namedtuple.
    # Those created in other modules can be pickled as normal,
    # so only hack those in __main__ module
    for n, o in sys.modules["__main__"].__dict__.items():
        if (type(o) is type and o.__base__ is tuple
                and hasattr(o, "_fields")
                and "__reduce__" not in o.__dict__):
            _hack_namedtuple(o)  # hack inplace


_hijack_namedtuple()
class PickleSerializer(FramedSerializer):

    """
    Serializes objects using Python's pickle serializer:
    http://docs.python.org/2/library/pickle.html
    This serializer supports nearly any Python object, but may
    not be as fast as more specialized serializers.
    """

    def dumps(self, obj):
        # pickle_protocol is the module-level pickle.HIGHEST_PROTOCOL constant
        return pickle.dumps(obj, pickle_protocol)

    def loads(self, obj, encoding="bytes"):
        # encoding="bytes" keeps data pickled by Python 2 readable here
        return pickle.loads(obj, encoding=encoding)
class CloudPickleSerializer(PickleSerializer):
    # Like PickleSerializer, but uses cloudpickle so that lambdas, local
    # functions and other objects plain pickle rejects can be shipped.

    def dumps(self, obj):
        try:
            return cloudpickle.dumps(obj, pickle_protocol)
        except pickle.PickleError:
            # already a meaningful pickling error; propagate unchanged
            raise
        except Exception as e:
            emsg = str(e)
            # struct's "'i' format requires ..." message indicates the
            # payload exceeded the 32-bit framing limit
            if "'i' format requires" in emsg:
                msg = "Object too large to serialize: %s" % emsg
            else:
                msg = "Could not serialize object: %s: %s" % (e.__class__.__name__, emsg)
            print_exec(sys.stderr)
            raise pickle.PicklingError(msg)
class MarshalSerializer(FramedSerializer):

    """
    Serializes objects using Python's Marshal serializer:
    http://docs.python.org/2/library/marshal.html
    This serializer is faster than PickleSerializer but supports fewer datatypes.
    """

    def dumps(self, obj):
        # marshal handles only core builtin types (ints, strings,
        # lists/tuples/dicts of them, ...)
        return marshal.dumps(obj)

    def loads(self, obj):
        return marshal.loads(obj)
class AutoSerializer(FramedSerializer):

    """
    Choose marshal or pickle as serialization protocol automatically
    """

    def __init__(self):
        FramedSerializer.__init__(self)
        # becomes b'P' permanently after the first marshal failure
        self._type = None

    def dumps(self, obj):
        """Serialize with marshal when possible, tagging the payload with a
        one-byte codec marker; fall back to pickle for unsupported types."""
        if self._type is not None:
            return b'P' + pickle.dumps(obj, -1)
        try:
            return b'M' + marshal.dumps(obj)
        except Exception:
            self._type = b'P'
            return b'P' + pickle.dumps(obj, -1)

    def loads(self, obj):
        """Inverse of dumps(): dispatch on the one-byte codec marker."""
        # BUGFIX: slice (obj[0:1]) instead of indexing (obj[0]). Indexing a
        # bytes object in Python 3 yields an int, which never compares equal
        # to b'M'/b'P', so every load raised ValueError.
        _type = obj[0:1]
        if _type == b'M':
            return marshal.loads(obj[1:])
        elif _type == b'P':
            return pickle.loads(obj[1:])
        else:
            raise ValueError("invalid serialization type: %s" % _type)
class CompressedSerializer(FramedSerializer):

    """
    Compress the serialized data
    """

    def __init__(self, serializer):
        FramedSerializer.__init__(self)
        assert isinstance(serializer, FramedSerializer), "serializer must be a FramedSerializer"
        self.serializer = serializer

    def dumps(self, obj):
        # zlib level 1: fastest compression; adequate for wire transfer
        return zlib.compress(self.serializer.dumps(obj), 1)

    def loads(self, obj):
        return self.serializer.loads(zlib.decompress(obj))

    def __repr__(self):
        return "CompressedSerializer(%s)" % self.serializer
class UTF8Deserializer(Serializer):

    """
    Deserializes streams written by String.getBytes.
    """

    def __init__(self, use_unicode=True):
        self.use_unicode = use_unicode

    def loads(self, stream):
        """Read one length-prefixed string; None for the NULL sentinel,
        EOFError for the end-of-data-section sentinel."""
        length = read_int(stream)
        if length == SpecialLengths.END_OF_DATA_SECTION:
            raise EOFError
        if length == SpecialLengths.NULL:
            return None
        raw = stream.read(length)
        if self.use_unicode:
            return raw.decode("utf-8")
        return raw

    def load_stream(self, stream):
        # struct.error covers a truncated length prefix at end of stream
        try:
            while True:
                yield self.loads(stream)
        except (struct.error, EOFError):
            return

    def __repr__(self):
        return "UTF8Deserializer(%s)" % self.use_unicode
def read_long(stream):
    """Read a big-endian signed 64-bit integer; raise EOFError if the
    stream is exhausted."""
    raw = stream.read(8)
    if not raw:
        raise EOFError
    (value,) = struct.unpack("!q", raw)
    return value
def write_long(value, stream):
    """Write `value` to `stream` as a big-endian signed 64-bit integer."""
    stream.write(struct.pack("!q", value))
def pack_long(value):
    """Return `value` packed as a big-endian signed 64-bit integer."""
    return struct.pack("!q", value)
def read_int(stream):
    """Read a big-endian signed 32-bit integer; raise EOFError if the
    stream is exhausted."""
    raw = stream.read(4)
    if not raw:
        raise EOFError
    (value,) = struct.unpack("!i", raw)
    return value
def write_int(value, stream):
    """Write `value` to `stream` as a big-endian signed 32-bit integer."""
    stream.write(struct.pack("!i", value))
def read_bool(stream):
    """Read a single byte as a boolean; raise EOFError if the stream is
    exhausted."""
    raw = stream.read(1)
    if not raw:
        raise EOFError
    (value,) = struct.unpack("!?", raw)
    return value
def write_with_length(obj, stream):
    """Write `obj` (a bytes-like value) to `stream` as one length-prefixed frame."""
    write_int(len(obj), stream)
    stream.write(obj)
class ChunkedStream(object):

    """
    This is a file-like object takes a stream of data, of unknown length, and breaks it into fixed
    length frames. The intended use case is serializing large data and sending it immediately over
    a socket -- we do not want to buffer the entire data before sending it, but the receiving end
    needs to know whether or not there is more data coming.
    It works by buffering the incoming data in some fixed-size chunks. If the buffer is full, it
    first sends the buffer size, then the data. This repeats as long as there is more data to send.
    When this is closed, it sends the length of whatever data is in the buffer, then that data, and
    finally a "length" of -1 to indicate the stream has completed.
    """

    def __init__(self, wrapped, buffer_size):
        self.buffer_size = buffer_size       # frame payload size, in bytes
        self.buffer = bytearray(buffer_size)
        self.current_pos = 0                 # next free index in self.buffer
        self.wrapped = wrapped               # underlying writable stream

    def write(self, bytes):
        byte_pos = 0                         # read cursor into `bytes`
        byte_remaining = len(bytes)
        while byte_remaining > 0:
            new_pos = byte_remaining + self.current_pos
            if new_pos < self.buffer_size:
                # just put it in our buffer
                self.buffer[self.current_pos:new_pos] = bytes[byte_pos:]
                self.current_pos = new_pos
                byte_remaining = 0
            else:
                # fill the buffer, send the length then the contents, and start filling again
                # (note: an exact fill also flushes, via this branch)
                space_left = self.buffer_size - self.current_pos
                new_byte_pos = byte_pos + space_left
                self.buffer[self.current_pos:self.buffer_size] = bytes[byte_pos:new_byte_pos]
                write_int(self.buffer_size, self.wrapped)
                self.wrapped.write(self.buffer)
                byte_remaining -= space_left
                byte_pos = new_byte_pos
                self.current_pos = 0

    def close(self):
        # if there is anything left in the buffer, write it out first
        if self.current_pos > 0:
            write_int(self.current_pos, self.wrapped)
            self.wrapped.write(self.buffer[:self.current_pos])
        # -1 length indicates to the receiving end that we're done.
        write_int(-1, self.wrapped)
        self.wrapped.close()

    @property
    def closed(self):
        """
        Return True if the `wrapped` object has been closed.
        NOTE: this property is required by pyarrow to be used as a file-like object in
        pyarrow.RecordBatchStreamWriter from ArrowStreamSerializer
        """
        return self.wrapped.closed
if __name__ == '__main__':
    # Run this module's doctests; a non-zero exit code signals failure
    # to the calling test harness.
    import doctest
    (failure_count, test_count) = doctest.testmod()
    if failure_count:
        sys.exit(-1)
| apache-2.0 |
wscullin/spack | var/spack/repos/builtin/packages/py-appdirs/package.py | 3 | 2067 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyAppdirs(PythonPackage):
    """A small Python module for determining appropriate platform-specific
    dirs, e.g. a "user data dir"."""

    homepage = "https://github.com/ActiveState/appdirs"
    url = "https://pypi.io/packages/source/a/appdirs/appdirs-1.4.3.tar.gz"

    # modules that `spack test` imports to sanity-check the installation
    import_modules = ['appdirs']

    # version(<version string>, <md5 checksum of the source tarball>)
    version('1.4.3', '44c679904082a2133f5566c8a0d3ab42')
    version('1.4.0', '1d17b4c9694ab84794e228f28dc3275b')

    # older releases import setuptools unconditionally in setup.py
    patch('setuptools-import.patch', when='@:1.4.0')

    # Newer versions of setuptools require appdirs. Although setuptools is an
    # optional dependency of appdirs, if it is not found, setup.py will
    # fallback on distutils.core instead. Don't add a setuptools dependency
    # or we won't be able to bootstrap setuptools.
    # depends_on('py-setuptools', type='build')
| lgpl-2.1 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/urllib.py | 2 | 61452 | """Open an arbitrary URL.
See the following document for more info on URLs:
"Names and Addresses, URIs, URLs, URNs, URCs", at
http://www.w3.org/pub/WWW/Addressing/Overview.html
See also the HTTP spec (from which the error codes are derived):
"HTTP - Hypertext Transfer Protocol", at
http://www.w3.org/pub/WWW/Protocols/
Related standards and specs:
- RFC1808: the "relative URL" spec. (authoritative status)
- RFC1738 - the "URL standard". (authoritative status)
- RFC1630 - the "URI spec". (informational status)
The object returned by URLopener().open(file) will differ per
protocol. All you know is that is has methods read(), readline(),
readlines(), fileno(), close() and info(). The read*(), fileno()
and close() methods work like those of open files.
The info() method returns a mimetools.Message object which can be
used to query various info about the object, if available.
(mimetools.Message objects are queried with the getheader() method.)
"""
import string
import socket
import os
import time
import sys
import base64
import re
from urlparse import urljoin as basejoin
__all__ = ["urlopen", "URLopener", "FancyURLopener", "urlretrieve",
"urlcleanup", "quote", "quote_plus", "unquote", "unquote_plus",
"urlencode", "url2pathname", "pathname2url", "splittag",
"localhost", "thishost", "ftperrors", "basejoin", "unwrap",
"splittype", "splithost", "splituser", "splitpasswd", "splitport",
"splitnport", "splitquery", "splitattr", "splitvalue",
"getproxies"]
__version__ = '1.17' # XXX This version is not always updated :-(
MAXFTPCACHE = 10 # Trim the ftp cache beyond this size
# Helper for non-unix systems: pick the platform-appropriate URL<->path
# converters at import time; the generic fallbacks below just (un)quote.
if os.name == 'nt':
    from nturl2path import url2pathname, pathname2url
elif os.name == 'riscos':
    from rourl2path import url2pathname, pathname2url
else:
    def url2pathname(pathname):
        """OS-specific conversion from a relative URL of the 'file' scheme
        to a file system path; not recommended for general use."""
        return unquote(pathname)

    def pathname2url(pathname):
        """OS-specific conversion from a file system path to a relative URL
        of the 'file' scheme; not recommended for general use."""
        return quote(pathname)
# This really consists of two pieces:
# (1) a class which handles opening of all sorts of URLs
# (plus assorted utilities etc.)
# (2) a set of functions for parsing URLs
# XXX Should these be separated out into different modules?
# Shortcut for basic usage
_urlopener = None
def urlopen(url, data=None, proxies=None, context=None):
    """Create a file-like object for the specified URL to read from.

    Reuses a cached module-wide FancyURLopener unless explicit `proxies`
    or `context` are supplied, in which case a dedicated opener is built.
    If `data` is given, a POST request is issued instead of a GET.
    """
    # BUGFIX: removed leftover debug print() calls that polluted stdout of
    # every urlopen() call; no behavioral change otherwise.
    from warnings import warnpy3k
    warnpy3k("urllib.urlopen() has been removed in Python 3.0 in "
             "favor of urllib2.urlopen()", stacklevel=2)
    global _urlopener
    if proxies is not None or context is not None:
        opener = FancyURLopener(proxies=proxies, context=context)
    elif not _urlopener:
        # lazily create and cache the shared opener
        _urlopener = opener = FancyURLopener()
    else:
        opener = _urlopener
    if data is None:
        return opener.open(url)
    else:
        return opener.open(url, data)
def urlretrieve(url, filename=None, reporthook=None, data=None, context=None):
    """Retrieve *url* into a local file; return (filename, headers).

    A caller-supplied SSL *context* gets its own uncached opener;
    otherwise the shared module-global opener is (lazily) used.
    """
    global _urlopener
    if context is not None:
        opener = FancyURLopener(context=context)
    elif _urlopener:
        opener = _urlopener
    else:
        _urlopener = opener = FancyURLopener()
    return opener.retrieve(url, filename, reporthook, data)
def urlcleanup():
    """Discard temporary files created by earlier calls and reset the
    module-level quoting and FTP-connection caches."""
    opener = _urlopener
    if opener:
        opener.cleanup()
    _safe_quoters.clear()
    ftpcache.clear()
# check for SSL
try:
import ssl
except:
_have_ssl = False
else:
_have_ssl = True
# exception raised when downloaded size does not match content-length
class ContentTooShortError(IOError):
    """Raised when a download ends before Content-Length bytes arrived.

    The partial payload is kept on the exception so callers can inspect
    or salvage whatever data was actually received.
    """
    def __init__(self, message, content):
        IOError.__init__(self, message)
        # Retain the truncated body for the caller.
        self.content = content
ftpcache = {}  # shared cache of open ftpwrapper connections, keyed by (user, host, port, dirs)
class URLopener:
"""Class to open URLs.
This is a class rather than just a subroutine because we may need
more than one set of global protocol-specific options.
Note -- this is a base class for those who don't want the
automatic handling of errors type 302 (relocated) and 401
(authorization needed)."""
__tempfiles = None
version = "Python-urllib/%s" % __version__
# Constructor
def __init__(self, proxies=None, context=None, **x509):
if proxies is None:
proxies = getproxies()
assert hasattr(proxies, 'has_key'), "proxies must be a mapping"
self.proxies = proxies
self.key_file = x509.get('key_file')
self.cert_file = x509.get('cert_file')
self.context = context
self.addheaders = [('User-Agent', self.version), ('Accept', '*/*')]
self.__tempfiles = []
self.__unlink = os.unlink # See cleanup()
self.tempcache = None
# Undocumented feature: if you assign {} to tempcache,
# it is used to cache files retrieved with
# self.retrieve(). This is not enabled by default
# since it does not work for changing documents (and I
# haven't got the logic to check expiration headers
# yet).
self.ftpcache = ftpcache
# Undocumented feature: you can use a different
# ftp cache by assigning to the .ftpcache member;
# in case you want logically independent URL openers
# XXX This is not threadsafe. Bah.
def __del__(self):
self.close()
def close(self):
self.cleanup()
def cleanup(self):
# This code sometimes runs when the rest of this module
# has already been deleted, so it can't use any globals
# or import anything.
if self.__tempfiles:
for file in self.__tempfiles:
try:
self.__unlink(file)
except OSError:
pass
del self.__tempfiles[:]
if self.tempcache:
self.tempcache.clear()
def addheader(self, *args):
"""Add a header to be used by the HTTP interface only
e.g. u.addheader('Accept', 'sound/basic')"""
self.addheaders.append(args)
# External interface
def open(self, fullurl, data=None):
print("open(self, fullurl, data=None):")
"""Use URLopener().open(file) instead of open(file, 'r')."""
fullurl = unwrap(toBytes(fullurl))
print("fullurl = unwrap(toBytes(fullurl))")
# percent encode url, fixing lame server errors for e.g, like space
# within url paths.
fullurl = quote(fullurl, safe="%/:=&?~#+!$,;'@()*[]|")
print("fullurl = quote(fullurl, safe=%/:=&?~#+!$,;'@()*[]|")
if self.tempcache and fullurl in self.tempcache:
print("if self.tempcache and fullurl in self.tempcache:")
filename, headers = self.tempcache[fullurl]
print("filename, headers = self.tempcache[fullurl]")
fp = open(filename, 'rb')
print("fp = open(filename, 'rb')")
return addinfourl(fp, headers, fullurl)
urltype, url = splittype(fullurl)
print("urltype, url = splittype(fullurl)")
if not urltype:
print("if not urltype:")
urltype = 'file'
if urltype in self.proxies:
print("if urltype in self.proxies:")
proxy = self.proxies[urltype]
print("proxy = self.proxies[urltype]")
urltype, proxyhost = splittype(proxy)
print("urltype, proxyhost = splittype(proxy)")
host, selector = splithost(proxyhost)
print("host, selector = splithost(proxyhost)")
url = (host, fullurl) # Signal special case to open_*()
print("url = (host, fullurl)")
else:
proxy = None
print("proxy = None")
name = 'open_' + urltype
print("name = 'open_' + urltype")
self.type = urltype
name = name.replace('-', '_')
if not hasattr(self, name):
print("if not hasattr(self, name):")
if proxy:
return self.open_unknown_proxy(proxy, fullurl, data)
else:
return self.open_unknown(fullurl, data)
try:
print("try:")
if data is None:
print("if data is None:")
a = getattr(self, name)(url)
print(a)
return getattr(self, name)(url)
else:
print("else")
a = getattr(self, name)(url, data)
print(a)
return getattr(self, name)(url, data)
except socket.error, msg:
print("except socket.error, msg:")
raise IOError, ('socket error', msg), sys.exc_info()[2]
def open_unknown(self, fullurl, data=None):
"""Overridable interface to open unknown URL type."""
type, url = splittype(fullurl)
raise IOError, ('url error', 'unknown url type', type)
def open_unknown_proxy(self, proxy, fullurl, data=None):
"""Overridable interface to open unknown URL type."""
type, url = splittype(fullurl)
raise IOError, ('url error', 'invalid proxy for %s' % type, proxy)
# External interface
def retrieve(self, url, filename=None, reporthook=None, data=None):
"""retrieve(url) returns (filename, headers) for a local object
or (tempfilename, headers) for a remote object."""
url = unwrap(toBytes(url))
if self.tempcache and url in self.tempcache:
return self.tempcache[url]
type, url1 = splittype(url)
if filename is None and (not type or type == 'file'):
try:
fp = self.open_local_file(url1)
hdrs = fp.info()
fp.close()
return url2pathname(splithost(url1)[1]), hdrs
except IOError:
pass
fp = self.open(url, data)
try:
headers = fp.info()
if filename:
tfp = open(filename, 'wb')
else:
import tempfile
garbage, path = splittype(url)
garbage, path = splithost(path or "")
path, garbage = splitquery(path or "")
path, garbage = splitattr(path or "")
suffix = os.path.splitext(path)[1]
(fd, filename) = tempfile.mkstemp(suffix)
self.__tempfiles.append(filename)
tfp = os.fdopen(fd, 'wb')
try:
result = filename, headers
if self.tempcache is not None:
self.tempcache[url] = result
bs = 1024*8
size = -1
read = 0
blocknum = 0
if "content-length" in headers:
size = int(headers["Content-Length"])
if reporthook:
reporthook(blocknum, bs, size)
while 1:
block = fp.read(bs)
if block == "":
break
read += len(block)
tfp.write(block)
blocknum += 1
if reporthook:
reporthook(blocknum, bs, size)
finally:
tfp.close()
finally:
fp.close()
# raise exception if actual size does not match content-length header
if size >= 0 and read < size:
raise ContentTooShortError("retrieval incomplete: got only %i out "
"of %i bytes" % (read, size), result)
return result
# Each method named open_<type> knows how to open that type of URL
def open_http(self, url, data=None):
"""Use HTTP protocol."""
import httplib
user_passwd = None
proxy_passwd= None
if isinstance(url, str):
host, selector = splithost(url)
if host:
user_passwd, host = splituser(host)
host = unquote(host)
realhost = host
else:
host, selector = url
# check whether the proxy contains authorization information
proxy_passwd, host = splituser(host)
# now we proceed with the url we want to obtain
urltype, rest = splittype(selector)
url = rest
user_passwd = None
if urltype.lower() != 'http':
realhost = None
else:
realhost, rest = splithost(rest)
if realhost:
user_passwd, realhost = splituser(realhost)
if user_passwd:
selector = "%s://%s%s" % (urltype, realhost, rest)
if proxy_bypass(realhost):
host = realhost
#print "proxy via http:", host, selector
if not host: raise IOError, ('http error', 'no host given')
if proxy_passwd:
proxy_passwd = unquote(proxy_passwd)
proxy_auth = base64.b64encode(proxy_passwd).strip()
else:
proxy_auth = None
if user_passwd:
user_passwd = unquote(user_passwd)
auth = base64.b64encode(user_passwd).strip()
else:
auth = None
h = httplib.HTTP(host)
if data is not None:
h.putrequest('POST', selector)
h.putheader('Content-Type', 'application/x-www-form-urlencoded')
h.putheader('Content-Length', '%d' % len(data))
else:
h.putrequest('GET', selector)
if proxy_auth: h.putheader('Proxy-Authorization', 'Basic %s' % proxy_auth)
if auth: h.putheader('Authorization', 'Basic %s' % auth)
if realhost: h.putheader('Host', realhost)
for args in self.addheaders: h.putheader(*args)
h.endheaders(data)
errcode, errmsg, headers = h.getreply()
fp = h.getfile()
if errcode == -1:
if fp: fp.close()
# something went wrong with the HTTP status line
raise IOError, ('http protocol error', 0,
'got a bad status line', None)
# According to RFC 2616, "2xx" code indicates that the client's
# request was successfully received, understood, and accepted.
if (200 <= errcode < 300):
return addinfourl(fp, headers, "http:" + url, errcode)
else:
if data is None:
return self.http_error(url, fp, errcode, errmsg, headers)
else:
return self.http_error(url, fp, errcode, errmsg, headers, data)
def http_error(self, url, fp, errcode, errmsg, headers, data=None):
"""Handle http errors.
Derived class can override this, or provide specific handlers
named http_error_DDD where DDD is the 3-digit error code."""
# First check if there's a specific handler for this error
name = 'http_error_%d' % errcode
if hasattr(self, name):
method = getattr(self, name)
if data is None:
result = method(url, fp, errcode, errmsg, headers)
else:
result = method(url, fp, errcode, errmsg, headers, data)
if result: return result
return self.http_error_default(url, fp, errcode, errmsg, headers)
def http_error_default(self, url, fp, errcode, errmsg, headers):
"""Default error handler: close the connection and raise IOError."""
fp.close()
raise IOError, ('http error', errcode, errmsg, headers)
if _have_ssl:
def open_https(self, url, data=None):
"""Use HTTPS protocol."""
import httplib
user_passwd = None
proxy_passwd = None
if isinstance(url, str):
host, selector = splithost(url)
if host:
user_passwd, host = splituser(host)
host = unquote(host)
realhost = host
else:
host, selector = url
# here, we determine, whether the proxy contains authorization information
proxy_passwd, host = splituser(host)
urltype, rest = splittype(selector)
url = rest
user_passwd = None
if urltype.lower() != 'https':
realhost = None
else:
realhost, rest = splithost(rest)
if realhost:
user_passwd, realhost = splituser(realhost)
if user_passwd:
selector = "%s://%s%s" % (urltype, realhost, rest)
#print "proxy via https:", host, selector
if not host: raise IOError, ('https error', 'no host given')
if proxy_passwd:
proxy_passwd = unquote(proxy_passwd)
proxy_auth = base64.b64encode(proxy_passwd).strip()
else:
proxy_auth = None
if user_passwd:
user_passwd = unquote(user_passwd)
auth = base64.b64encode(user_passwd).strip()
else:
auth = None
h = httplib.HTTPS(host, 0,
key_file=self.key_file,
cert_file=self.cert_file,
context=self.context)
if data is not None:
h.putrequest('POST', selector)
h.putheader('Content-Type',
'application/x-www-form-urlencoded')
h.putheader('Content-Length', '%d' % len(data))
else:
h.putrequest('GET', selector)
if proxy_auth: h.putheader('Proxy-Authorization', 'Basic %s' % proxy_auth)
if auth: h.putheader('Authorization', 'Basic %s' % auth)
if realhost: h.putheader('Host', realhost)
for args in self.addheaders: h.putheader(*args)
h.endheaders(data)
errcode, errmsg, headers = h.getreply()
fp = h.getfile()
if errcode == -1:
if fp: fp.close()
# something went wrong with the HTTP status line
raise IOError, ('http protocol error', 0,
'got a bad status line', None)
# According to RFC 2616, "2xx" code indicates that the client's
# request was successfully received, understood, and accepted.
if (200 <= errcode < 300):
return addinfourl(fp, headers, "https:" + url, errcode)
else:
if data is None:
return self.http_error(url, fp, errcode, errmsg, headers)
else:
return self.http_error(url, fp, errcode, errmsg, headers,
data)
def open_file(self, url):
"""Use local file or FTP depending on form of URL."""
if not isinstance(url, str):
raise IOError, ('file error', 'proxy support for file protocol currently not implemented')
if url[:2] == '//' and url[2:3] != '/' and url[2:12].lower() != 'localhost/':
return self.open_ftp(url)
else:
return self.open_local_file(url)
def open_local_file(self, url):
"""Use local file."""
import mimetypes, mimetools, email.utils
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
host, file = splithost(url)
localname = url2pathname(file)
try:
stats = os.stat(localname)
except OSError, e:
raise IOError(e.errno, e.strerror, e.filename)
size = stats.st_size
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
mtype = mimetypes.guess_type(url)[0]
headers = mimetools.Message(StringIO(
'Content-Type: %s\nContent-Length: %d\nLast-modified: %s\n' %
(mtype or 'text/plain', size, modified)))
if not host:
urlfile = file
if file[:1] == '/':
urlfile = 'file://' + file
elif file[:2] == './':
raise ValueError("local file url may start with / or file:. Unknown url of type: %s" % url)
return addinfourl(open(localname, 'rb'),
headers, urlfile)
host, port = splitport(host)
if not port \
and socket.gethostbyname(host) in (localhost(), thishost()):
urlfile = file
if file[:1] == '/':
urlfile = 'file://' + file
return addinfourl(open(localname, 'rb'),
headers, urlfile)
raise IOError, ('local file error', 'not on local host')
def open_ftp(self, url):
"""Use FTP protocol."""
if not isinstance(url, str):
raise IOError, ('ftp error', 'proxy support for ftp protocol currently not implemented')
import mimetypes, mimetools
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
host, path = splithost(url)
if not host: raise IOError, ('ftp error', 'no host given')
host, port = splitport(host)
user, host = splituser(host)
if user: user, passwd = splitpasswd(user)
else: passwd = None
host = unquote(host)
user = user or ''
passwd = passwd or ''
host = socket.gethostbyname(host)
if not port:
import ftplib
port = ftplib.FTP_PORT
else:
port = int(port)
path, attrs = splitattr(path)
path = unquote(path)
dirs = path.split('/')
dirs, file = dirs[:-1], dirs[-1]
if dirs and not dirs[0]: dirs = dirs[1:]
if dirs and not dirs[0]: dirs[0] = '/'
key = user, host, port, '/'.join(dirs)
# XXX thread unsafe!
if len(self.ftpcache) > MAXFTPCACHE:
# Prune the cache, rather arbitrarily
for k in self.ftpcache.keys():
if k != key:
v = self.ftpcache[k]
del self.ftpcache[k]
v.close()
try:
if not key in self.ftpcache:
self.ftpcache[key] = \
ftpwrapper(user, passwd, host, port, dirs)
if not file: type = 'D'
else: type = 'I'
for attr in attrs:
attr, value = splitvalue(attr)
if attr.lower() == 'type' and \
value in ('a', 'A', 'i', 'I', 'd', 'D'):
type = value.upper()
(fp, retrlen) = self.ftpcache[key].retrfile(file, type)
mtype = mimetypes.guess_type("ftp:" + url)[0]
headers = ""
if mtype:
headers += "Content-Type: %s\n" % mtype
if retrlen is not None and retrlen >= 0:
headers += "Content-Length: %d\n" % retrlen
headers = mimetools.Message(StringIO(headers))
return addinfourl(fp, headers, "ftp:" + url)
except ftperrors(), msg:
raise IOError, ('ftp error', msg), sys.exc_info()[2]
def open_data(self, url, data=None):
"""Use "data" URL."""
if not isinstance(url, str):
raise IOError, ('data error', 'proxy support for data protocol currently not implemented')
# ignore POSTed data
#
# syntax of data URLs:
# dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
# mediatype := [ type "/" subtype ] *( ";" parameter )
# data := *urlchar
# parameter := attribute "=" value
import mimetools
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
[type, data] = url.split(',', 1)
except ValueError:
raise IOError, ('data error', 'bad data URL')
if not type:
type = 'text/plain;charset=US-ASCII'
semi = type.rfind(';')
if semi >= 0 and '=' not in type[semi:]:
encoding = type[semi+1:]
type = type[:semi]
else:
encoding = ''
msg = []
msg.append('Date: %s'%time.strftime('%a, %d %b %Y %H:%M:%S GMT',
time.gmtime(time.time())))
msg.append('Content-type: %s' % type)
if encoding == 'base64':
data = base64.decodestring(data)
else:
data = unquote(data)
msg.append('Content-Length: %d' % len(data))
msg.append('')
msg.append(data)
msg = '\n'.join(msg)
f = StringIO(msg)
headers = mimetools.Message(f, 0)
#f.fileno = None # needed for addinfourl
return addinfourl(f, headers, url)
class FancyURLopener(URLopener):
    """Derived class with handlers for errors we can handle (perhaps)."""
    def __init__(self, *args, **kwargs):
        URLopener.__init__(self, *args, **kwargs)
        # Cache of (realm@host) -> (user, passwd) collected by get_user_passwd().
        self.auth_cache = {}
        # tries/maxtries bound redirect recursion in http_error_302().
        self.tries = 0
        self.maxtries = 10
    def http_error_default(self, url, fp, errcode, errmsg, headers):
        """Default error handling -- don't raise an exception."""
        return addinfourl(fp, headers, "http:" + url, errcode)
    def http_error_302(self, url, fp, errcode, errmsg, headers, data=None):
        """Error 302 -- relocated (temporarily)."""
        self.tries += 1
        try:
            if self.maxtries and self.tries >= self.maxtries:
                # Too many consecutive redirects: surface as a 500 rather
                # than looping forever.
                if hasattr(self, "http_error_500"):
                    meth = self.http_error_500
                else:
                    meth = self.http_error_default
                return meth(url, fp, 500,
                            "Internal Server Error: Redirect Recursion",
                            headers)
            result = self.redirect_internal(url, fp, errcode, errmsg,
                                            headers, data)
            return result
        finally:
            self.tries = 0
    def redirect_internal(self, url, fp, errcode, errmsg, headers, data):
        # Prefer the standard Location header; fall back to the older URI one.
        if 'location' in headers:
            newurl = headers['location']
        elif 'uri' in headers:
            newurl = headers['uri']
        else:
            return
        fp.close()
        # In case the server sent a relative URL, join with original:
        newurl = basejoin(self.type + ":" + url, newurl)
        # For security reasons we do not allow redirects to protocols
        # other than HTTP, HTTPS or FTP.
        newurl_lower = newurl.lower()
        if not (newurl_lower.startswith('http://') or
                newurl_lower.startswith('https://') or
                newurl_lower.startswith('ftp://')):
            raise IOError('redirect error', errcode,
                          errmsg + " - Redirection to url '%s' is not allowed" %
                          newurl,
                          headers)
        return self.open(newurl)
    def http_error_301(self, url, fp, errcode, errmsg, headers, data=None):
        """Error 301 -- also relocated (permanently)."""
        return self.http_error_302(url, fp, errcode, errmsg, headers, data)
    def http_error_303(self, url, fp, errcode, errmsg, headers, data=None):
        """Error 303 -- also relocated (essentially identical to 302)."""
        return self.http_error_302(url, fp, errcode, errmsg, headers, data)
    def http_error_307(self, url, fp, errcode, errmsg, headers, data=None):
        """Error 307 -- relocated, but turn POST into error."""
        if data is None:
            return self.http_error_302(url, fp, errcode, errmsg, headers, data)
        else:
            return self.http_error_default(url, fp, errcode, errmsg, headers)
    def http_error_401(self, url, fp, errcode, errmsg, headers, data=None):
        """Error 401 -- authentication required.
        This function supports Basic authentication only."""
        # Each guard below delegates to the base default handler, which
        # raises IOError, so control does not fall through on failure.
        if not 'www-authenticate' in headers:
            URLopener.http_error_default(self, url, fp,
                                         errcode, errmsg, headers)
        stuff = headers['www-authenticate']
        import re
        match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff)
        if not match:
            URLopener.http_error_default(self, url, fp,
                                         errcode, errmsg, headers)
        scheme, realm = match.groups()
        if scheme.lower() != 'basic':
            URLopener.http_error_default(self, url, fp,
                                         errcode, errmsg, headers)
        name = 'retry_' + self.type + '_basic_auth'
        if data is None:
            return getattr(self,name)(url, realm)
        else:
            return getattr(self,name)(url, realm, data)
    def http_error_407(self, url, fp, errcode, errmsg, headers, data=None):
        """Error 407 -- proxy authentication required.
        This function supports Basic authentication only."""
        if not 'proxy-authenticate' in headers:
            URLopener.http_error_default(self, url, fp,
                                         errcode, errmsg, headers)
        stuff = headers['proxy-authenticate']
        import re
        match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff)
        if not match:
            URLopener.http_error_default(self, url, fp,
                                         errcode, errmsg, headers)
        scheme, realm = match.groups()
        if scheme.lower() != 'basic':
            URLopener.http_error_default(self, url, fp,
                                         errcode, errmsg, headers)
        name = 'retry_proxy_' + self.type + '_basic_auth'
        if data is None:
            return getattr(self,name)(url, realm)
        else:
            return getattr(self,name)(url, realm, data)
    def retry_proxy_http_basic_auth(self, url, realm, data=None):
        # Rebuild the proxy URL with credentials embedded and retry.
        host, selector = splithost(url)
        newurl = 'http://' + host + selector
        proxy = self.proxies['http']
        urltype, proxyhost = splittype(proxy)
        proxyhost, proxyselector = splithost(proxyhost)
        i = proxyhost.find('@') + 1
        proxyhost = proxyhost[i:]
        user, passwd = self.get_user_passwd(proxyhost, realm, i)
        if not (user or passwd): return None
        proxyhost = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + proxyhost
        self.proxies['http'] = 'http://' + proxyhost + proxyselector
        if data is None:
            return self.open(newurl)
        else:
            return self.open(newurl, data)
    def retry_proxy_https_basic_auth(self, url, realm, data=None):
        host, selector = splithost(url)
        newurl = 'https://' + host + selector
        proxy = self.proxies['https']
        urltype, proxyhost = splittype(proxy)
        proxyhost, proxyselector = splithost(proxyhost)
        i = proxyhost.find('@') + 1
        proxyhost = proxyhost[i:]
        user, passwd = self.get_user_passwd(proxyhost, realm, i)
        if not (user or passwd): return None
        proxyhost = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + proxyhost
        self.proxies['https'] = 'https://' + proxyhost + proxyselector
        if data is None:
            return self.open(newurl)
        else:
            return self.open(newurl, data)
    def retry_http_basic_auth(self, url, realm, data=None):
        # Embed user:passwd in the host part of the URL and retry.
        host, selector = splithost(url)
        i = host.find('@') + 1
        host = host[i:]
        user, passwd = self.get_user_passwd(host, realm, i)
        if not (user or passwd): return None
        host = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + host
        newurl = 'http://' + host + selector
        if data is None:
            return self.open(newurl)
        else:
            return self.open(newurl, data)
    def retry_https_basic_auth(self, url, realm, data=None):
        host, selector = splithost(url)
        i = host.find('@') + 1
        host = host[i:]
        user, passwd = self.get_user_passwd(host, realm, i)
        if not (user or passwd): return None
        host = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + host
        newurl = 'https://' + host + selector
        if data is None:
            return self.open(newurl)
        else:
            return self.open(newurl, data)
    def get_user_passwd(self, host, realm, clear_cache=0):
        # clear_cache is truthy when the cached credentials just failed.
        key = realm + '@' + host.lower()
        if key in self.auth_cache:
            if clear_cache:
                del self.auth_cache[key]
            else:
                return self.auth_cache[key]
        user, passwd = self.prompt_user_passwd(host, realm)
        if user or passwd: self.auth_cache[key] = (user, passwd)
        return user, passwd
    def prompt_user_passwd(self, host, realm):
        """Override this in a GUI environment!"""
        import getpass
        try:
            user = raw_input("Enter username for %s at %s: " % (realm,
                                                                host))
            passwd = getpass.getpass("Enter password for %s in %s at %s: " %
                (user, realm, host))
            return user, passwd
        except KeyboardInterrupt:
            print
            return None, None
# Utility functions
_localhost = None
def localhost():
    """Return the IP address of the magic hostname 'localhost'."""
    global _localhost
    if _localhost is not None:
        return _localhost
    # Resolve once and memoise; repeated DNS lookups would be wasteful.
    _localhost = socket.gethostbyname('localhost')
    return _localhost
_thishost = None
def thishost():
    """Return the IP address of the current host."""
    global _thishost
    if _thishost is not None:
        return _thishost
    try:
        _thishost = socket.gethostbyname(socket.gethostname())
    except socket.gaierror:
        # The local hostname may not resolve (e.g. no DNS entry);
        # fall back to whatever 'localhost' resolves to.
        _thishost = socket.gethostbyname('localhost')
    return _thishost
_ftperrors = None
def ftperrors():
    """Return the set of errors raised by the FTP class."""
    global _ftperrors
    if _ftperrors is not None:
        return _ftperrors
    # Import lazily so the module loads even where ftplib is unwanted.
    import ftplib
    _ftperrors = ftplib.all_errors
    return _ftperrors
_noheaders = None
def noheaders():
    """Return an empty mimetools.Message object (cached singleton)."""
    global _noheaders
    if _noheaders is not None:
        return _noheaders
    import mimetools
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
    _noheaders = mimetools.Message(StringIO(), 0)
    _noheaders.fp.close()   # Recycle file descriptor
    return _noheaders
# Utility classes
class ftpwrapper:
    """Class used by open_ftp() for cache of open FTP connections."""
    def __init__(self, user, passwd, host, port, dirs,
                 timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                 persistent=True):
        self.user = user
        self.passwd = passwd
        self.host = host
        self.port = port
        self.dirs = dirs
        self.timeout = timeout
        # refcount tracks outstanding transfer file objects handed out by
        # retrfile(); teardown is deferred until they are all closed.
        self.refcount = 0
        self.keepalive = persistent
        try:
            self.init()
        except:
            # Ensure a half-built connection is torn down before re-raising.
            self.close()
            raise
    def init(self):
        # (Re)establish the FTP control connection and change to the
        # requested directory.
        import ftplib
        self.busy = 0
        self.ftp = ftplib.FTP()
        self.ftp.connect(self.host, self.port, self.timeout)
        self.ftp.login(self.user, self.passwd)
        _target = '/'.join(self.dirs)
        self.ftp.cwd(_target)
    def retrfile(self, file, type):
        """Start retrieving *file* (or a directory listing); return
        (file-like object, length-or-None)."""
        import ftplib
        self.endtransfer()
        if type in ('d', 'D'): cmd = 'TYPE A'; isdir = 1
        else: cmd = 'TYPE ' + type; isdir = 0
        try:
            self.ftp.voidcmd(cmd)
        except ftplib.all_errors:
            # Control connection may have dropped: reconnect and retry once.
            self.init()
            self.ftp.voidcmd(cmd)
        conn = None
        if file and not isdir:
            # Try to retrieve as a file
            try:
                cmd = 'RETR ' + file
                conn, retrlen = self.ftp.ntransfercmd(cmd)
            except ftplib.error_perm, reason:
                # 550 means "not a plain file"; fall through to a listing.
                if str(reason)[:3] != '550':
                    raise IOError, ('ftp error', reason), sys.exc_info()[2]
        if not conn:
            # Set transfer mode to ASCII!
            self.ftp.voidcmd('TYPE A')
            # Try a directory listing. Verify that directory exists.
            if file:
                pwd = self.ftp.pwd()
                try:
                    try:
                        self.ftp.cwd(file)
                    except ftplib.error_perm, reason:
                        raise IOError, ('ftp error', reason), sys.exc_info()[2]
                finally:
                    self.ftp.cwd(pwd)
                cmd = 'LIST ' + file
            else:
                cmd = 'LIST'
            conn, retrlen = self.ftp.ntransfercmd(cmd)
        self.busy = 1
        ftpobj = addclosehook(conn.makefile('rb'), self.file_close)
        self.refcount += 1
        conn.close()
        # Pass back both a suitably decorated object and a retrieval length
        return (ftpobj, retrlen)
    def endtransfer(self):
        self.busy = 0
    def close(self):
        # Mark non-persistent; actually close only when no transfers remain.
        self.keepalive = False
        if self.refcount <= 0:
            self.real_close()
    def file_close(self):
        # Hook invoked when a transfer file object returned by retrfile()
        # is closed.
        self.endtransfer()
        self.refcount -= 1
        if self.refcount <= 0 and not self.keepalive:
            self.real_close()
    def real_close(self):
        self.endtransfer()
        try:
            self.ftp.close()
        except ftperrors():
            pass
class addbase:
    """Base class for addinfo and addclosehook.

    Wraps a file-like object and forwards the common file methods to it,
    so subclasses can bolt extra behaviour (info(), close hooks) on top.
    """
    def __init__(self, fp):
        self.fp = fp
        # Bind the core read methods straight through to the wrapped object.
        self.read = fp.read
        self.readline = fp.readline
        if hasattr(fp, "readlines"):
            self.readlines = fp.readlines
        if hasattr(fp, "fileno"):
            self.fileno = fp.fileno
        else:
            self.fileno = lambda: None
        if hasattr(fp, "__iter__"):
            self.__iter__ = fp.__iter__
            if hasattr(fp, "next"):
                self.next = fp.next
    def __repr__(self):
        return '<%s at %r whose fp = %r>' % (self.__class__.__name__,
                                             id(self), self.fp)
    def close(self):
        # Drop the forwarded methods first so stale references fail fast,
        # then close and release the underlying file object.
        self.read = None
        self.readline = None
        self.readlines = None
        self.fileno = None
        if self.fp:
            self.fp.close()
        self.fp = None
class addclosehook(addbase):
    """File wrapper that invokes a user-supplied hook on close."""
    def __init__(self, fp, closehook, *hookargs):
        addbase.__init__(self, fp)
        self.closehook = closehook
        self.hookargs = hookargs
    def close(self):
        try:
            hook = self.closehook
            args = self.hookargs
            if hook:
                # Clear first so the hook fires at most once, even if
                # close() is re-entered from within the hook.
                self.closehook = None
                self.hookargs = None
                hook(*args)
        finally:
            addbase.close(self)
class addinfo(addbase):
    """class to add an info() method to an open file."""
    def __init__(self, fp, headers):
        addbase.__init__(self, fp)
        self.headers = headers
    def info(self):
        # Mirror the httplib response interface: expose the header object.
        return self.headers
class addinfourl(addbase):
    """class to add info() and geturl() methods to an open file."""
    def __init__(self, fp, headers, url, code=None):
        addbase.__init__(self, fp)
        self.headers = headers
        self.url = url
        self.code = code
    def getcode(self):
        # HTTP status code of the response, or None for non-HTTP URLs.
        return self.code
    def geturl(self):
        # The URL actually opened (may differ from the requested one).
        return self.url
    def info(self):
        return self.headers
# Utilities to parse URLs (most of these return None for missing parts):
# unwrap('<URL:type://host/path>') --> 'type://host/path'
# splittype('type:opaquestring') --> 'type', 'opaquestring'
# splithost('//host[:port]/path') --> 'host[:port]', '/path'
# splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'
# splitpasswd('user:passwd') -> 'user', 'passwd'
# splitport('host:port') --> 'host', 'port'
# splitquery('/path?query') --> '/path', 'query'
# splittag('/path#tag') --> '/path', 'tag'
# splitattr('/path;attr1=value1;attr2=value2;...') ->
# '/path', ['attr1=value1', 'attr2=value2', ...]
# splitvalue('attr=value') --> 'attr', 'value'
# unquote('abc%20def') -> 'abc def'
# quote('abc def') -> 'abc%20def')
try:
    unicode
except NameError:
    # No unicode builtin (e.g. Python 3): nothing can be a unicode instance.
    def _is_unicode(x):
        return 0
else:
    # Python 2: genuine isinstance check against the unicode type.
    def _is_unicode(x):
        return isinstance(x, unicode)
def toBytes(url):
    """toBytes(u"URL") --> 'URL'."""
    # Most URL schemes require ASCII. If that changes, the conversion
    # can be relaxed
    if not _is_unicode(url):
        return url
    try:
        return url.encode("ASCII")
    except UnicodeError:
        raise UnicodeError("URL " + repr(url) +
                           " contains non-ASCII characters")
def unwrap(url):
    """unwrap('<URL:type://host/path>') --> 'type://host/path'."""
    url = url.strip()
    # Peel one layer of <...> wrapping, then an optional 'URL:' prefix.
    if url.startswith('<') and url.endswith('>') and len(url) >= 2:
        url = url[1:-1].strip()
    if url.startswith('URL:'):
        url = url[4:].strip()
    return url
_typeprog = None
def splittype(url):
    """splittype('type:opaquestring') --> 'type', 'opaquestring'."""
    global _typeprog
    # Compile the scheme pattern lazily and cache it at module level.
    if _typeprog is None:
        import re
        _typeprog = re.compile('^([^/:]+):')
    m = _typeprog.match(url)
    if m is None:
        return None, url
    scheme = m.group(1)
    return scheme.lower(), url[len(scheme) + 1:]
_hostprog = None
def splithost(url):
    """splithost('//host[:port]/path') --> 'host[:port]', '/path'."""
    global _hostprog
    if _hostprog is None:
        import re
        _hostprog = re.compile('^//([^/?]*)(.*)$')
    m = _hostprog.match(url)
    if m is None:
        return None, url
    host_port, path = m.groups()
    # Normalise a non-empty path so it always begins with a slash.
    if path and not path.startswith('/'):
        path = '/' + path
    return host_port, path
_userprog = None
def splituser(host):
    """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
    global _userprog
    if _userprog is None:
        import re
        # Greedy first group: split on the *last* '@' in the string.
        _userprog = re.compile('^(.*)@(.*)$')
    m = _userprog.match(host)
    if m is None:
        return None, host
    return m.group(1, 2)
_passwdprog = None
def splitpasswd(user):
    """splitpasswd('user:passwd') -> 'user', 'passwd'."""
    global _passwdprog
    if _passwdprog is None:
        import re
        # re.S so a password may legitimately contain newline characters.
        _passwdprog = re.compile('^([^:]*):(.*)$',re.S)
    m = _passwdprog.match(user)
    if m is None:
        return user, None
    return m.group(1, 2)
# splittag('/path#tag') --> '/path', 'tag'
_portprog = None
def splitport(host):
    """splitport('host:port') --> 'host', 'port'."""
    global _portprog
    if _portprog is None:
        import re
        _portprog = re.compile('^(.*):([0-9]*)$')
    m = _portprog.match(host)
    if m:
        name, port = m.groups()
        # An empty port ('host:') is treated the same as no port at all,
        # but the trailing colon is still stripped.
        if port:
            return name, port
        host = name
    return host, None
_nportprog = None
def splitnport(host, defport=-1):
    """Split host and port, returning numeric port.
    Return given default port if no ':' found; defaults to -1.
    Return numerical port if a valid number are found after ':'.
    Return None if ':' but not a valid number."""
    global _nportprog
    if _nportprog is None:
        import re
        _nportprog = re.compile('^(.*):(.*)$')
    m = _nportprog.match(host)
    if m:
        name, port = m.group(1, 2)
        if port:
            try:
                nport = int(port)
            except ValueError:
                # ':' present but not numeric -> signal with None.
                nport = None
            return name, nport
        host = name
    return host, defport
_queryprog = None
def splitquery(url):
    """splitquery('/path?query') --> '/path', 'query'."""
    global _queryprog
    if _queryprog is None:
        import re
        # Greedy prefix: the split happens at the *last* '?'.
        _queryprog = re.compile('^(.*)\?([^?]*)$')
    m = _queryprog.match(url)
    if m is None:
        return url, None
    return m.group(1, 2)
_tagprog = None
def splittag(url):
    """splittag('/path#tag') --> '/path', 'tag'."""
    global _tagprog
    if _tagprog is None:
        import re
        # Greedy prefix: the split happens at the *last* '#'.
        _tagprog = re.compile('^(.*)#([^#]*)$')
    m = _tagprog.match(url)
    if m is None:
        return url, None
    return m.group(1, 2)
def splitattr(url):
    """splitattr('/path;attr1=value1;attr2=value2;...') ->
        '/path', ['attr1=value1', 'attr2=value2', ...]."""
    pieces = url.split(';')
    return pieces[0], pieces[1:]
_valueprog = None
def splitvalue(attr):
    """splitvalue('attr=value') --> 'attr', 'value'."""
    global _valueprog
    if _valueprog is None:
        import re
        # Non-greedy attr part: the split happens at the *first* '='.
        _valueprog = re.compile('^([^=]*)=(.*)$')
    m = _valueprog.match(attr)
    if m is None:
        return attr, None
    return m.group(1, 2)
# urlparse contains a duplicate of this method to avoid a circular import. If
# you update this method, also update the copy in urlparse. This code
# duplication does not exist in Python3.
_hexdig = '0123456789ABCDEFabcdef'
# Map every two-hex-digit pair (any case mix) to its character, so
# unquote() can decode a '%XY' escape with a single dict lookup.
_hextochr = dict((a + b, chr(int(a + b, 16)))
                 for a in _hexdig for b in _hexdig)
# Matches maximal runs of ASCII characters; used to carve unicode input
# into ASCII (decodable) and non-ASCII (passthrough) segments.
_asciire = re.compile('([\x00-\x7f]+)')
def unquote(s):
    """unquote('abc%20def') -> 'abc def'.

    Replace %XX escapes by their single-character equivalent.  A
    malformed escape (bad hex digits or a trailing '%') is passed
    through unchanged.  Unicode input (Python 2) is processed one
    ASCII run at a time and decoded back via latin-1.
    """
    if _is_unicode(s):
        if '%' not in s:
            return s
        # _asciire alternates non-ASCII / ASCII segments; only the ASCII
        # segments (odd indices) can contain %XX escapes.
        bits = _asciire.split(s)
        res = [bits[0]]
        append = res.append
        for i in range(1, len(bits), 2):
            append(unquote(str(bits[i])).decode('latin1'))
            append(bits[i + 1])
        return ''.join(res)

    bits = s.split('%')
    # fastpath: no '%' means nothing to unquote
    if len(bits) == 1:
        return s
    res = [bits[0]]
    append = res.append
    for item in bits[1:]:
        try:
            # KeyError here means the two characters after '%' were not
            # a valid hex pair: emit the '%' literally instead.
            append(_hextochr[item[:2]])
            append(item[2:])
        except KeyError:
            append('%')
            append(item)
    return ''.join(res)
def unquote_plus(s):
    """unquote_plus('%7e/abc+def') -> '~/abc def'.

    Like unquote(), but also converts '+' to a space (query-string
    convention).
    """
    return unquote(s.replace('+', ' '))
# Characters that never need quoting in any URL component.
always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
               'abcdefghijklmnopqrstuvwxyz'
               '0123456789' '_.-')
# Map each byte (as a length-1 str) to itself when always-safe, else to
# its '%XX' escape.  (Python 2: xrange and str(bytearray(...)) iterate
# the 256 byte values.)
_safe_map = {}
for i, c in zip(xrange(256), str(bytearray(xrange(256)))):
    _safe_map[c] = c if (i < 128 and c in always_safe) else '%{:02X}'.format(i)
# Lazy cache of (safe, always_safe) -> (quoter, safe), filled by quote().
_safe_quoters = {}
def quote(s, safe='/'):
    """quote('abc def') -> 'abc%20def'

    Percent-encode every character of *s* that is neither always-safe
    nor listed in *safe*.  By default '/' is left alone because quote()
    targets the path section of a URL, where slashes are genuine
    separators; RFC 2396 lists the full reserved set
    (";" "/" "?" ":" "@" "&" "=" "+" "$" ","), each reserved only in
    some URL components.
    """
    # fastpath: '' comes back untouched, but None is a caller error.
    if not s:
        if s is None:
            raise TypeError('None object cannot be quoted')
        return s
    cachekey = (safe, always_safe)
    try:
        (quoter, safe) = _safe_quoters[cachekey]
    except KeyError:
        # Build and cache a translation table for this `safe` set.
        safe_map = dict(_safe_map)
        for ch in safe:
            safe_map[ch] = ch
        quoter = safe_map.__getitem__
        safe = always_safe + safe
        _safe_quoters[cachekey] = (quoter, safe)
    # If stripping every safe character empties the string, nothing
    # needs encoding at all.
    if not s.rstrip(safe):
        return s
    return ''.join([quoter(ch) for ch in s])
def quote_plus(s, safe=''):
    """Quote the query fragment of a URL; replacing ' ' with '+'."""
    if ' ' not in s:
        return quote(s, safe)
    # Treat ' ' as safe during quoting so it survives, then swap to '+'.
    return quote(s, safe + ' ').replace(' ', '+')
def urlencode(query, doseq=0):
    """Encode a sequence of two-element tuples or dictionary into a URL query string.

    If any values in the query arg are sequences and doseq is true, each
    sequence element is converted to a separate parameter.

    If the query arg is a sequence of two-element tuples, the order of the
    parameters in the output will match the order of parameters in the
    input.
    """

    if hasattr(query,"items"):
        # mapping objects
        query = query.items()
    else:
        # it's a bother at times that strings and string-like objects are
        # sequences...
        try:
            # non-sequence items should not work with len()
            # non-empty strings will fail this
            if len(query) and not isinstance(query[0], tuple):
                raise TypeError
            # zero-length sequences of all types will get here and succeed,
            # but that's a minor nit - since the original implementation
            # allowed empty dicts that type of behavior probably should be
            # preserved for consistency
        except TypeError:
            # Re-raise with a clearer message while keeping the original
            # traceback (Python 2 three-argument raise statement).
            ty,va,tb = sys.exc_info()
            raise TypeError, "not a valid non-string sequence or mapping object", tb

    l = []
    if not doseq:
        # preserve old behavior: stringify every value whole, even
        # sequences (a list becomes its repr).
        for k, v in query:
            k = quote_plus(str(k))
            v = quote_plus(str(v))
            l.append(k + '=' + v)
    else:
        for k, v in query:
            k = quote_plus(str(k))
            if isinstance(v, str):
                v = quote_plus(v)
                l.append(k + '=' + v)
            elif _is_unicode(v):
                # is there a reasonable way to convert to ASCII?
                # encode generates a string, but "replace" or "ignore"
                # lose information and "strict" can raise UnicodeError
                v = quote_plus(v.encode("ASCII","replace"))
                l.append(k + '=' + v)
            else:
                try:
                    # is this a sufficient test for sequence-ness?
                    len(v)
                except TypeError:
                    # not a sequence
                    v = quote_plus(str(v))
                    l.append(k + '=' + v)
                else:
                    # loop over the sequence: one k=element pair each
                    for elt in v:
                        l.append(k + '=' + quote_plus(str(elt)))
    return '&'.join(l)
# Proxy handling
def getproxies_environment():
    """Return a dictionary of scheme -> proxy server URL mappings.

    Scan the environment for variables named <scheme>_proxy; this seems
    to be the standard convention.  The environment is processed in two
    passes so that all-lowercase variables win over other spellings.

    If you need a different way, you can pass a proxies dictionary to the
    [Fancy]URLopener constructor.
    """
    proxies = {}
    # First pass: accept any capitalization of the variable name.
    for name, value in os.environ.items():
        lowered = name.lower()
        if value and lowered[-6:] == '_proxy':
            proxies[lowered[:-6]] = value
    # CVE-2016-1000110 - If we are running as CGI script, forget HTTP_PROXY
    # (non-all-lowercase) as it may be set from the web server by a "Proxy:"
    # header from the client
    # If "proxy" is lowercase, it will still be used thanks to the next block
    if 'REQUEST_METHOD' in os.environ:
        proxies.pop('http', None)
    # Second pass: only names already ending in lowercase '_proxy';
    # a set-but-empty value removes the entry.
    for name, value in os.environ.items():
        if name[-6:] == '_proxy':
            name = name.lower()
            if value:
                proxies[name[:-6]] = value
            else:
                proxies.pop(name[:-6], None)
    return proxies
def proxy_bypass_environment(host, proxies=None):
    """Test if proxies should not be used for a particular host.

    Checks the proxies dict for the value of no_proxy, which should be a
    list of comma separated DNS suffixes, or '*' for all hosts.
    Returns 1 to bypass the proxy, 0 otherwise.
    """
    if proxies is None:
        proxies = getproxies_environment()
    # Without a no_proxy setting there is never a reason to bypass.
    if 'no' not in proxies:
        return 0
    no_proxy = proxies['no']
    # '*' is the special "always bypass" marker.
    if no_proxy == '*':
        return 1
    # Compare suffixes against the host both with and without its port.
    hostonly, port = splitport(host)
    for name in (entry.strip() for entry in no_proxy.split(',')):
        if not name:
            continue
        pattern = r'(.+\.)?%s$' % re.escape(name)
        if (re.match(pattern, hostonly, re.I)
                or re.match(pattern, host, re.I)):
            return 1
    return 0
if sys.platform == 'darwin':
from _scproxy import _get_proxy_settings, _get_proxies
def proxy_bypass_macosx_sysconf(host):
    """
    Return True iff this host shouldn't be accessed using a proxy

    This function uses the MacOSX framework SystemConfiguration
    to fetch the proxy information.
    """
    import re
    import socket
    from fnmatch import fnmatch

    hostonly, port = splitport(host)

    def ip2num(ipAddr):
        # Pack a dotted-quad string into a 32-bit int; shorter addresses
        # are zero-padded on the right.  (Python 2: map() returns a list,
        # so len() below is valid.)
        parts = ipAddr.split('.')
        parts = map(int, parts)
        if len(parts) != 4:
            parts = (parts + [0, 0, 0, 0])[:4]
        return (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8) | parts[3]

    proxy_settings = _get_proxy_settings()

    # Check for simple host names:
    if '.' not in host:
        if proxy_settings['exclude_simple']:
            return True

    hostIP = None

    for value in proxy_settings.get('exceptions', ()):
        # Items in the list are strings like these: *.local, 169.254/16
        if not value: continue

        m = re.match(r"(\d+(?:\.\d+)*)(/\d+)?", value)
        if m is not None:
            # Numeric exception, optionally CIDR-style: compare network
            # prefixes after resolving the host (resolved lazily, once).
            if hostIP is None:
                try:
                    hostIP = socket.gethostbyname(hostonly)
                    hostIP = ip2num(hostIP)
                except socket.error:
                    continue

            base = ip2num(m.group(1))
            mask = m.group(2)
            if mask is None:
                # No explicit prefix length: 8 bits per dotted group.
                mask = 8 * (m.group(1).count('.') + 1)
            else:
                mask = int(mask[1:])
            mask = 32 - mask

            if (hostIP >> mask) == (base >> mask):
                return True

        elif fnmatch(host, value):
            # Glob-style exception such as '*.local'.
            return True

    return False
def getproxies_macosx_sysconf():
    """Return a dictionary of scheme -> proxy server URL mappings.

    This function uses the MacOSX framework SystemConfiguration
    to fetch the proxy information.
    """
    # Thin wrapper; the parsing happens in the _scproxy C extension.
    return _get_proxies()
def proxy_bypass(host):
    """Return True, if a host should be bypassed.

    Checks proxy settings gathered from the environment, if specified, or
    from the MacOSX framework SystemConfiguration.
    """
    env_proxies = getproxies_environment()
    if env_proxies:
        return proxy_bypass_environment(host, env_proxies)
    return proxy_bypass_macosx_sysconf(host)
def getproxies():
    # Environment variables take precedence over the system configuration.
    return getproxies_environment() or getproxies_macosx_sysconf()
elif os.name == 'nt':
def getproxies_registry():
    """Return a dictionary of scheme -> proxy server URL mappings.

    Win32 uses the registry to store proxies.
    """
    proxies = {}
    try:
        import _winreg
    except ImportError:
        # Std module, so should be around - but you never know!
        return proxies
    try:
        internetSettings = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
            r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
        proxyEnable = _winreg.QueryValueEx(internetSettings,
                                           'ProxyEnable')[0]
        if proxyEnable:
            # Returned as Unicode but problems if not converted to ASCII
            proxyServer = str(_winreg.QueryValueEx(internetSettings,
                                                   'ProxyServer')[0])
            if '=' in proxyServer:
                # Per-protocol settings, e.g. "http=host:port;ftp=host:port"
                for p in proxyServer.split(';'):
                    protocol, address = p.split('=', 1)
                    # See if address has a type:// prefix
                    import re
                    if not re.match('^([^/:]+)://', address):
                        address = '%s://%s' % (protocol, address)
                    proxies[protocol] = address
            else:
                # Use one setting for all protocols
                if proxyServer[:5] == 'http:':
                    proxies['http'] = proxyServer
                else:
                    proxies['http'] = 'http://%s' % proxyServer
                    proxies['https'] = 'https://%s' % proxyServer
                    proxies['ftp'] = 'ftp://%s' % proxyServer
        internetSettings.Close()
    except (WindowsError, ValueError, TypeError):
        # Either registry key not found etc, or the value in an
        # unexpected format.
        # proxies already set up to be empty so nothing to do
        pass
    return proxies
def getproxies():
    """Return a dictionary of scheme -> proxy server URL mappings.

    Returns settings gathered from the environment, if specified,
    or the registry.  Environment variables take precedence.
    """
    return getproxies_environment() or getproxies_registry()
def proxy_bypass_registry(host):
    """Return 1 if *host* matches the registry ProxyOverride list, else 0."""
    try:
        import _winreg
        import re
    except ImportError:
        # Std modules, so should be around - but you never know!
        return 0
    try:
        internetSettings = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
            r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
        proxyEnable = _winreg.QueryValueEx(internetSettings,
                                           'ProxyEnable')[0]
        proxyOverride = str(_winreg.QueryValueEx(internetSettings,
                                                 'ProxyOverride')[0])
        # ^^^^ Returned as Unicode but problems if not converted to ASCII
    except WindowsError:
        return 0
    if not proxyEnable or not proxyOverride:
        return 0
    # try to make a host list from name and IP address.
    rawHost, port = splitport(host)
    host = [rawHost]
    try:
        addr = socket.gethostbyname(rawHost)
        if addr != rawHost:
            host.append(addr)
    except socket.error:
        pass
    try:
        fqdn = socket.getfqdn(rawHost)
        if fqdn != rawHost:
            host.append(fqdn)
    except socket.error:
        pass
    # make a check value list from the registry entry: replace the
    # '<local>' string by the localhost entry and the corresponding
    # canonical entry.
    proxyOverride = proxyOverride.split(';')
    # now check if we match one of the registry values.
    for test in proxyOverride:
        if test == '<local>':
            if '.' not in rawHost:
                return 1
            # NOTE(review): no `continue` here, so the literal '<local>'
            # string also falls through to the glob matching below --
            # confirm that is the intended behaviour.
        test = test.replace(".", r"\.")     # mask dots
        test = test.replace("*", r".*")     # change glob sequence
        test = test.replace("?", r".")      # change glob char
        for val in host:
            # print "%s <--> %s" %( test, val )
            if re.match(test, val, re.I):
                return 1
    return 0
def proxy_bypass(host):
    """Return True, if the host should be bypassed.

    Checks proxy settings gathered from the environment, if specified,
    or the registry.  Environment settings take precedence.
    """
    proxies = getproxies_environment()
    if proxies:
        return proxy_bypass_environment(host, proxies)
    else:
        return proxy_bypass_registry(host)
else:
# By default use environment variables
getproxies = getproxies_environment
proxy_bypass = proxy_bypass_environment
# Test and time quote() and unquote()
def test1():
    # Round-trip every byte value (repeated x4) through quote()/unquote()
    # and report the elapsed time.  (Python 2 print statements.)
    s = ''
    for i in range(256): s = s + chr(i)
    s = s*4
    t0 = time.time()
    qs = quote(s)
    uqs = unquote(qs)
    t1 = time.time()
    if uqs != s:
        print 'Wrong!'
        print repr(s)
        print repr(qs)
        print repr(uqs)
    print round(t1 - t0, 3), 'sec'
def reporthook(blocknum, blocksize, totalsize):
    # Report during remote transfers
    # (presumably used as an urlretrieve-style progress hook -- verify
    # against the caller).
    print "Block number: %d, Block size: %d, Total size: %d" % (
        blocknum, blocksize, totalsize)
| gpl-3.0 |
cnbeining/you-get | src/you_get/extractors/kugou.py | 9 | 1998 | #!/usr/bin/env python
__all__ = ['kugou_download']
from ..common import *
from json import loads
from base64 import b64decode
import re
import hashlib
def kugou_download(url, output_dir=".", merge=True, info_only=False, **kwargs):
    """Dispatch a Kugou URL.

    5sing.kugou.com song pages are downloaded directly; any other URL is
    treated as a www.kugou.com playlist page.
    """
    if "5sing" in url.lower():
        # 5sing.kugou.com embeds a base64-encoded JSON "ticket" holding
        # the direct file URL and the song name.
        html = get_html(url)
        ticket = r1(r'"ticket":\s*"(.*)"', html)
        meta = loads(str(b64decode(ticket), encoding="utf-8"))
        file_url = meta['file']
        title = meta['songName']
        songtype, ext, size = url_info(file_url)
        print_info(site_info, title, songtype, size)
        if not info_only:
            download_urls([file_url], title, ext, size, output_dir, merge=merge)
    else:
        return kugou_download_playlist(url, output_dir=output_dir, merge=merge, info_only=info_only)
def kugou_download_by_hash(title, hash_val, output_dir='.', merge=True, info_only=False):
    """Resolve a song's real URL from its hash via the tracker CDN, then
    download it.

    Sample page: http://www.kugou.com/yy/album/single/536957.html
    The tracker key is md5(hash + "kgcloud") (constant recovered by
    decompiling the site's SWF); cmd=4 requests mp3, cmd=3 would be m4a.
    """
    key = hashlib.new('md5', (hash_val + "kgcloud").encode("utf-8")).hexdigest()
    html = get_html("http://trackercdn.kugou.com/i/?pid=6&key=%s&acceptMp3=1&cmd=4&hash=%s" % (key, hash_val))
    url = loads(html)['url']
    songtype, ext, size = url_info(url)
    print_info(site_info, title, songtype, size)
    if not info_only:
        download_urls([url], title, ext, size, output_dir, merge=merge)
def kugou_download_playlist(url, output_dir='.', merge=True, info_only=False, **kwargs):
    """Download every song found on a www.kugou.com playlist page."""
    html = get_html(url)
    # Each entry exposes its title and song hash in data attributes.
    pattern = re.compile('title="(.*?)".* data="(\w*)\|.*?"')
    for title, hash_val in pattern.findall(html):
        kugou_download_by_hash(title, hash_val, output_dir, merge, info_only)
# Extractor entry points exported to you-get's dispatcher.
site_info = "kugou.com"
download = kugou_download
# download_playlist = playlist_not_supported("kugou")
download_playlist = kugou_download_playlist
| mit |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/social/tests/backends/test_twitch.py | 87 | 1050 | import json
from social.tests.backends.oauth import OAuth2Test
class TwitchOAuth2Test(OAuth2Test):
    """Exercise the Twitch OAuth2 backend against canned HTTP responses."""
    backend_path = 'social.backends.twitch.TwitchOAuth2'
    user_data_url = 'https://api.twitch.tv/kraken/user/'
    # Username the backend must extract from user_data_body below.
    expected_username = 'test_user1'
    access_token_body = json.dumps({
        'access_token': 'foobar',
    })
    # Canned Twitch "kraken" user payload served for user_data_url.
    user_data_body = json.dumps({
        'type': 'user',
        'name': 'test_user1',
        'created_at': '2011-06-03T17:49:19Z',
        'updated_at': '2012-06-18T17:19:57Z',
        '_links': {
            'self': 'https://api.twitch.tv/kraken/users/test_user1'
        },
        'logo': 'http://static-cdn.jtvnw.net/jtv_user_pictures/'
                'test_user1-profile_image-62e8318af864d6d7-300x300.jpeg',
        '_id': 22761313,
        'display_name': 'test_user1',
        'email': 'asdf@asdf.com',
        'partnered': True,
        'bio': 'test bio woo I\'m a test user'
    })

    def test_login(self):
        self.do_login()

    def test_partial_pipeline(self):
        self.do_partial_pipeline()
| agpl-3.0 |
miguelangel6/nightwatchbamboo | node_modules/nodeunit/node_modules/tap/node_modules/yamlish/yamlish-py/test/__init__.py | 161 | 3430 | # -*- coding: utf-8 -*- IGNORE:C0111
from __future__ import absolute_import, print_function, unicode_literals
import logging
import yamlish
import yaml
import tempfile
import textwrap
# Direction flags for generate_testsuite(): test the reader or the writer.
INPUT = 1
OUTPUT = 2

if yamlish.py3k:
    # Python 3 has no `unicode` builtin; alias it for the code below.
    unicode = str

#logging.basicConfig(level=logging.DEBUG)
def _generate_test_name(source):
"""
Clean up human-friendly test name into a method name.
"""
out = source.replace(' ', '_').replace(':', '').replace(',', '').lower()
return "test_%s" % out
def _create_input_test(test_src, tested_function, options=None):
    """
    Decorate tested function to be used as a method for TestCase.

    *test_src* is a fixture dict with 'in' and either 'out' (expected
    value) or 'error' (expected exception class).
    """
    def do_test_expected(self):
        """
        Execute a test by calling a tested_function on test_src data.
        """
        self.maxDiff = None
        got = ""
        if 'error' in test_src:
            # Fixture declares the exception the parser must raise.
            self.assertRaises(test_src['error'], tested_function,
                              test_src['in'], options)
        else:
            want = test_src['out']
            got = tested_function(test_src['in'], options)
            logging.debug('got = type %s', type(got))
            logging.debug("test_src['out'] = %s",
                          unicode(test_src['out']))
            self.assertEqual(got, want, """Result matches
                 expected = %s
                 observed = %s
                 """ % (want, got))
    return do_test_expected
def _create_output_test(test_src, tested_function, options=None):
    """
    Decorate tested function to be used as a method for TestCase.

    The generated method writes with *tested_function* to a temp file,
    reads the file back with PyYAML and compares parsed structures.
    """
    def do_test_expected(self):
        """
        Execute a test by calling a tested_function on test_src data.
        """
        self.maxDiff = None

        # We currently don't throw any exceptions in Writer, so this
        # this is always false
        if 'error' in test_src:
            self.assertRaises(test_src['error'], yamlish.dumps,
                              test_src['in'], options)
        else:
            logging.debug("out:\n%s", textwrap.dedent(test_src['out']))
            want = yaml.load(textwrap.dedent(test_src['out']))
            logging.debug("want:\n%s", want)
            with tempfile.NamedTemporaryFile() as test_file:
                # Compare parsed YAML, not raw text, so formatting
                # differences don't fail the test.
                tested_function(test_src['in'], test_file)
                test_file.seek(0)
                got_str = test_file.read()
                logging.debug("got_str = %s", got_str)
                got = yaml.load(got_str)
                self.assertEqual(got, want, "Result matches")
    return do_test_expected
def generate_testsuite(test_data, test_case_shell, test_fce, direction=INPUT,
                       options=None):
    """
    Generate tests from the test data, class to build upon and function
    to use for testing.

    Each fixture in *test_data* becomes a test_* method attached to
    *test_case_shell*; fixtures flagged with 'skip' are ignored.
    """
    for in_test in test_data:
        if ('skip' in in_test) and in_test['skip']:
            logging.debug("test %s skipped!", in_test['name'])
            continue
        name = _generate_test_name(in_test['name'])
        if direction == INPUT:
            method = _create_input_test(in_test, test_fce, options=options)
        elif direction == OUTPUT:
            method = _create_output_test(in_test, test_fce, options=options)
        method.__name__ = str('test_%s' % name)
        setattr(test_case_shell, method.__name__, method)
Katsutami7moto/study | pl_compiler/stage one.py | 1 | 10038 | # coding=utf-8
# Пример
# def parse():
# global regexp, current
# terms = []
# term = []
# while current < len(regexp) and regexp[current] != ')':
# if regexp[current] == '(':
# current += 1
# term.append(parse())
# elif regexp[current] == '|':
# terms.append(term)
# term = []
# elif regexp[current] == '*' or regexp[current] == '+':
# term.append(createunode(regexp[current], term.pop()))
# else:
# term.append(createleaf(regexp[current]))
# current += 1
# terms.append(term)
# return makeor(terms)
# Test-driven development
# Parser state: the token stream produced by the lexer and the index of
# the token currently under examination.
tokens = []
current = 0
class Token:
    """A lexical token.

    Attributes (set from the constructor arguments):
        type: broad token category, e.g. 'ident', 'math_op', 'semicolon'.
        subtype: optional refinement of the category, e.g. 'and', 'or'.
        value: optional literal payload (identifier text, number, ...).
    """

    def __init__(self, t, s=None, v=None):
        self.type = t
        self.subtype = s
        self.value = v

    def __repr__(self):
        # Useful when debugging parser traces.
        return 'Token(type={!r}, subtype={!r}, value={!r})'.format(
            self.type, self.subtype, self.value)
class Node:
    """A binary AST node.

    Attributes:
        type: node category, e.g. 'and_op', 'or_op', 'ident', 'bool'.
        value: optional payload for leaf nodes (identifier name, literal).
        lchild/rchild: child nodes, attached via setl()/setr(); None for
            leaves.
    """

    def __init__(self, t, v=None):
        self.type = t
        self.value = v
        self.lchild = None
        self.rchild = None

    def setl(self, obj):
        """Attach *obj* as the left child."""
        self.lchild = obj

    def setr(self, obj):
        """Attach *obj* as the right child."""
        self.rchild = obj

    def __repr__(self):
        # Useful when inspecting parser output.
        return 'Node(type={!r}, value={!r})'.format(self.type, self.value)
def create_bi_node(t, leftnode, rightnode):
    """Build and return a binary Node of type *t* with the given children."""
    node = Node(t)
    node.setl(leftnode)
    node.setr(rightnode)
    return node
def make_sequence_tree(seq):
    """
    Build a tree chaining the statement nodes in *seq* in source order.

    :type seq: list
    :rtype: Node
    """
    # TODO: not yet implemented (currently returns None).
    pass


def make_ident_node(idtok):
    """
    Build a leaf node from an 'ident' token.

    :type idtok: Token
    :rtype: Node
    """
    # TODO: not yet implemented (currently returns None).
    pass


def make_data_node(datok):
    """
    Build a leaf node from a literal token (int/float/string/bool).

    :type datok: Token
    :rtype: Node
    """
    # TODO: not yet implemented (currently returns None).
    pass


def make_inverse_node(somenode):
    """
    Wrap *somenode* in a logical-NOT node.

    :type somenode: Node
    :rtype: Node
    """
    # TODO: not yet implemented (currently returns None).
    pass


def make_assign_node(term):
    """
    Build an assignment node from [ident-node, value-expression-node].

    :type term: list
    :rtype: Node
    """
    # TODO: not yet implemented (currently returns None).
    pass


def make_print_node(term):
    """
    Build a print-instruction node from its parameter expression nodes.

    :type term: list
    :rtype: Node
    """
    # TODO: not yet implemented (currently returns None).
    pass
def parse_all_list():
    """
    Entry point: parse the global token list into an AST.

    :rtype: Node
    """
    global tokens, current
    assert isinstance(tokens, list)
    assert isinstance(current, int)
    if tokens:
        return parse_text()
    else:
        # Empty program: falls through and returns None.  TODO: decide on
        # an explicit empty-program representation.
        pass
def parse_text():
    """
    Parse consecutive instructions until end of input or a 'semicolon'
    token, and chain them into a sequence tree.

    :rtype: Node
    """
    global tokens, current
    assert isinstance(tokens, list)
    assert isinstance(current, int)
    seq = []
    while current < len(tokens) and tokens[current].type != 'semicolon':
        seq.append(parse_instructions())
    return make_sequence_tree(seq)
def parse_instructions():
    """
    Dispatch on the current token: 'let_op' starts an assignment,
    'print_op' a print instruction.

    :rtype: Node
    """
    global tokens, current
    assert isinstance(tokens, list)
    assert isinstance(current, int)
    if tokens[current].type == 'let_op':
        return parse_let()
    elif tokens[current].type == 'print_op':
        return parse_print()
    else:
        # NOTE(review): an unknown token type is silently ignored and
        # `current` is not advanced, so the caller's loop never makes
        # progress -- this should raise a syntax error instead.
        pass
def parse_let():
    """
    Parse an assignment of the form `let IDENT = <expression> ;`.

    :rtype: Node
    """
    global tokens, current
    assert isinstance(tokens, list)
    assert isinstance(current, int)
    term = []
    current += 1  # consume 'let_op'
    if tokens[current].type == 'ident':
        term.append(make_ident_node(tokens[current]))
        current += 1
        if tokens[current].type == 'equal_sign':
            current += 1
            term.append(parse_param())
            return make_assign_node(term)
        else:
            # NOTE(review): missing '=' is silently swallowed (returns
            # None) -- should report a syntax error.
            pass
    else:
        # NOTE(review): missing identifier is silently swallowed.
        pass
def parse_print():
    """
    Parse a print instruction of the form `print : <expression> ;`.

    :rtype: Node
    """
    global tokens, current
    assert isinstance(tokens, list)
    assert isinstance(current, int)
    term = []
    current += 1  # consume 'print_op'
    if tokens[current].type == 'colon':
        current += 1
        term.append(parse_param())
        return make_print_node(term)
    else:
        # NOTE(review): missing ':' is silently swallowed (returns None)
        # -- should report a syntax error.
        pass
def parse_param():
    """
    Collect the expression tokens up to the terminating 'semicolon'
    (which is consumed) and hand them to the expression parser.

    :rtype: Node
    """
    global tokens, current
    assert isinstance(tokens, list)
    assert isinstance(current, int)
    term = []
    while tokens[current].type != 'semicolon':
        # Only value, operator and parenthesis tokens may appear inside
        # an expression.
        if tokens[current].type == 'ident' or tokens[current].type == 'int' or tokens[current].type == 'float' or \
                tokens[current].type == 'string' or tokens[current].type == 'bool' or tokens[
                current].type == 'math_op' or tokens[current].type == 'log_op' or tokens[current].type == 'lparen' or \
                tokens[current].type == 'rparen':
            term.append(tokens[current])
            current += 1
        else:
            # NOTE(review): an unexpected token neither raises nor
            # advances `current`, so this loop never terminates --
            # should report a syntax error.
            pass
    current += 1  # consume the 'semicolon'
    return parse_expr(term, 0)
# orexpr : andexpr (OR andexpr)*;
#
# andexpr : bool (AND bool)*;
#
# bool : TRUE | FALSE | boolatom | NOT boolatom;
#
# boolatom : ID | LPAREN boolexpr RPAREN;
#
# boolexpr : orexpr | typexpr | compare;
#
# typexpr : ID QM ID;
#
# compare : mathexpr (MORE | LESS | MOREQ | LESEQ | EQ | NEQ) mathexpr;
#
# mathexpr : multexpr ((PLUS | MINUS) multexpr)* | MINUS mathexpr;
#
# multexpr : mathatom ((MULT | DIV | MOD) mathatom)*;
#
# mathatom : INT | FLOAT | ID | LPAREN mathexpr RPAREN;
# Везде, где есть ID (кроме правого операнда typexpr), потом добавятся:
# CALL, METHOD, FIELD, LPAREN LAMBDARET RPAREN, KCALL;
# ! логическое НЕ унарный 15 справа налево
# * / % мультипликативные операции бинарный 13 слева направо
# + - аддитивные операции бинарный 12 слева направо
# < > <= >= отношения бинарный 10 слева направо
# = != равенство/неравенство бинарный 9 слева направо
# && логическое И бинарный 5 слева направо
# || логическое ИЛИ бинарный 4 слева направо
def parse_mathexpr(term, curr):
    """
    Parse an arithmetic expression (grammar rule `mathexpr`, see the
    grammar notes above).

    :type term: list
    :type curr: int
    :rtype: Node
    """
    # TODO: not yet implemented (currently returns None).
    pass
def parse_bool(term):
    """
    Parse a single boolean term: a literal, an identifier, an optionally
    negated identifier, or a (possibly negated) parenthesised boolexpr.

    :type term: list
    :rtype: Node
    """
    if len(term) == 1:
        if term[0].type == 'bool':
            return make_data_node(term[0])
        elif term[0].type == 'ident':
            return make_ident_node(term[0])
        else:
            # NOTE(review): invalid single token silently yields None.
            pass
    elif len(term) == 2:
        if term[0].subtype == 'not' and term[1].type == 'ident':
            return make_inverse_node(make_ident_node(term[1]))
        else:
            pass
    else:
        if term[0].subtype == 'not':
            # NOT applied to a parenthesised sub-expression.
            if term[1].type == 'lparen' and term[-1].type == 'rparen':
                return make_inverse_node(parse_boolexpr(term, 1))
            else:
                pass
        elif term[0].type == 'lparen' and term[-1].type == 'rparen':
            return parse_boolexpr(term, 0)
def make_and_node(terms):
    """Left-fold a list of bool-term token lists into a chain of
    'and_op' nodes."""
    result = parse_bool(terms[0])
    for term in terms[1:]:
        result = create_bi_node('and_op', result, parse_bool(term))
    return result
def parse_and(term):
    """
    Split *term* into sub-terms at 'and' operator tokens (respecting
    parentheses) and fold them into a chain of and-nodes.

    :rtype: Node
    :type term: list
    """
    ts = []
    t = []
    curr = 0
    while curr < len(term) and term[curr].type != 'rparen':
        if term[curr].type == 'lparen':
            # Recurse into the parenthesised sub-expression.
            curr += 1
            t.append(parse_boolexpr(term, curr))
        elif term[curr].subtype == 'and':
            # Operator: close the current sub-term.
            ts.append(t)
            t = []
        else:
            t.append(term[curr])
        curr += 1
    ts.append(t)
    return make_and_node(ts)
def make_or_node(terms):
    """Left-fold a list of and-term token lists into a chain of
    'or_op' nodes."""
    result = parse_and(terms[0])
    for term in terms[1:]:
        result = create_bi_node('or_op', result, parse_and(term))
    return result
def parse_or(term, curr):
    """
    Split *term* (from index *curr*) into sub-terms at 'or' operator
    tokens (respecting parentheses) and fold them into or-nodes.

    :type term: list
    :type curr: int
    :rtype: Node
    """
    ts = []
    t = []
    while curr < len(term) and term[curr].type != 'rparen':
        if term[curr].type == 'lparen':
            # Recurse into the parenthesised sub-expression.
            curr += 1
            t.append(parse_boolexpr(term, curr))
        elif term[curr].subtype == 'or':
            # Operator: close the current sub-term.
            ts.append(t)
            t = []
        else:
            t.append(term[curr])
        curr += 1
    ts.append(t)
    return make_or_node(ts)
def parse_type(term, curr):
    # TODO: parse a type-check expression (`typexpr : ID QM ID`, see the
    # grammar notes above).  Not yet implemented.
    pass


def parse_compare(term, curr):
    # TODO: parse a comparison (`compare : mathexpr (MORE | LESS | ...)
    # mathexpr`, see the grammar notes above).  Not yet implemented.
    pass
def parse_boolexpr(term, curr):
    """
    Decide which kind of boolean expression starts at *curr* by scanning
    for the first significant operator ('or', '?', or a comparison),
    then delegate to the matching sub-parser.

    :type term: list
    :type curr: int
    :rtype: Node
    """
    o = False  # saw an 'or' operator
    t = False  # saw a '?' (type-check) operator
    r = False  # saw a comparison operator
    while curr < len(term) and term[curr].type != 'rparen':
        if term[curr].type == 'lparen':
            # Skip over the parenthesised sub-expression while scanning.
            curr += 1
            parse_boolexpr(term, curr)
        elif term[curr].subtype == 'or':
            o = True
            break
        elif term[curr].subtype == 'qm':
            t = True
            break
        elif term[curr].subtype == 'compare':
            r = True
            break
        else:
            pass
    if o:
        return parse_or(term, 0)
    elif t:
        return parse_type(term, 0)
    elif r:
        return parse_compare(term, 0)
    else:
        # No top-level operator: the whole term is a single bool.
        return parse_bool(term)
def parse_no_ops(term, curr):
    """
    Parse an operator-free expression: a single identifier or literal.

    :type term: list
    :type curr: int
    :rtype: Node
    """
    if len(term) == 1:
        if term[curr].type == 'ident':
            return make_ident_node(term[curr])
        elif term[curr].type == 'int' or term[curr].type == 'float' or term[curr].type == 'string' or \
                term[curr].type == 'bool':
            return make_data_node(term[curr])
        else:
            # NOTE(review): invalid token silently yields None.
            pass
    else:
        # NOTE(review): multi-token operator-free input silently yields
        # None -- should report a syntax error.
        pass
def parse_expr(term, curr):
    """
    Top-level expression dispatch: scan the token list for logical and
    arithmetic operators, then delegate to the boolean, arithmetic or
    operator-free parser.

    :type term: list
    :type curr: int
    :rtype: Node
    """
    m = False  # saw a math operator
    l = False  # saw a logic operator
    while curr < len(term) and term[curr].type != 'rparen':
        if term[curr].type == 'lparen':
            # Skip over the parenthesised sub-expression while scanning.
            curr += 1
            parse_expr(term, curr)
        elif term[curr].type == 'math_op' and not m:
            m = True
        elif term[curr].type == 'log_op':
            # Any logic operator makes the whole term a boolexpr.
            l = True
            break
        else:
            pass
        curr += 1
    if l:
        return parse_boolexpr(term, 0)
    elif m and not l:
        return parse_mathexpr(term, 0)
    elif not m and not l:
        return parse_no_ops(term, 0)
cee1/cerbero-mac | cerbero/commands/bootstrap.py | 4 | 1367 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
from cerbero.commands import Command, register_command
from cerbero.utils import N_
from cerbero.bootstrap.bootstraper import Bootstraper
class Bootstrap(Command):
    """'cerbero bootstrap' command: install all build-system dependencies."""
    doc = N_('Bootstrap the build system installing all the dependencies')
    name = 'bootstrap'

    def __init__(self):
        # This command takes no command-specific arguments.
        Command.__init__(self, [])

    def run(self, config, args):
        # Bootstraper(config) yields one bootstraper per target; run each.
        bootstrapers = Bootstraper(config)
        for bootstraper in bootstrapers:
            bootstraper.start()


register_command(Bootstrap)
| lgpl-2.1 |
SebDieBln/QGIS | python/plugins/processing/algs/lidar/lastools/las2dem.py | 12 | 3590 | # -*- coding: utf-8 -*-
"""
***************************************************************************
las2dem.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
---------------------
Date : September 2013
Copyright : (C) 2013 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from LAStoolsUtils import LAStoolsUtils
from LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterSelection
from processing.core.parameters import ParameterBoolean
class las2dem(LAStoolsAlgorithm):
    """QGIS Processing wrapper for the LAStools 'las2dem' rasterizer."""

    # Parameter identifiers.
    ATTRIBUTE = "ATTRIBUTE"
    PRODUCT = "PRODUCT"
    # Choice lists; index 0 is the tool default and adds no CLI flag.
    ATTRIBUTES = ["elevation", "slope", "intensity", "rgb", "edge_longest", "edge_shortest"]
    PRODUCTS = ["actual values", "hillshade", "gray", "false"]
    USE_TILE_BB = "USE_TILE_BB"

    def defineCharacteristics(self):
        """Declare the algorithm's name, group and GUI parameters."""
        self.name, self.i18n_name = self.trAlgorithm('las2dem')
        self.group, self.i18n_group = self.trAlgorithm('LAStools')
        self.addParametersVerboseGUI()
        self.addParametersPointInputGUI()
        self.addParametersFilter1ReturnClassFlagsGUI()
        self.addParametersStepGUI()
        self.addParameter(ParameterSelection(las2dem.ATTRIBUTE,
                                             self.tr("Attribute"), las2dem.ATTRIBUTES, 0))
        self.addParameter(ParameterSelection(las2dem.PRODUCT,
                                             self.tr("Product"), las2dem.PRODUCTS, 0))
        self.addParameter(ParameterBoolean(las2dem.USE_TILE_BB,
                                           self.tr("use tile bounding box (after tiling with buffer)"), False))
        self.addParametersRasterOutputGUI()
        self.addParametersAdditionalGUI()

    def processAlgorithm(self, progress):
        """Assemble the las2dem command line and run it."""
        commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "las2dem")]
        self.addParametersVerboseCommands(commands)
        self.addParametersPointInputCommands(commands)
        self.addParametersFilter1ReturnClassFlagsCommands(commands)
        self.addParametersStepCommands(commands)
        attribute = self.getParameterValue(las2dem.ATTRIBUTE)
        if attribute != 0:
            # Non-default attribute maps directly to a flag, e.g. -slope.
            commands.append("-" + las2dem.ATTRIBUTES[attribute])
        product = self.getParameterValue(las2dem.PRODUCT)
        if product != 0:
            commands.append("-" + las2dem.PRODUCTS[product])
        if (self.getParameterValue(las2dem.USE_TILE_BB)):
            commands.append("-use_tile_bb")
        self.addParametersRasterOutputCommands(commands)
        self.addParametersAdditionalCommands(commands)
        LAStoolsUtils.runLAStools(commands, progress)
| gpl-2.0 |
jruiperezv/ANALYSE | common/djangoapps/status/tests.py | 6 | 2852 | from django.conf import settings
from django.test import TestCase
import os
from django.test.utils import override_settings
from tempfile import NamedTemporaryFile
from .status import get_site_status_msg
# Get a name where we can put test files
TMP_FILE = NamedTemporaryFile(delete=False)
TMP_NAME = TMP_FILE.name
# Close it--we just want the path; the tests create and remove the file
# themselves through settings.STATUS_MESSAGE_PATH (overridden below).
TMP_FILE.close()
@override_settings(STATUS_MESSAGE_PATH=TMP_NAME)
class TestStatus(TestCase):
    """Test that the get_site_status_msg function does the right thing"""

    # Fixture bodies written to STATUS_MESSAGE_PATH; None means no file
    # is created at all.
    no_file = None

    # Trailing comma makes this deliberately invalid JSON.
    invalid_json = """{
        "global" : "Hello, Globe",
    }"""

    global_only = """{
        "global" : "Hello, Globe"
    }"""

    toy_only = """{
        "edX/toy/2012_Fall" : "A toy story"
    }"""

    global_and_toy = """{
        "global" : "Hello, Globe",
        "edX/toy/2012_Fall" : "A toy story"
    }"""

    # json to use, expected results for course=None (e.g. homepage),
    # for toy course, for full course.  Note that get_site_status_msg
    # is supposed to return global message even if course=None.  The
    # template just happens to not display it outside the courseware
    # at the moment...
    checks = [
        (no_file, None, None, None),
        (invalid_json, None, None, None),
        (global_only, "Hello, Globe", "Hello, Globe", "Hello, Globe"),
        (toy_only, None, "A toy story", None),
        (global_and_toy, "Hello, Globe", "Hello, Globe<br>A toy story", "Hello, Globe"),
    ]

    def setUp(self):
        """
        Fake course ids, since we don't have to have full django
        settings (common tests run without the lms settings imported)
        """
        self.full_id = 'edX/full/2012_Fall'
        self.toy_id = 'edX/toy/2012_Fall'

    def create_status_file(self, contents):
        """
        Write contents to settings.STATUS_MESSAGE_PATH.
        """
        with open(settings.STATUS_MESSAGE_PATH, 'w') as f:
            f.write(contents)

    def remove_status_file(self):
        """Delete the status file if it exists"""
        if os.path.exists(settings.STATUS_MESSAGE_PATH):
            os.remove(settings.STATUS_MESSAGE_PATH)

    def tearDown(self):
        self.remove_status_file()

    def test_get_site_status_msg(self):
        """run the tests"""
        # (Python 2 print statements below aid debugging on failure.)
        for (json_str, exp_none, exp_toy, exp_full) in self.checks:
            self.remove_status_file()
            if json_str:
                self.create_status_file(json_str)
            print "checking results for {0}".format(json_str)
            print "course=None:"
            self.assertEqual(get_site_status_msg(None), exp_none)
            print "course=toy:"
            self.assertEqual(get_site_status_msg(self.toy_id), exp_toy)
            print "course=full:"
            self.assertEqual(get_site_status_msg(self.full_id), exp_full)
| agpl-3.0 |
Azure/azure-sdk-for-python | sdk/databox/azure-mgmt-databox/azure/mgmt/databox/v2020_11_01/aio/operations/_operations.py | 1 | 4689 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
    """Operations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.databox.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        **kwargs
    ) -> AsyncIterable["_models.OperationList"]:
        """This method gets all the operations.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either OperationList or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.databox.models.OperationList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.OperationList"]
        # Map auth/not-found/conflict status codes to typed exceptions;
        # callers may extend or override via the 'error_map' keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the GET request: the metadata URL for the first page,
            # or the service-provided continuation link for later pages.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # The continuation link already embeds the query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page into (continuation token, items).
            deserialized = self._deserialize('OperationList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page, raising a typed error on non-200 responses.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize(_models.ApiError, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/providers/Microsoft.DataBox/operations'}  # type: ignore
| mit |
alash3al/rethinkdb | test/common/scenario_common.py | 50 | 1947 | # Copyright 2010-2014 RethinkDB, all rights reserved.
import random, shlex
import driver, utils, vcoptparse, workload_runner
def prepare_option_parser_mode_flags(opt_parser):
    """Register the command-line flags that control how rethinkdb is run
    (valgrind wrapping, a custom wrapper command, build mode, serve flags)."""
    opt_parser["valgrind"] = vcoptparse.BoolFlag("--valgrind")
    opt_parser["valgrind-options"] = vcoptparse.StringFlag("--valgrind-options", "--leak-check=full --track-origins=yes --child-silent-after-fork=yes")
    opt_parser["wrapper"] = vcoptparse.StringFlag("--wrapper", None)
    opt_parser["mode"] = vcoptparse.StringFlag("--mode", "")
    opt_parser["serve-flags"] = vcoptparse.StringFlag("--serve-flags", "")
def parse_mode_flags(parsed_opts):
    """Turn parsed option flags into server-launch ingredients.

    Returns a tuple (rethinkdb_executable_path, command_prefix, serve_flags):
    command_prefix wraps the server invocation (valgrind, a custom wrapper
    command, or nothing) and serve_flags are extra `rethinkdb serve` args.
    """
    mode = parsed_opts["mode"]

    if parsed_opts["valgrind"]:
        # --valgrind and --wrapper are mutually exclusive.
        assert parsed_opts["wrapper"] is None
        command_prefix = ["valgrind"] + shlex.split(parsed_opts["valgrind-options"])

        # Make sure we use the valgrind build
        # this assumes that the 'valgrind' substring goes at the end of the specific build string
        if "valgrind" not in mode and mode != "":
            mode = mode + "-valgrind"
    elif parsed_opts["wrapper"] is not None:
        command_prefix = shlex.split(parsed_opts["wrapper"])
    else:
        command_prefix = []

    return utils.find_rethinkdb_executable(mode=mode), command_prefix, shlex.split(parsed_opts["serve-flags"])
def prepare_table_for_workload(http, **kwargs):
    """Create the 'test' database on the server and a table inside it,
    returning the new table handle."""
    test_db = http.add_database(name="test")
    return http.add_table(database=test_db, **kwargs)
def get_workload_ports(processes, tableName, dbName='test'):
    """Build an RDBPorts bundle targeting one of the given server processes.

    Every entry must be a driver.Process or driver.ProxyProcess; any server
    can accept the workload, so one is chosen at random.
    """
    for process in processes:
        assert isinstance(process, (driver.Process, driver.ProxyProcess))
    process = random.choice(processes)
    return workload_runner.RDBPorts(
        host = process.host,
        http_port = process.http_port,
        rdb_port = process.driver_port,
        table_name = tableName,
        db_name = dbName)
| agpl-3.0 |
MyRayTech/CMSFul | vendor/composer/b91f24a5/doctrine-doctrine2-93103f4/docs/en/conf.py | 2448 | 6497 | # -*- coding: utf-8 -*-
#
# Doctrine 2 ORM documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 3 18:10:24 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_exts'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['configurationblock']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Doctrine 2 ORM'
copyright = u'2010-12, Doctrine Project Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2'
# The full version, including alpha/beta/rc tags.
release = '2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'doctrine'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Doctrine2ORMdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Doctrine2ORM.tex', u'Doctrine 2 ORM Documentation',
u'Doctrine Project Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
primary_domain = "dcorm"
def linkcode_resolve(domain, info):
    """Resolve a source link for Sphinx's linkcode extension; only the
    'dcorm' domain gets a (placeholder) URL."""
    return 'http://' if domain == 'dcorm' else None
| mit |
hospace/ToughRADIUS | toughradius/tools/shell.py | 6 | 3848 | #!/usr/bin/env python
#coding:utf-8
import sys
import os
import subprocess
import time
class ToughError(Exception):
    """Raised when a shell command run by ToughShell fails.

    Passes the message to Exception.__init__ so that str(err) and normal
    exception formatting/tracebacks show it (the previous version left the
    base class uninitialized, making str(err) empty). The legacy .message
    attribute is kept for existing callers.
    """
    def __init__(self, message):
        super(ToughError, self).__init__(message)
        self.message = message
class ToughShell(object):
    """Console helper for CLI tooling: colorized logging, interactive
    prompting, countdown waits, and subprocess execution.

    Python 2 only (print statements, raw_input). ANSI color escapes are
    skipped on win32, where the raw string is returned unchanged.
    """
    def __init__(self,logfile=None,debug=False):
        # logfile: optional path; every log line is also appended there.
        self.logfile = logfile
        self.is_debug = debug
        self.is_win32 = sys.platform in ['win32']
    # error
    def inred(self,s):
        # ANSI red (SGR 31)
        return self.is_win32 and s or "%s[31;2m%s%s[0m"%(chr(27),s, chr(27))
    # success
    def ingreen(self,s):
        # ANSI green (SGR 32)
        return self.is_win32 and s or "%s[32;2m%s%s[0m"%(chr(27),s, chr(27))
    # operate
    def inblue(self,s):
        # ANSI blue (SGR 34)
        return self.is_win32 and s or "%s[34;2m%s%s[0m"%(chr(27),s, chr(27))
    # info
    def incblue(self,s):
        # ANSI cyan (SGR 36)
        return self.is_win32 and s or "%s[36;2m%s%s[0m"%(chr(27),s, chr(27))
    # warning Magenta
    def inwarn(self,s):
        # ANSI magenta (SGR 35)
        return self.is_win32 and s or "%s[35;2m%s%s[0m"%(chr(27),s, chr(27))
    def log(self,msg,_font=None,logfile=None):
        # Print msg through the given color wrapper and mirror the plain
        # text to self.logfile.
        # NOTE(review): the `logfile` parameter is unused; only
        # self.logfile is consulted.
        print _font(msg)
        if self.logfile:
            with open(self.logfile,'ab') as fs:
                fs.write(msg)
                fs.write('\n')
    def info(self,msg):
        """Log an informational message in cyan."""
        self.log('[INFO] - %s'%msg,_font=self.incblue)
    def debug(self,msg):
        """Log a debug message in blue."""
        self.log('[DEBUG] - %s'%msg,_font=self.inblue)
    def succ(self,msg):
        """Log a success message in green."""
        self.log('[SUCC] - %s'%msg,_font=self.ingreen)
    def err(self,msg):
        """Log an error message in red."""
        self.log('[ERROR] - %s'%msg,_font=self.inred)
    def warn(self,msg):
        """Log a warning message in magenta."""
        self.log('[WARN] - %s'%msg,_font=self.inwarn)
    def read(self,ask):
        """Prompt the user with `ask` and return the raw answer string."""
        result = raw_input(self.incblue('[INPUT] - %s'%ask))
        if self.is_debug:
            self.debug('<question - %s | answer - %s>'%(ask,result))
        return result
    def wait(self,sec=0):
        """Count down `sec` seconds, logging each remaining second."""
        if not sec:return
        sec = int(sec)
        _range = range(1,sec+1)
        _range.reverse()
        for i in _range:
            self.debug(str(i))
            time.sleep(1.0)
    def run(self,command, raise_on_fail=False, shell=True, env=None,wait=0):
        """Run `command`, logging its outcome.

        If wait > 0 the command is launched asynchronously (always with
        shell=True) and this method only counts down `wait` seconds;
        nothing is returned in that case. Otherwise the command is run to
        completion and (returncode, stdout, stderr) is returned; if
        raise_on_fail is true a non-zero exit raises ToughError with a
        detailed report.
        """
        self.info(">> run command : %s"%command)
        _result = dict(code=0)  # NOTE(review): unused leftover variable.
        run_env = os.environ.copy()
        if env:run_env.update(env)
        if wait > 0:
            # Fire-and-forget: no handle kept, no return value.
            subprocess.Popen(command, shell=True)
            self.wait(wait)
        else:
            proc = subprocess.Popen(command,shell=shell,
                stdout=subprocess.PIPE,stderr=subprocess.PIPE,
                env=run_env)
            stdout, stderr = proc.communicate('through stdin to stdout')
            result = proc.returncode, stdout, stderr
            if proc.returncode > 0 and raise_on_fail:
                # Build a multi-line failure report before raising.
                error_string = "# Could not run command (return code= %s)\n" % proc.returncode
                error_string += "# Error was:\n%s\n" % (stderr.strip())
                error_string += "# Command was:\n%s\n" % command
                error_string += "# Output was:\n%s\n" % (stdout.strip())
                if proc.returncode == 127: # File not found, lets print path
                    path = os.getenv("PATH")
                    error_string += "# Check if y/our path is correct: %s" % path
                self.err(error_string)
                raise ToughError(error_string)
            else:
                if self.is_debug:
                    if stdout.strip():
                        self.debug(stdout)
                    if stderr.strip():
                        self.err(stderr)
            if proc.returncode == 0:
                self.succ(">> run command : %s success!"%command)
            else:
                self.err(">> run command : %s failure!"%command)
            return result
shell = ToughShell() | agpl-3.0 |
leiferikb/bitpop | src/third_party/pywebsocket/src/test/test_util.py | 449 | 7538 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for util module."""
import os
import random
import sys
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket import util
_TEST_DATA_DIR = os.path.join(os.path.split(__file__)[0], 'testdata')
class UtilTest(unittest.TestCase):
    """A unittest for util module."""
    def test_get_stack_trace(self):
        # With no active exception the helper reports the string 'None\n'.
        self.assertEqual('None\n', util.get_stack_trace())
        try:
            a = 1 / 0 # Intentionally raise exception.
        except Exception:
            # Inside a handler the full traceback text must be returned.
            trace = util.get_stack_trace()
            self.failUnless(trace.startswith('Traceback'))
            self.failUnless(trace.find('ZeroDivisionError') != -1)
    def test_prepend_message_to_exception(self):
        # The helper mutates the exception message in place.
        exc = Exception('World')
        self.assertEqual('World', str(exc))
        util.prepend_message_to_exception('Hello ', exc)
        self.assertEqual('Hello World', str(exc))
    def test_get_script_interp(self):
        # Shebang parsing, with optional rewriting onto a cygwin bin path.
        cygwin_path = 'c:\\cygwin\\bin'
        cygwin_perl = os.path.join(cygwin_path, 'perl')
        self.assertEqual(None, util.get_script_interp(
            os.path.join(_TEST_DATA_DIR, 'README')))
        self.assertEqual(None, util.get_script_interp(
            os.path.join(_TEST_DATA_DIR, 'README'), cygwin_path))
        self.assertEqual('/usr/bin/perl -wT', util.get_script_interp(
            os.path.join(_TEST_DATA_DIR, 'hello.pl')))
        self.assertEqual(cygwin_perl + ' -wT', util.get_script_interp(
            os.path.join(_TEST_DATA_DIR, 'hello.pl'), cygwin_path))
    def test_hexify(self):
        # Bytes render as space-separated lowercase hex pairs.
        self.assertEqual('61 7a 41 5a 30 39 20 09 0d 0a 00 ff',
                         util.hexify('azAZ09 \t\r\n\x00\xff'))
class RepeatedXorMaskerTest(unittest.TestCase):
    """A unittest for RepeatedXorMasker class."""
    def test_mask(self):
        # Sample input e6,97,a5 is U+65e5 in UTF-8
        # XOR with an all-ones key flips every bit.
        masker = util.RepeatedXorMasker('\xff\xff\xff\xff')
        result = masker.mask('\xe6\x97\xa5')
        self.assertEqual('\x19\x68\x5a', result)
        # XOR with an all-zeros key is the identity.
        masker = util.RepeatedXorMasker('\x00\x00\x00\x00')
        result = masker.mask('\xe6\x97\xa5')
        self.assertEqual('\xe6\x97\xa5', result)
        # XOR of the input with itself zeroes it out.
        masker = util.RepeatedXorMasker('\xe6\x97\xa5\x20')
        result = masker.mask('\xe6\x97\xa5')
        self.assertEqual('\x00\x00\x00', result)
    def test_mask_twice(self):
        # The masker must keep its position in the key across calls.
        masker = util.RepeatedXorMasker('\x00\x7f\xff\x20')
        # mask[0], mask[1], ... will be used.
        result = masker.mask('\x00\x00\x00\x00\x00')
        self.assertEqual('\x00\x7f\xff\x20\x00', result)
        # mask[2], mask[0], ... will be used for the next call.
        result = masker.mask('\x00\x00\x00\x00\x00')
        self.assertEqual('\x7f\xff\x20\x00\x7f', result)
    def test_mask_large_data(self):
        # Larger inputs exercise the chunked masking path; expected output
        # is computed independently with a per-byte XOR.
        masker = util.RepeatedXorMasker('mASk')
        original = ''.join([chr(i % 256) for i in xrange(1000)])
        result = masker.mask(original)
        expected = ''.join(
            [chr((i % 256) ^ ord('mASk'[i % 4])) for i in xrange(1000)])
        self.assertEqual(expected, result)
        masker = util.RepeatedXorMasker('MaSk')
        first_part = 'The WebSocket Protocol enables two-way communication.'
        result = masker.mask(first_part)
        self.assertEqual(
            '\x19\t6K\x1a\x0418"\x028\x0e9A\x03\x19"\x15<\x08"\rs\x0e#'
            '\x001\x07(\x12s\x1f:\x0e~\x1c,\x18s\x08"\x0c>\x1e#\x080\n9'
            '\x08<\x05c',
            result)
        second_part = 'It has two parts: a handshake and the data transfer.'
        result = masker.mask(second_part)
        self.assertEqual(
            "('K%\x00 K9\x16<K=\x00!\x1f>[s\nm\t2\x05)\x12;\n&\x04s\n#"
            "\x05s\x1f%\x04s\x0f,\x152K9\x132\x05>\x076\x19c",
            result)
def get_random_section(source, min_num_chunks):
    """Split *source* into consecutive random-sized chunks.

    Each chunk is at most len(source) // min_num_chunks long, so at least
    min_num_chunks chunks are produced (source must be at least that long);
    concatenating the chunks reproduces *source* exactly. Uses the global
    `random` state — seed it for reproducibility.
    """
    chunks = []
    bytes_chunked = 0
    while bytes_chunked < len(source):
        # Floor division keeps the bound an int: the original `/` relied on
        # Python 2 integer division and breaks random.randint on Python 3.
        chunk_size = random.randint(
            1,
            min(len(source) // min_num_chunks, len(source) - bytes_chunked))
        chunk = source[bytes_chunked:bytes_chunked + chunk_size]
        chunks.append(chunk)
        bytes_chunked += chunk_size
    return chunks
class InflaterDeflaterTest(unittest.TestCase):
    """A unittest for _Inflater and _Deflater class."""
    def test_inflate_deflate_default(self):
        # Python 2 code: b'...' + '...' concatenation is legal there.
        input = b'hello' + '-' * 30000 + b'hello'
        inflater15 = util._Inflater(15)
        deflater15 = util._Deflater(15)
        inflater8 = util._Inflater(8)
        deflater8 = util._Deflater(8)
        compressed15 = deflater15.compress_and_finish(input)
        compressed8 = deflater8.compress_and_finish(input)
        inflater15.append(compressed15)
        inflater8.append(compressed8)
        # Different window bits must yield different streams, yet both
        # must round-trip back to the original input.
        self.assertNotEqual(compressed15, compressed8)
        self.assertEqual(input, inflater15.decompress(-1))
        self.assertEqual(input, inflater8.decompress(-1))
    def test_random_section(self):
        # Compress in random-sized chunks and decompress in differently
        # sized chunks; the fixed seed keeps the test reproducible.
        random.seed(a=0)
        source = ''.join(
            [chr(random.randint(0, 255)) for i in xrange(100 * 1024)])
        chunked_input = get_random_section(source, 10)
        print "Input chunk sizes: %r" % [len(c) for c in chunked_input]
        deflater = util._Deflater(15)
        compressed = []
        for chunk in chunked_input:
            compressed.append(deflater.compress(chunk))
        compressed.append(deflater.compress_and_finish(''))
        chunked_expectation = get_random_section(source, 10)
        print ("Expectation chunk sizes: %r" %
               [len(c) for c in chunked_expectation])
        inflater = util._Inflater(15)
        inflater.append(''.join(compressed))
        for chunk in chunked_expectation:
            decompressed = inflater.decompress(len(chunk))
            self.assertEqual(chunk, decompressed)
        # The stream must be consumed exactly, with nothing left over.
        self.assertEqual('', inflater.decompress(-1))
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
| gpl-3.0 |
GJL/flink | flink-python/pyflink/datastream/tests/test_state_backend.py | 1 | 9033 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.datastream.state_backend import (_from_j_state_backend, CustomStateBackend,
MemoryStateBackend, FsStateBackend,
RocksDBStateBackend, PredefinedOptions)
from pyflink.java_gateway import get_gateway
from pyflink.pyflink_gateway_server import on_windows
from pyflink.testing.test_case_utils import PyFlinkTestCase
from pyflink.util.utils import load_java_class
class MemoryStateBackendTests(PyFlinkTestCase):
def test_constant(self):
gateway = get_gateway()
JMemoryStateBackend = gateway.jvm.org.apache.flink.runtime.state.memory \
.MemoryStateBackend
self.assertEqual(MemoryStateBackend.DEFAULT_MAX_STATE_SIZE,
JMemoryStateBackend.DEFAULT_MAX_STATE_SIZE)
def test_create_memory_state_backend(self):
self.assertIsNotNone(MemoryStateBackend("file://var/checkpoints/"))
self.assertIsNotNone(MemoryStateBackend("file://var/checkpoints/",
"file://var/savepoints/"))
self.assertIsNotNone(MemoryStateBackend(
"file://var/checkpoints/", "file://var/savepoints/", 10000000))
self.assertIsNotNone(MemoryStateBackend(
"file://var/checkpoints/", "file://var/savepoints/", 10000000, True))
self.assertIsNotNone(MemoryStateBackend(
"file://var/checkpoints/", "file://var/savepoints/", 10000000, False))
def test_is_using_asynchronous_snapshots(self):
state_backend = MemoryStateBackend()
self.assertTrue(state_backend.is_using_asynchronous_snapshots())
state_backend = MemoryStateBackend(using_asynchronous_snapshots=True)
self.assertTrue(state_backend.is_using_asynchronous_snapshots())
state_backend = MemoryStateBackend(using_asynchronous_snapshots=False)
self.assertFalse(state_backend.is_using_asynchronous_snapshots())
def test_get_max_state_size(self):
state_backend = MemoryStateBackend()
self.assertEqual(state_backend.get_max_state_size(),
MemoryStateBackend.DEFAULT_MAX_STATE_SIZE)
state_backend = MemoryStateBackend(max_state_size=50000)
self.assertEqual(state_backend.get_max_state_size(), 50000)
class FsStateBackendTests(PyFlinkTestCase):
def test_create_fs_state_backend(self):
self.assertIsNotNone(FsStateBackend("file://var/checkpoints/"))
self.assertIsNotNone(FsStateBackend("file://var/checkpoints/", "file://var/savepoints/"))
self.assertIsNotNone(FsStateBackend("file://var/checkpoints/",
"file://var/savepoints/", 2048))
self.assertIsNotNone(FsStateBackend(
"file://var/checkpoints/", "file://var/savepoints/", 2048, 2048, True))
self.assertIsNotNone(FsStateBackend(
"file://var/checkpoints/", "file://var/savepoints/", 2048, 4096))
def test_get_min_file_size_threshold(self):
state_backend = FsStateBackend("file://var/checkpoints/")
self.assertEqual(state_backend.get_min_file_size_threshold(), 1024)
state_backend = FsStateBackend("file://var/checkpoints/", file_state_size_threshold=2048)
self.assertEqual(state_backend.get_min_file_size_threshold(), 2048)
def test_get_checkpoint_path(self):
state_backend = FsStateBackend("file://var/checkpoints/")
self.assertEqual(state_backend.get_checkpoint_path(), "file://var/checkpoints")
class RocksDBStateBackendTests(PyFlinkTestCase):
def test_create_rocks_db_state_backend(self):
self.assertIsNotNone(RocksDBStateBackend("file://var/checkpoints/"))
self.assertIsNotNone(RocksDBStateBackend("file://var/checkpoints/", True))
self.assertIsNotNone(RocksDBStateBackend("file://var/checkpoints/", False))
self.assertIsNotNone(RocksDBStateBackend(
checkpoint_stream_backend=FsStateBackend("file://var/checkpoints/")))
def test_get_checkpoint_backend(self):
state_backend = RocksDBStateBackend("file://var/checkpoints/")
checkpoint_backend = state_backend.get_checkpoint_backend()
self.assertIsInstance(checkpoint_backend, FsStateBackend)
self.assertEqual(checkpoint_backend.get_checkpoint_path(), "file://var/checkpoints")
def test_get_set_db_storage_paths(self):
if on_windows():
checkpoints_path = "file:/C:/var/checkpoints/"
storage_path = ["file:/C:/var/db_storage_dir1/",
"file:/C:/var/db_storage_dir2/",
"file:/C:/var/db_storage_dir3/"]
expected = ["C:\\var\\db_storage_dir1",
"C:\\var\\db_storage_dir2",
"C:\\var\\db_storage_dir3"]
else:
checkpoints_path = "file://var/checkpoints/"
storage_path = ["file://var/db_storage_dir1/",
"file://var/db_storage_dir2/",
"file://var/db_storage_dir3/"]
expected = ["/db_storage_dir1",
"/db_storage_dir2",
"/db_storage_dir3"]
state_backend = RocksDBStateBackend(checkpoints_path)
state_backend.set_db_storage_paths(*storage_path)
self.assertEqual(state_backend.get_db_storage_paths(), expected)
def test_get_set_predefined_options(self):
state_backend = RocksDBStateBackend("file://var/checkpoints/")
self.assertEqual(state_backend.get_predefined_options(), PredefinedOptions.DEFAULT)
state_backend.set_predefined_options(PredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM)
self.assertEqual(state_backend.get_predefined_options(),
PredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM)
state_backend.set_predefined_options(PredefinedOptions.SPINNING_DISK_OPTIMIZED)
self.assertEqual(state_backend.get_predefined_options(),
PredefinedOptions.SPINNING_DISK_OPTIMIZED)
state_backend.set_predefined_options(PredefinedOptions.FLASH_SSD_OPTIMIZED)
self.assertEqual(state_backend.get_predefined_options(),
PredefinedOptions.FLASH_SSD_OPTIMIZED)
state_backend.set_predefined_options(PredefinedOptions.DEFAULT)
self.assertEqual(state_backend.get_predefined_options(), PredefinedOptions.DEFAULT)
def test_get_set_options(self):
state_backend = RocksDBStateBackend("file://var/checkpoints/")
self.assertIsNone(state_backend.get_options())
state_backend.set_options(
"org.apache.flink.contrib.streaming.state.DefaultConfigurableOptionsFactory")
self.assertEqual(state_backend.get_options(),
"org.apache.flink.contrib.streaming.state."
"DefaultConfigurableOptionsFactory")
def test_get_set_number_of_transfering_threads(self):
state_backend = RocksDBStateBackend("file://var/checkpoints/")
self.assertEqual(state_backend.get_number_of_transfering_threads(), 1)
state_backend.set_number_of_transfering_threads(4)
self.assertEqual(state_backend.get_number_of_transfering_threads(), 4)
class CustomStateBackendTests(PyFlinkTestCase):
def test_create_custom_state_backend(self):
gateway = get_gateway()
JConfiguration = gateway.jvm.org.apache.flink.configuration.Configuration
j_config = JConfiguration()
j_factory = load_java_class("org.apache.flink.streaming.runtime.tasks."
"StreamTaskTest$TestMemoryStateBackendFactory").newInstance()
context_classloader = gateway.jvm.Thread.currentThread().getContextClassLoader()
state_backend = _from_j_state_backend(j_factory.createFromConfig(j_config,
context_classloader))
self.assertIsInstance(state_backend, CustomStateBackend)
| apache-2.0 |
kmod/icbd | icbd/type_analyzer/tests/classes.py | 1 | 6603 | def f1():
class A(object):
k = 2
def __init__(self, x): # 12 (<A|B>,int) -> None
self.x = x
self.m = 1
pass
def bar(self): # 12 (<A|B>) -> int
self.y = 2
return self.x
def baz(self): # 16 A
self.z = 1
def foo(self):
return 2
a = A(1) # 4 A # 8 class 'A'
a2 = A(1)
f1 = A.foo # 4 (A) -> int
f2 = a.foo # 4 () -> int
x1 = f1() # 4 <unknown> # e 9
x2 = f2() # 4 int
a2.m # 7 <int|str>
a.m # 6 <int|str>
a.m = "" # 6 <int|str>
a.m # 6 <int|str>
a2.m # 7 <int|str>
a2.y # 7 int
y = a.x # 4 int
z = a.z # 4 int
w = a.w # 4 <unknown> # e 8
y = a.y # 4 int
z = a.bar() # 4 int
y = a.y # 4 int
class B(A):
pass
def f(x): # 8 (<A|B>) -> <A|B>
return x
b = B(1,2,3) # e 8 # 4 B
b = B(2) # 4 B
A.k # 4 class 'A' # 6 int
a.k # 4 A # 6 int
B.k # 4 class 'B' # 6 int
b.k # 4 B # 6 int
b.y # 6 int
x = b.bar() # 4 int # 8 B # 10 () -> int
f(b) # 4 (<A|B>) -> <A|B> # 6 B
f(a) # 4 (<A|B>) -> <A|B> # 6 A
def f2():
class LinkedList(object):
def __init__(self, l):
if l:
self.link = LinkedList(l[1:])
self.val = l[0]
else:
self.link = None
self.val = None
self.x = 1
k = LinkedList(range(5))
k.link
k.val
k.x
x = k.link.link.link.link.link.link.link.link.link.link.link.link.link.link.link.link.link.val # 4 int # 8 LinkedList # 10 LinkedList # 15 LinkedList # 20 LinkedList # 90 LinkedList # 95 int
y = x # 4 int
def f3():
class C(object):
pass
def ident(self):
return self
c = C()
c2 = C()
c2.x # 7 int
c.x = 1 # 6 int
C.f = ident
x = c.f() # 4 C
def ident(self):
return self
c.g = ident
y = c.g() # e 8 # 4 int
z = c.g(2) # 4 int
def f4():
" Test class resolution order "
class X(object):
a = 1
b = 1
c = 1
d = 1
def __init__(self): # 21 <X|Y>
self.a = ""
self.b = ""
class Y(X):
a = object()
b = object()
c = object()
def __init__(self):
super(Y, self).__init__()
self.a = [1]
x = X()
x.e = 1
y = Y() # 4 Y
y.a # 6 <[int]|str>
y.b # 6 str
y.c # 6 object
y.d # 6 int
y.e # e 4
def f5():
"""
" test super behavior (also class redefinition) "
class X(object):
def __init__(self):
self.name = "X"
def get_name(self):
return self.name
class Y(X):
def __init__(self):
super(Y, self).__init__()
self.name = 2
y = Y()
y.name ! 2 int
y.a = 1
x = super(Y, y)
x.a ! e 0
x.name ! 2 str
super(Y, y).name ! 12 str
" this is probably not going to be supported any time soon "
'''
n = super(Y, y).get_name() ! 0 int ! 16 () -> int
'''
"""
def f6():
# test __init__ inheritance
class A(object):
def __init__(self, x): # 27 int
self.x = x # 17 int # 21 int
class B(A):
pass
b = B(2) # 4 B
b.x # 6 int
def f7():
def A(object):
pass
# test that super.__init__ can be called even if it wasnt explicitly defined
def B(object):
def __init__(self):
super(B, self).__init__()
def f8():
# testing default behavior
class C(object):
pass
c = C() # 4 C
b = not c # 4 bool
b = (c == c) # 4 bool
x = hash(c) # 4 int
def f9():
i = 0
class C(object):
global i
for i in xrange(3):
j = i
c = C()
print i # 10 int
print c.j # 10 C # 12 int
def f10():
class C(object): # 10 class 'C'
class D(object): # 14 class 'D'
pass
def __init__(self):
self.d = C.D() # 17 D
c = C() # 4 C
print c.d # 10 C # 12 D
def f11():
class C(object):
cls_arg = 1
def __init__(self):
x = cls_arg # e 16
y = C.cls_arg # 12 int # 18 int
def f12():
def f(x): # 8 (int) -> str
return str(x)*2
class C(object):
i2 = 1 # 8 int
j = f(i2) # 8 str
k = i2 # 8 int
def __init__(self):
print i2 # e 18
print C.i2 # 12 int
print C.j # 12 str
print C.k # 12 int
def f13():
def f():
pass
class C(object):
x = 1 # 8 int
f()
x # 8 int
def f14():
class A(object):
def bar(self):
return 1
class B(object):
def bar(self):
return ''
class C(A, B):
def bar(self):
return super(C, self).bar()
c = C() # 4 C
x = c.bar() # 4 int
def f15():
class C(object):
def bar(*args): # 12 (*[C]) -> [C]
return args # 19 [C]
c = C() # 4 C
l = c.bar() # 4 [C]
def f16():
# Testing staticmethod behavior
class C(object):
@staticmethod
def foo1(x):
return x
@staticmethod
def foo2(x):
return x
def foo3(x):
return x
class B(object):
pass
B.foo1 = C.foo1
s = staticmethod(C.foo2)
B.foo2 = s
x = B.foo1(B()) # 4 B # 10 (B) -> B
x = B.foo1(2) # e 8
y = B.foo2(2) # 4 int # 10 (int) -> int
b = B()
f1 = b.foo1 # 4 () -> B
f2 = b.foo2 # 4 (int) -> int
b.foo1()
b.foo2() # e 4
def f17():
class A(object):
def __init__(self):
pass
class B(A):
pass
b = B() # 4 B
b.__init__() # 4 B # 6 () -> None
class C(list):
pass
c = C()
x = c.pop() # 4 <mixed>
c.__init__([1,2])
c.__init__({1:2})
def f18():
class A(object):
def __init__(self): # 21 <A|B>
self.x = 1
class B(A):
def bar(self):
return self.x # 24 int
class C(object):
pass
def f19():
class C(object):
a = 1
def __init__(self):
print a # e 18
C()
x20 = 1 # 0 int
def f20():
x20 = '' # 4 str
class C(object):
global x20
def __init__(self):
print x20 # 18 str
return C()
x21 = 1 # 0 int
def f21():
x21 = 's' # 4 str
class C(object):
global x21
class D(object):
global x21
def __init__(self):
print x21 # 22 str
return C.D()
| mit |
kittehcoin/p2pool | p2pool/test/util/test_math.py | 283 | 1198 | from __future__ import division
import random
import unittest
from p2pool.util import math
def generate_alphabet():
    """Return None half the time; otherwise a shuffled alphabet of
    between 2 and 256 distinct byte characters.

    Python 2 semantics: ``map`` returns a list, so it can be shuffled
    in place.  ``None`` lets the callee exercise its default-alphabet
    code path.
    """
    # Coin flip: half the runs use the implementation's default alphabet.
    if random.randrange(2):
        return None
    chars = map(chr, xrange(256))
    random.shuffle(chars)
    size = random.randrange(2, len(chars))
    return chars[:size]
class Test(unittest.TestCase):
    """Unit tests for the helpers in p2pool.util.math."""
    def test_add_tuples(self):
        # Element-wise addition of equal-length tuples.
        assert math.add_tuples((1, 2, 3), (4, 5, 6)) == (5, 7, 9)
    def test_bases(self):
        # Round-trip naturals of several magnitudes through
        # natural_to_string/string_to_natural, with random alphabets
        # (None selects the implementation's default alphabet).
        for i in xrange(10):
            alphabet = generate_alphabet()
            # NOTE: the inner loop reuses the name "i"; harmless, since the
            # outer index is never read afterwards.
            for i in xrange(100):
                n = random.choice([
                    random.randrange(3),
                    random.randrange(300),
                    random.randrange(100000000000000000000000000000),
                ])
                s = math.natural_to_string(n, alphabet)
                n2 = math.string_to_natural(s, alphabet)
                #print n, s.encode('hex'), n2
                self.assertEquals(n, n2)
    def test_binom(self):
        # The returned confidence interval must always bracket the observed
        # proportion x/n and stay inside [0, 1], for every x in 0..n.
        for n in xrange(1, 100):
            for x in xrange(n + 1):
                left, right = math.binomial_conf_interval(x, n)
                assert 0 <= left <= x/n <= right <= 1, (left, right, x, n)
| gpl-3.0 |
tima/ansible | lib/ansible/modules/cloud/ovirt/ovirt_storage_connections.py | 43 | 9748 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_storage_connections
short_description: Module to manage storage connections in oVirt
version_added: "2.4"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage storage connections in oVirt"
options:
id:
description:
- "Id of the storage connection to manage."
state:
description:
- "Should the storage connection be present or absent."
choices: ['present', 'absent']
default: present
storage:
description:
- "Name of the storage domain to be used with storage connection."
address:
description:
- "Address of the storage server. E.g.: myserver.mydomain.com"
path:
description:
- "Path of the mount point of the storage. E.g.: /path/to/my/data"
nfs_version:
description:
- "NFS version. One of: I(auto), I(v3), I(v4) or I(v4_1)."
nfs_timeout:
description:
- "The time in tenths of a second to wait for a response before retrying NFS requests. Range 0 to 65535."
nfs_retrans:
description:
- "The number of times to retry a request before attempting further recovery actions. Range 0 to 65535."
mount_options:
description:
- "Option which will be passed when mounting storage."
password:
description:
- "A CHAP password for logging into a target."
username:
description:
- "A CHAP username for logging into a target."
port:
description:
- "Port of the iSCSI storage server."
target:
description:
- "The target IQN for the storage device."
type:
description:
- "Storage type. For example: I(nfs), I(iscsi), etc."
vfs_type:
description:
- "Virtual File System type."
force:
description:
            - "This parameter is relevant only when updating a connection."
            - "If I(true) the storage domain doesn't have to be in I(MAINTENANCE)
               state, so the storage connection is updated."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Add new storage connection:
- ovirt_storage_connections:
storage: myiscsi
address: 10.34.63.199
target: iqn.2016-08-09.domain-01:nickname
port: 3260
type: iscsi
# Update the existing storage connection address:
- ovirt_storage_connections:
id: 26915c96-92ff-47e5-9e77-b581db2f2d36
address: 10.34.63.204
force: true
# Remove storage connection:
- ovirt_storage_connections:
id: 26915c96-92ff-47e5-9e77-b581db2f2d36
'''
RETURN = '''
id:
description: ID of the storage connection which is managed
returned: On success if storage connection is found.
type: string
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
storage_connection:
description: "Dictionary of all the storage connection attributes. Storage connection attributes can be found on your oVirt instance
at following url: https://ovirt.example.com/ovirt-engine/api/model#types/storage_connection."
returned: On success if storage connection is found.
type: dict
'''
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
ovirt_full_argument_spec,
search_by_name,
)
class StorageConnectionModule(BaseModule):
    """Maps Ansible module parameters onto oVirt StorageConnection entities."""
    def build_entity(self):
        """Build the SDK StorageConnection from the module parameters.

        Note the deliberate name mismatch: the Ansible parameter is
        'nfs_timeout' but the SDK attribute is 'nfs_timeo'.
        """
        return otypes.StorageConnection(
            address=self.param('address'),
            path=self.param('path'),
            nfs_version=self.param('nfs_version'),
            nfs_timeo=self.param('nfs_timeout'),
            nfs_retrans=self.param('nfs_retrans'),
            mount_options=self.param('mount_options'),
            password=self.param('password'),
            username=self.param('username'),
            port=self.param('port'),
            target=self.param('target'),
            type=otypes.StorageType(
                self.param('type')
            ) if self.param('type') is not None else None,
            vfs_type=self.param('vfs_type'),
        )
    def post_present(self, entity_id):
        """After create/update: attach the connection to the requested
        storage domain, if the 'storage' parameter was given and the
        connection is not already attached.  Honors check mode.
        """
        if self.param('storage'):
            sds_service = self._connection.system_service().storage_domains_service()
            sd = search_by_name(sds_service, self.param('storage'))
            if sd is None:
                raise Exception(
                    "Storage '%s' was not found." % self.param('storage')
                )
            # Only attach when the domain doesn't already list this connection.
            if entity_id not in [
                sd_conn.id for sd_conn in self._connection.follow_link(sd.storage_connections)
            ]:
                scs_service = sds_service.storage_domain_service(sd.id).storage_connections_service()
                if not self._module.check_mode:
                    scs_service.add(
                        connection=otypes.StorageConnection(
                            id=entity_id,
                        ),
                    )
                self.changed = True
    def update_check(self, entity):
        """Return True iff the existing entity already matches every
        requested parameter (i.e. no update is needed)."""
        return (
            equal(self.param('address'), entity.address) and
            equal(self.param('path'), entity.path) and
            equal(self.param('nfs_version'), entity.nfs_version) and
            equal(self.param('nfs_timeout'), entity.nfs_timeo) and
            equal(self.param('nfs_retrans'), entity.nfs_retrans) and
            equal(self.param('mount_options'), entity.mount_options) and
            equal(self.param('password'), entity.password) and
            equal(self.param('username'), entity.username) and
            equal(self.param('port'), entity.port) and
            equal(self.param('target'), entity.target) and
            equal(self.param('type'), str(entity.type)) and
            equal(self.param('vfs_type'), entity.vfs_type)
        )
def find_sc_by_attributes(module, storage_connections_service):
    """Return the first existing storage connection whose type-specific
    attributes match the module parameters, or None if there is none.

    File-based types match on (address, path); block-based types match
    on (address, target).
    """
    wanted_type = module.params['type']
    for candidate in storage_connections_service.list():
        conn_type = str(candidate.type)
        if conn_type != wanted_type:
            continue
        if conn_type in ('nfs', 'posixfs', 'glusterfs', 'localfs'):
            matched = (
                module.params['address'] == candidate.address
                and module.params['path'] == candidate.path
            )
        elif conn_type in ('iscsi', 'fcp'):
            matched = (
                module.params['address'] == candidate.address
                and module.params['target'] == candidate.target
            )
        else:
            matched = False
        if matched:
            return candidate
def main():
    """Module entry point.

    Builds the argument spec, connects to the oVirt engine, and creates,
    updates or removes the storage connection depending on 'state'.
    Always terminates through AnsibleModule.exit_json/fail_json.
    """
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        id=dict(default=None),
        address=dict(default=None),
        path=dict(default=None),
        nfs_version=dict(default=None),
        nfs_timeout=dict(default=None, type='int'),
        nfs_retrans=dict(default=None, type='int'),
        mount_options=dict(default=None),
        password=dict(default=None, no_log=True),
        username=dict(default=None),
        port=dict(default=None, type='int'),
        target=dict(default=None),
        type=dict(default=None),
        vfs_type=dict(default=None),
        force=dict(type='bool', default=False),
        storage=dict(default=None),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    check_sdk(module)

    # Initialized before the try block so the finally clause cannot raise
    # NameError when create_connection() (or the 'auth' pop) itself fails;
    # previously that NameError masked the real connection error.
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        storage_connections_service = connection.system_service().storage_connections_service()
        storage_connection_module = StorageConnectionModule(
            connection=connection,
            module=module,
            service=storage_connections_service,
        )
        entity = None
        if module.params['id'] is None:
            # No explicit id given: locate the connection by its
            # type-specific attributes (address/path or address/target).
            entity = find_sc_by_attributes(module, storage_connections_service)
        state = module.params['state']
        if state == 'present':
            ret = storage_connection_module.create(
                entity=entity,
                update_params={'force': True},
            )
            storage_connection_module.post_present(ret['id'])
        elif state == 'absent':
            ret = storage_connection_module.remove(entity=entity)
        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Log out only if a connection was actually established.
        if connection is not None:
            connection.close(logout=auth.get('token') is None)
| gpl-3.0 |
nitin-cherian/LifeLongLearning | Web_Development_Python/RealPython/flask-blog/env/flask-blog/lib/python3.5/site-packages/pip-9.0.1-py3.5.egg/pip/_vendor/requests/exceptions.py | 362 | 2860 | # -*- coding: utf-8 -*-
"""
requests.exceptions
~~~~~~~~~~~~~~~~~~~
This module contains the set of Requests' exceptions.
"""
from .packages.urllib3.exceptions import HTTPError as BaseHTTPError
class RequestException(IOError):
    """There was an ambiguous exception that occurred while handling your
    request.
    """
    def __init__(self, *args, **kwargs):
        """Initialize RequestException with `request` and `response` objects."""
        self.response = kwargs.pop('response', None)
        self.request = kwargs.pop('request', None)
        # Borrow the request from the response when the caller supplied a
        # response but no (truthy) request of its own.
        should_borrow = (
            self.response is not None
            and not self.request
            and hasattr(self.response, 'request')
        )
        if should_borrow:
            self.request = self.response.request
        super(RequestException, self).__init__(*args, **kwargs)
class HTTPError(RequestException):
    """An HTTP error occurred."""
# --- Transport / connection errors ------------------------------------------
class ConnectionError(RequestException):
    """A Connection error occurred."""
class ProxyError(ConnectionError):
    """A proxy error occurred."""
class SSLError(ConnectionError):
    """An SSL error occurred."""
# --- Timeouts ----------------------------------------------------------------
class Timeout(RequestException):
    """The request timed out.
    Catching this error will catch both
    :exc:`~requests.exceptions.ConnectTimeout` and
    :exc:`~requests.exceptions.ReadTimeout` errors.
    """
class ConnectTimeout(ConnectionError, Timeout):
    """The request timed out while trying to connect to the remote server.
    Requests that produced this error are safe to retry.
    """
class ReadTimeout(Timeout):
    """The server did not send any data in the allotted amount of time."""
# --- URL / request construction errors (several double as ValueError) --------
class URLRequired(RequestException):
    """A valid URL is required to make a request."""
class TooManyRedirects(RequestException):
    """Too many redirects."""
class MissingSchema(RequestException, ValueError):
    """The URL schema (e.g. http or https) is missing."""
class InvalidSchema(RequestException, ValueError):
    """See defaults.py for valid schemas."""
class InvalidURL(RequestException, ValueError):
    """The URL provided was somehow invalid."""
class InvalidHeader(RequestException, ValueError):
    """The header value provided was somehow invalid."""
# --- Response body / content errors ------------------------------------------
class ChunkedEncodingError(RequestException):
    """The server declared chunked encoding but sent an invalid chunk."""
class ContentDecodingError(RequestException, BaseHTTPError):
    """Failed to decode response content"""
class StreamConsumedError(RequestException, TypeError):
    """The content for this response was already consumed"""
class RetryError(RequestException):
    """Custom retries logic failed"""
# Warnings
class RequestsWarning(Warning):
    """Base warning for Requests."""
    pass
class FileModeWarning(RequestsWarning, DeprecationWarning):
    """A file was opened in text mode, but Requests determined its binary length."""
    pass
| mit |
rwl/PyCIM | CIM14/ENTSOE/Dynamics/IEC61968/__init__.py | 1 | 1228 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# XML namespace constants for the CIM 14 IEC 61968 profile package.
nsURI = "http://iec.ch/TC57/2009/CIM-schema-cim14?profile=http://iec.ch/TC57/2007/profile#IEC61968"
nsPrefix = "cimIEC61968"
| mit |
bitifirefly/edx-platform | common/djangoapps/course_modes/migrations/0002_auto__add_field_coursemode_currency.py | 114 | 1475 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # Frozen South schema migration: adds CourseMode.currency with default
    # 'usd'.  Applied migrations are historical artifacts -- do not edit.
    def forwards(self, orm):
        # Adding field 'CourseMode.currency'
        db.add_column('course_modes_coursemode', 'currency',
                      self.gf('django.db.models.fields.CharField')(default='usd', max_length=8),
                      keep_default=False)
    def backwards(self, orm):
        # Deleting field 'CourseMode.currency'
        db.delete_column('course_modes_coursemode', 'currency')
    # ORM snapshot used by South at migration time (not the live models).
    models = {
        'course_modes.coursemode': {
            'Meta': {'object_name': 'CourseMode'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'min_price': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'mode_display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'mode_slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'suggested_prices': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
        }
    }
    complete_apps = ['course_modes']
| agpl-3.0 |
bobthekingofegypt/servo | tests/unit/stylo/check_bindings.py | 35 | 1339 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import re
# Paths are resolved relative to the test's working directory; generated
# files go into the directory named by the OUT_DIR environment variable
# (set by the build system).
ROOT_PATH = os.path.join("..", "..", "..")
INPUT_FILE = os.path.join(ROOT_PATH, "components", "style", "gecko_bindings", "bindings.rs")
OUTPUT_FILE = os.path.join(os.environ["OUT_DIR"], "check_bindings.rs")
GLUE_FILE = os.path.join(ROOT_PATH, "ports", "geckolib", "glue.rs")
GLUE_OUTPUT_FILE = os.path.join(os.environ["OUT_DIR"], "glue.rs")
TEMPLATE = """\
[ Servo_{name}, bindings::Servo_{name} ];
"""
# Emit one type-identity check per Servo_* binding found in bindings.rs.
with open(INPUT_FILE, "r") as bindings, open(OUTPUT_FILE, "w+") as tests:
    tests.write("fn assert_types() {\n")
    # Raw string: "\s" and "\(" are regex escapes, not Python string escapes
    # (a plain string here triggers invalid-escape warnings on Python 3.6+).
    pattern = re.compile(r"fn\s*Servo_([a-zA-Z0-9_]+)\s*\(")
    for line in bindings:
        match = pattern.search(line)
        # GetStyleVariables is a Servo_* function, but temporarily defined on
        # the gecko side
        if match and match.group(1) != "GetStyleVariables":
            tests.write(TEMPLATE.format(name=match.group(1)))
    tests.write("}\n")
# Copy glue.rs, marking every C-ABI entry point unsafe.
with open(GLUE_FILE, "r") as glue, open(GLUE_OUTPUT_FILE, "w+") as glue_output:
    for line in glue:
        glue_output.write(line.replace("pub extern \"C\" fn", "pub unsafe extern \"C\" fn"))
| mpl-2.0 |
CMLL/wesnoth | utils/pofix.py | 15 | 9455 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pofix - perform string fixups on incoming .po files.
#
# The purpose of this script is to save translators from having to
# apply various string fixes needed before stable release by hand. It is
# intended to be run on each incoming .po file as the Lord of
# Translations receives it. However, translators may run it on their
# own .po files to be sure, as a second application will harmlessly do
# nothing.
#
# To use this script, give it one or more paths to .po files as
# command-line arguments. Each file will be tweaked as needed.
# It should work on Windows and MacOS X as well as Linux, provided
# you have Python installed.
#
# This script will emit a report line for each file it modifies,
# and save a backup copy of the original with extension "-bak".
#
# This script will tell you when it is obsolete. Run it against all .po
# files in the main Wesnoth tree; when it says none are older than this script,
# it can be discarded (assuming that it has in fact been used to transform
# all incoming .po files in the meantime).
#
#
# NOTE: IMPORTANT!
# When altering this file ALWAYS use the following steps:
# * from the checkout root call: ./utils/pofix.py po/wesnoth*/wesnoth*.pot
# * check if any strings were changed and if only the intended strings were changed
# by using e.g. "normal" diff tools or git diff on the changed .pot files
# * if everything was fine, proceed, if something went wrong revert the changed pot
# files, adjust pofix.py and rerun the above step
# * run: ./utils/pofix.py po/wesnoth*/*.po
# * commit all changed files together (pofix.py as well as the changed pot and po
# files)
#
#
# (old) example usage:
# utils/pofix.py po/wesnoth*/*.po*
# find data/campaigns/ -name '*.cfg' -print0 | xargs -0 utils/pofix.py
#
# To make use of >1 CPU core, you have to rely on xargs. In this sample 10 files
# are handed over to 4 instances of pofix.py:
# ls po/wesnoth*/*.po* | xargs -P 4 -n 10 ./utils/pofix.py
#
#
# Please do make sure to add a comment before any new blocks of conversions
# that states when it was added (current version number is enough) so that
# the file can be cleaned up more easily every now and then.
# Example:
# # conversion added in 1.9.5+dev
# ("foo addwd bar", "foo added bar"),
# # conversion added in 1.9.8+dev
# ("fooba foo", "foobar foo"),
#
# NOTE:
# Those "textdomain" entries are *only* a hint and don't influence the files on
# which pofix will be applied. All replacements will always be applied on *ALL*
# files!
game_stringfixes = {
"wesnoth" : (
# conversion added in 1.11.10+dev
("Save and Abort game", "Save and abort game"),
),
"wesnoth-editor" : (
# conversion added in 1.11.10+dev
("Choose file", "Choose File"),
),
"wesnoth-lib" : (
# conversion added in 1.11.15+dev
("SP/MP Campaigns", "SP/MP campaigns"),
),
"wesnoth-httt" : (
# fix added in 1.10.0+dev
("Second, who you most", "Second, whom you most"),
# fix added in 1.11.16+dev
("Who then is your leader? Who do we serve?", "Who then is your leader? Whom do we serve?"),
),
"wesnoth-nr" : (
# fixes added in 1.12.0+dev
("They are stronger then we thought.", "They are stronger than we thought."),
("Hmmm, they are stronger then we thought", "Hmmm, they are stronger than we thought"),
("torment other then self destruction.", "torment other than self destruction."),
("Rod of Justice for more then a few", "Rod of Justice for more than a few"),
("you have aided the Northern Elves more then you can imagine.", "you have aided the Northern Elves more than you can imagine."),
("been more then a few months ago", "been more than a few months ago"),
("they cannot be more then two days’ march from here.", "they cannot be more than two days’ march from here."),
("It couldna’ been more then a day now.", "It couldna’ been more than a day now."),
("It couldna’ ha’ been more then a day now.", "It couldna’ ha’ been more than a day now."),
("They are no more then a few days", "They are no more than a few days"),
("fearsome a foe then a dwarf.", "fearsome a foe than a dwarf."),
("hold the orcs off far longer and with less loss then ye could ha’ done", "hold the orcs off far longer and with less loss than ye could ha’ done"),
("Bah! I have better things to do then stamp out your insignificant life.", "Bah! I have better things to do than stamp out your insignificant life."),
),
"wesnoth-sotbe" : (
# fixes added in 1.12.0+dev
("Easier said then done, Chief. There are many humans in that city.", "Easier said than done, Chief. There are many humans in that city."),
("then your kind can. Take the orcish prisoners and hurry to Melmog.", "than your kind can. Take the orcish prisoners and hurry to Melmog."),
("Better late then never. Now it’s time to kill!", "Better late than never. Now it’s time to kill!"),
("becomes no less then a boot-licking spy for the humans.", "becomes no less than a boot-licking spy for the humans."),
("consequently, those orcs thirsting for battle got more then", "consequently, those orcs thirsting for battle got more than"),
),
"wesnoth-tutorial" : (
# conversion added in 1.11.0-dev
("$unit.type", "$unit.language_name"),
),
"wesnoth-utbs" : (
# fixes added in 1.12.0+dev
("On the tallest peak was build", "On the tallest peak was built"),
("He killed himself rather then surrender to us!", "He killed himself rather than surrender to us!"),
("bigger distraction then they were expecting.", "bigger distraction than they were expecting."),
),
}
website_stringfixes = {
"1.12-announcement" : (
("wesnoth-1.12.2/wesnoth-1.12.2.tar.bz2", "wesnoth-1.12.4/wesnoth-1.12.4.tar.bz2"),
("wesnoth-1.12.2/wesnoth-1.12.2-win32.exe", "wesnoth-1.12.4/wesnoth-1.12.4a-win32.exe"),
("wesnoth-1.12.2/Wesnoth_1.12.2.dmg", "wesnoth-1.12.4/Wesnoth_1.12.4.dmg"),
),
}
# Whether -w was passed in the command line. Selects website_stringfixes
# instead of game_stringfixes.
website_mode = 0
# Speak, if all argument files are newer than this timestamp
# Try to use UTC here
# date --utc "+%s # %c"
timecheck = 1283156523 # Mo 30 Aug 2010 08:22:03 UTC
import os, sys, time, stat, re, argparse
try:
    from multiprocessing import Pool, cpu_count
    def parallel_map(*args, **kw):
        # map() across a pool with one worker per CPU core.
        pool = Pool(cpu_count())
        return pool.map(*args, **kw)
except ImportError:
    # Graceful degradation: fall back to serial map on interpreters
    # without multiprocessing support.
    print ("Failed to import 'multiprocessing' module. Multiple cpu cores won't be utilized")
    parallel_map = map
def process_file(path):
    """Apply the active string-fix table to one file, in place.

    Returns 1 if the file was changed (the original is kept beside it as
    ``path + "-bak"``), 0 otherwise.  Reads the module globals
    ``website_mode``, ``game_stringfixes`` and ``website_stringfixes``.
    """
    # "with" guarantees the handle is closed (the old bare open().read()
    # leaked it on non-refcounting interpreters).
    with open(path, "r") as ifp:
        before = ifp.read()
    # Comment lines (including "#~" obsolete msgids) are ignored when
    # deciding whether a fix has already been applied.
    decommented = re.sub("#.*", "", before)
    lines = before.split('\n')
    if website_mode:
        stringfixes = website_stringfixes
    else:
        stringfixes = game_stringfixes
    for (domain, fixes) in stringfixes.items():
        # In case of screwed-up pairs that are hard to find, uncomment the following:
        #for fix in fixes:
        #    if len(fix) != 2:
        #        print fix
        for (old, new) in fixes:
            # Equality, not identity: the old "old is new" check only caught
            # interned strings, so equal-but-distinct pairs slipped through.
            if old == new:
                #complain loudly
                print ("pofix: old string\n\t\"%s\"\n equals new string\n\t\"%s\"\nexiting." % (old, new))
                sys.exit(1)
            #this check is problematic and the last clause is added to prevent false
            #positives in case that new is a substring of old, though this can also
            #lead to "real" probs not found, the real check would be "does replacing
            #old with new lead to duplicate msgids? (including old ones marked with #~)"
            #which is not easily done in the current design...
            elif new in decommented and old in decommented and new not in old:
                print ("pofix: %s already includes the new string\n\t\"%s\"\nbut also the old\n\t\"%s\"\nthis needs handfixing for now since it likely creates duplicate msgids." % (path, new, old))
            else:
                # Apply the substitution to every non-comment line.
                for (i, line) in enumerate(lines):
                    if line and line[0] != '#':
                        lines[i] = lines[i].replace(old, new)
    after = '\n'.join(lines)
    if after != before:
        print ("pofix: %s modified" % path)
        # Save a backup
        os.rename(path, path + "-bak")
        # Write out transformed version
        with open(path, "w") as ofp:
            ofp.write(after)
        return 1
    else:
        return 0
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-w', action='store_true', dest='website_mode',
                        help='selects the website string fixes table instead of the game string fixes table')
    parser.add_argument('paths', nargs='*')
    args = parser.parse_args()
    website_mode = args.website_mode
    if website_mode:
        print("pofix: Using website string fixes table")
    newer = 0
    modified = 0
    pocount = 0
    files = []
    # First pass: filter candidate files and count those newer than the
    # obsolescence timestamp; the actual edits happen in parallel below.
    for path in args.paths:
        # Only translation catalogs, WML config and HTML files are eligible.
        if not path.endswith(".po") and not path.endswith(".pot") and not path.endswith(".cfg") and not path.endswith(".html"):
            continue
        pocount += 1
        # Notice how many files are newer than the time check
        statinfo = os.stat(path)
        if statinfo.st_mtime > timecheck:
            newer += 1
        files.append(path)
    # process_file returns 1 per modified file; summing gives the count.
    modified = sum(parallel_map(process_file, files))
    print ("pofix: %d files processed, %d files modified, %d files newer" \
        % (pocount, modified, newer))
    # Every candidate postdating the timestamp suggests all fixes landed.
    if pocount > 1 and newer == pocount:
        print ("pofix: script may be obsolete")
| gpl-2.0 |
jimcunderwood/MissionPlanner | Lib/lib2to3/fixes/fix_imports.py | 61 | 5838 | """Fix incompatible imports and module references."""
# Authors: Collin Winter, Nick Edds
# Local imports
from .. import fixer_base
from ..fixer_util import Name, attr_chain
MAPPING = {'StringIO': 'io',
'cStringIO': 'io',
'cPickle': 'pickle',
'__builtin__' : 'builtins',
'copy_reg': 'copyreg',
'Queue': 'queue',
'SocketServer': 'socketserver',
'ConfigParser': 'configparser',
'repr': 'reprlib',
'FileDialog': 'tkinter.filedialog',
'tkFileDialog': 'tkinter.filedialog',
'SimpleDialog': 'tkinter.simpledialog',
'tkSimpleDialog': 'tkinter.simpledialog',
'tkColorChooser': 'tkinter.colorchooser',
'tkCommonDialog': 'tkinter.commondialog',
'Dialog': 'tkinter.dialog',
'Tkdnd': 'tkinter.dnd',
'tkFont': 'tkinter.font',
'tkMessageBox': 'tkinter.messagebox',
'ScrolledText': 'tkinter.scrolledtext',
'Tkconstants': 'tkinter.constants',
'Tix': 'tkinter.tix',
'ttk': 'tkinter.ttk',
'Tkinter': 'tkinter',
'markupbase': '_markupbase',
'_winreg': 'winreg',
'thread': '_thread',
'dummy_thread': '_dummy_thread',
# anydbm and whichdb are handled by fix_imports2
'dbhash': 'dbm.bsd',
'dumbdbm': 'dbm.dumb',
'dbm': 'dbm.ndbm',
'gdbm': 'dbm.gnu',
'xmlrpclib': 'xmlrpc.client',
'DocXMLRPCServer': 'xmlrpc.server',
'SimpleXMLRPCServer': 'xmlrpc.server',
'httplib': 'http.client',
'htmlentitydefs' : 'html.entities',
'HTMLParser' : 'html.parser',
'Cookie': 'http.cookies',
'cookielib': 'http.cookiejar',
'BaseHTTPServer': 'http.server',
'SimpleHTTPServer': 'http.server',
'CGIHTTPServer': 'http.server',
#'test.test_support': 'test.support',
'commands': 'subprocess',
'UserString' : 'collections',
'UserList' : 'collections',
'urlparse' : 'urllib.parse',
'robotparser' : 'urllib.robotparser',
}
def alternates(members):
    """Build a pattern alternation such as "('a'|'b')" from member names."""
    body = "|".join(map(repr, members))
    return "(%s)" % body
def build_pattern(mapping=MAPPING):
    """Yield the lib2to3 match patterns covering every import form of the
    renamed modules in *mapping*, plus bare ``module.attr`` usages."""
    mod_list = ' | '.join(["module_name='%s'" % key for key in mapping])
    bare_names = alternates(mapping.keys())
    # "import x" and "import x, y" forms.
    yield """name_import=import_name< 'import' ((%s) |
               multiple_imports=dotted_as_names< any* (%s) any* >) >
          """ % (mod_list, mod_list)
    # "from x import ..." forms (plain, aliased, parenthesized lists).
    yield """import_from< 'from' (%s) 'import' ['(']
              ( any | import_as_name< any 'as' any > |
                import_as_names< any* >) [')'] >
          """ % mod_list
    # "import x as y" forms, alone or inside a multi-import statement.
    yield """import_name< 'import' (dotted_as_name< (%s) 'as' any > |
               multiple_imports=dotted_as_names<
               any* dotted_as_name< (%s) 'as' any > any* >) >
          """ % (mod_list, mod_list)
    # Find usages of module members in code e.g. thread.foo(bar)
    yield "power< bare_with_attr=(%s) trailer<'.' any > any* >" % bare_names
class FixImports(fixer_base.BaseFix):
    """lib2to3 fixer that rewrites Python 2 stdlib module names -- both in
    import statements and in bare ``module.attr`` usages -- to their
    Python 3 locations, as given by ``mapping``."""
    BM_compatible = True
    keep_line_order = True
    # This is overridden in fix_imports2.
    mapping = MAPPING
    # We want to run this fixer late, so fix_import doesn't try to make stdlib
    # renames into relative imports.
    run_order = 6
    def build_pattern(self):
        # Combine the per-form patterns into a single alternation.
        return "|".join(build_pattern(self.mapping))
    def compile_pattern(self):
        # We override this, so MAPPING can be pragmatically altered and the
        # changes will be reflected in PATTERN.
        self.PATTERN = self.build_pattern()
        super(FixImports, self).compile_pattern()
    # Don't match the node if it's within another match.
    def match(self, node):
        match = super(FixImports, self).match
        results = match(node)
        if results:
            # Module usage could be in the trailer of an attribute lookup, so we
            # might have nested matches when "bare_with_attr" is present.
            if "bare_with_attr" not in results and \
               any(match(obj) for obj in attr_chain(node, "parent")):
                return False
            return results
        return False
    def start_tree(self, tree, filename):
        # Per-tree map of bare-module renames, so later attribute usages in
        # the same tree can be rewritten consistently.
        super(FixImports, self).start_tree(tree, filename)
        self.replace = {}
    def transform(self, node, results):
        """Rewrite one matched import statement or bare module usage."""
        import_mod = results.get("module_name")
        if import_mod:
            mod_name = import_mod.value
            new_name = unicode(self.mapping[mod_name])
            import_mod.replace(Name(new_name, prefix=import_mod.prefix))
            if "name_import" in results:
                # If it's not a "from x import x, y" or "import x as y" import,
                # mark its usage to be replaced.
                self.replace[mod_name] = new_name
            if "multiple_imports" in results:
                # This is a nasty hack to fix multiple imports on a line (e.g.,
                # "import StringIO, urlparse"). The problem is that I can't
                # figure out an easy way to make a pattern recognize the keys of
                # MAPPING randomly sprinkled in an import statement.
                results = self.match(node)
                if results:
                    self.transform(node, results)
        else:
            # Replace usage of the module.
            bare_name = results["bare_with_attr"][0]
            new_name = self.replace.get(bare_name.value)
            if new_name:
                bare_name.replace(Name(new_name, prefix=bare_name.prefix))
| gpl-3.0 |
stewnorriss/letsencrypt | letsencrypt/client.py | 13 | 20023 | """Let's Encrypt client API."""
import logging
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
import OpenSSL
import zope.component
from acme import client as acme_client
from acme import jose
from acme import messages
from letsencrypt import account
from letsencrypt import auth_handler
from letsencrypt import configuration
from letsencrypt import constants
from letsencrypt import continuity_auth
from letsencrypt import crypto_util
from letsencrypt import errors
from letsencrypt import interfaces
from letsencrypt import le_util
from letsencrypt import reverter
from letsencrypt import revoker
from letsencrypt import storage
from letsencrypt.display import ops as display_ops
from letsencrypt.display import enhancements
logger = logging.getLogger(__name__)
def _acme_from_config_key(config, key):
    """Build an ACME client for the configured server using *key*."""
    # TODO: Allow for other alg types besides RS256
    verify = not config.no_verify_ssl
    return acme_client.Client(
        new_reg_uri=config.server, key=key, verify_ssl=verify)
def register(config, account_storage, tos_cb=None):
    """Register new account with an ACME CA.

    This function takes care of generating fresh private key,
    registering the account, optionally accepting CA Terms of Service
    and finally saving the account. It should be called prior to
    initialization of `Client`, unless account has already been created.

    :param .IConfig config: Client configuration.

    :param .AccountStorage account_storage: Account storage where newly
        registered account will be saved to. Save happens only after TOS
        acceptance step, so any account private keys or
        `.RegistrationResource` will not be persisted if `tos_cb`
        returns ``False``.

    :param tos_cb: If ACME CA requires the user to accept a Terms of
        Service before registering account, client action is
        necessary. For example, a CLI tool would prompt the user
        acceptance. `tos_cb` must be a callable that should accept
        `.RegistrationResource` and return a `bool`: ``True`` iff the
        Terms of Service present in the contained
        `.Registration.terms_of_service` is accepted by the client, and
        ``False`` otherwise. ``tos_cb`` will be called only if the
        client action is necessary, i.e. when ``terms_of_service is not
        None``. This argument is optional, if not supplied it will
        default to automatic acceptance!

    :raises letsencrypt.errors.Error: In case of any client problems, in
        particular registration failure, or unaccepted Terms of Service.
    :raises acme.errors.Error: In case of any protocol problems.

    :returns: Newly registered and saved account, as well as protocol
        API handle (should be used in `Client` initialization).
    :rtype: `tuple` of `.Account` and `acme.client.Client`

    """
    # Log non-standard actions, potentially wrong API calls
    if account_storage.find_all():
        logger.info("There are already existing accounts for %s",
                    config.server)
    if config.email is None:
        # FIX: logger.warn is a deprecated alias; use logger.warning.
        logger.warning("Registering without email!")

    # Each new registration shall use a fresh new key
    key = jose.JWKRSA(key=jose.ComparableRSAKey(
        rsa.generate_private_key(
            public_exponent=65537,
            key_size=config.rsa_key_size,
            backend=default_backend())))
    acme = _acme_from_config_key(config, key)
    # TODO: add phone?
    regr = acme.register(messages.NewRegistration.from_data(email=config.email))

    if regr.terms_of_service is not None:
        # Without a callback we auto-accept; with one, persist nothing
        # unless the callback approves the Terms of Service.
        if tos_cb is not None and not tos_cb(regr):
            raise errors.Error(
                "Registration cannot proceed without accepting "
                "Terms of Service.")
        regr = acme.agree_to_tos(regr)

    acc = account.Account(regr, key)
    account.report_new_account(acc, config)
    # Save only after TOS acceptance so rejected registrations leave no
    # account material behind.
    account_storage.save(acc)
    return acc, acme
class Client(object):
    """ACME protocol client.

    :ivar .IConfig config: Client configuration.
    :ivar .Account account: Account registered with `register`.
    :ivar .AuthHandler auth_handler: Authorizations handler that will
        dispatch DV and Continuity challenges to appropriate
        authenticators (providing `.IAuthenticator` interface).
    :ivar .IInstaller installer: Installer.
    :ivar acme.client.Client acme: Optional ACME client API handle.
        You might already have one from `register`.

    """

    def __init__(self, config, account_, dv_auth, installer, acme=None):
        """Initialize a client.

        :param .IAuthenticator dv_auth: Prepared (`.IAuthenticator.prepare`)
            authenticator that can solve the `.constants.DV_CHALLENGES`.

        """
        self.config = config
        self.account = account_
        self.installer = installer

        # Initialize ACME if account is provided
        if acme is None and self.account is not None:
            acme = _acme_from_config_key(config, self.account.key)
        self.acme = acme

        # TODO: Check if self.config.enroll_autorenew is None. If
        # so, set it based to the default: figure out if dv_auth is
        # standalone (then default is False, otherwise default is True)

        if dv_auth is not None:
            cont_auth = continuity_auth.ContinuityAuthenticator(config,
                                                                installer)
            self.auth_handler = auth_handler.AuthHandler(
                dv_auth, cont_auth, self.acme, self.account)
        else:
            self.auth_handler = None

    def _obtain_certificate(self, domains, csr):
        """Obtain certificate.

        Internal function with precondition that `domains` are
        consistent with identifiers present in the `csr`.

        :param list domains: Domain names.
        :param .le_util.CSR csr: DER-encoded Certificate Signing
            Request. The key used to generate this CSR can be different
            than `authkey`.

        :returns: `.CertificateResource` and certificate chain (as
            returned by `.fetch_chain`).
        :rtype: tuple

        """
        if self.auth_handler is None:
            msg = ("Unable to obtain certificate because authenticator is "
                   "not set.")
            logger.warning(msg)
            raise errors.Error(msg)

        if self.account.regr is None:
            raise errors.Error("Please register with the ACME server first.")

        logger.debug("CSR: %s, domains: %s", csr, domains)

        authzr = self.auth_handler.get_authorizations(domains)

        certr = self.acme.request_issuance(
            jose.ComparableX509(OpenSSL.crypto.load_certificate_request(
                OpenSSL.crypto.FILETYPE_ASN1, csr.data)),
            authzr)

        return certr, self.acme.fetch_chain(certr)

    def obtain_certificate_from_csr(self, csr):
        """Obtain certificate from CSR.

        :param .le_util.CSR csr: DER-encoded Certificate Signing
            Request.

        :returns: `.CertificateResource` and certificate chain (as
            returned by `.fetch_chain`).
        :rtype: tuple

        """
        return self._obtain_certificate(
            # TODO: add CN to domains?
            crypto_util.get_sans_from_csr(
                csr.data, OpenSSL.crypto.FILETYPE_ASN1), csr)

    def obtain_certificate(self, domains):
        """Obtains a certificate from the ACME server.

        `.register` must be called before `.obtain_certificate`

        :param set domains: domains to get a certificate

        :returns: `.CertificateResource`, certificate chain (as
            returned by `.fetch_chain`), and newly generated private key
            (`.le_util.Key`) and DER-encoded Certificate Signing Request
            (`.le_util.CSR`).
        :rtype: tuple

        """
        # Create CSR from names
        key = crypto_util.init_save_key(
            self.config.rsa_key_size, self.config.key_dir)
        csr = crypto_util.init_save_csr(key, domains, self.config.cert_dir)

        return self._obtain_certificate(domains, csr) + (key, csr)

    def obtain_and_enroll_certificate(
            self, domains, authenticator, installer, plugins):
        """Obtain and enroll certificate.

        Get a new certificate for the specified domains using the specified
        authenticator and installer, and then create a new renewable lineage
        containing it.

        :param list domains: Domains to request.
        :param authenticator: The authenticator to use.
        :type authenticator: :class:`letsencrypt.interfaces.IAuthenticator`

        :param installer: The installer to use.
        :type installer: :class:`letsencrypt.interfaces.IInstaller`

        :param plugins: A PluginsFactory object.

        :returns: A new :class:`letsencrypt.storage.RenewableCert` instance
            referred to the enrolled cert lineage, or False if the cert could
            not be obtained.

        """
        certr, chain, key, _ = self.obtain_certificate(domains)

        # TODO: remove this dirty hack
        self.config.namespace.authenticator = plugins.find_init(
            authenticator).name
        if installer is not None:
            self.config.namespace.installer = plugins.find_init(installer).name

        # XXX: We clearly need a more general and correct way of getting
        # options into the configobj for the RenewableCert instance.
        # This is a quick-and-dirty way to do it to allow integration
        # testing to start. (Note that the config parameter to new_lineage
        # ideally should be a ConfigObj, but in this case a dict will be
        # accepted in practice.)
        params = vars(self.config.namespace)
        config = {}
        cli_config = configuration.RenewerConfiguration(self.config.namespace)

        if (cli_config.config_dir != constants.CLI_DEFAULTS["config_dir"] or
                cli_config.work_dir != constants.CLI_DEFAULTS["work_dir"]):
            logger.warning(
                "Non-standard path(s), might not work with crontab installed "
                "by your operating system package manager")

        # XXX: just to stop RenewableCert from complaining; this is
        # probably not a good solution
        chain_pem = "" if chain is None else OpenSSL.crypto.dump_certificate(
            OpenSSL.crypto.FILETYPE_PEM, chain)
        lineage = storage.RenewableCert.new_lineage(
            domains[0], OpenSSL.crypto.dump_certificate(
                OpenSSL.crypto.FILETYPE_PEM, certr.body),
            key.pem, chain_pem, params, config, cli_config)
        self._report_renewal_status(lineage)
        return lineage

    def _report_renewal_status(self, cert):
        # pylint: disable=no-self-use
        """Informs the user about automatic renewal and deployment.

        :param .RenewableCert cert: Newly issued certificate

        """
        # Both settings default to "on" when absent from the configuration.
        if ("autorenew" not in cert.configuration
                or cert.configuration.as_bool("autorenew")):
            if ("autodeploy" not in cert.configuration or
                    cert.configuration.as_bool("autodeploy")):
                msg = "Automatic renewal and deployment has "
            else:
                msg = "Automatic renewal but not automatic deployment has "
        else:
            if ("autodeploy" not in cert.configuration or
                    cert.configuration.as_bool("autodeploy")):
                msg = "Automatic deployment but not automatic renewal has "
            else:
                msg = "Automatic renewal and deployment has not "

        msg += ("been enabled for your certificate. These settings can be "
                "configured in the directories under {0}.").format(
                    cert.cli_config.renewal_configs_dir)
        reporter = zope.component.getUtility(interfaces.IReporter)
        reporter.add_message(msg, reporter.LOW_PRIORITY, True)

    def save_certificate(self, certr, chain_cert, cert_path, chain_path):
        # pylint: disable=no-self-use
        """Saves the certificate received from the ACME server.

        :param certr: ACME "certificate" resource.
        :type certr: :class:`acme.messages.Certificate`

        :param chain_cert:
        :param str cert_path: Candidate path to a certificate.
        :param str chain_path: Candidate path to a certificate chain.

        :returns: cert_path, chain_path (absolute paths to the actual files)
        :rtype: `tuple` of `str`

        :raises IOError: If unable to find room to write the cert files

        """
        for path in cert_path, chain_path:
            le_util.make_or_verify_dir(
                os.path.dirname(path), 0o755, os.geteuid())

        # try finally close
        cert_chain_abspath = None
        cert_file, act_cert_path = le_util.unique_file(cert_path, 0o644)
        # TODO: Except
        cert_pem = OpenSSL.crypto.dump_certificate(
            OpenSSL.crypto.FILETYPE_PEM, certr.body)
        try:
            cert_file.write(cert_pem)
        finally:
            cert_file.close()
        logger.info("Server issued certificate; certificate written to %s",
                    act_cert_path)

        if chain_cert is not None:
            chain_file, act_chain_path = le_util.unique_file(
                chain_path, 0o644)
            # TODO: Except
            chain_pem = OpenSSL.crypto.dump_certificate(
                OpenSSL.crypto.FILETYPE_PEM, chain_cert)

            try:
                chain_file.write(chain_pem)
            finally:
                chain_file.close()

            logger.info("Cert chain written to %s", act_chain_path)

            # This expects a valid chain file
            cert_chain_abspath = os.path.abspath(act_chain_path)

        return os.path.abspath(act_cert_path), cert_chain_abspath

    def deploy_certificate(self, domains, privkey_path, cert_path, chain_path):
        """Install certificate

        :param list domains: list of domains to install the certificate
        :param str privkey_path: path to certificate private key
        :param str cert_path: certificate file path (optional)
        :param str chain_path: chain file path

        """
        if self.installer is None:
            # FIX: the two literals previously concatenated without a space,
            # producing "unable to deploythe certificate" in the log.
            logger.warning("No installer specified, client is unable to "
                           "deploy the certificate")
            raise errors.Error("No installer available")

        chain_path = None if chain_path is None else os.path.abspath(chain_path)

        for dom in domains:
            # TODO: Provide a fullchain reference for installers like
            # nginx that want it
            self.installer.deploy_cert(
                dom, os.path.abspath(cert_path),
                os.path.abspath(privkey_path), chain_path)

        self.installer.save("Deployed Let's Encrypt Certificate")
        # sites may have been enabled / final cleanup
        self.installer.restart()

        display_ops.success_installation(domains)

    def enhance_config(self, domains, redirect=None):
        """Enhance the configuration.

        .. todo:: This needs to handle the specific enhancements offered by the
            installer. We will also have to find a method to pass in the chosen
            values efficiently.

        :param list domains: list of domains to configure

        :param redirect: If traffic should be forwarded from HTTP to HTTPS.
        :type redirect: bool or None

        :raises .errors.Error: if no installer is specified in the
            client.

        """
        if self.installer is None:
            logger.warning("No installer is specified, there isn't any "
                           "configuration to enhance.")
            raise errors.Error("No installer available")

        if redirect is None:
            redirect = enhancements.ask("redirect")

        if redirect:
            self.redirect_to_ssl(domains)

    def redirect_to_ssl(self, domains):
        """Redirect all traffic from HTTP to HTTPS

        :param vhost: list of ssl_vhosts
        :type vhost: :class:`letsencrypt.interfaces.IInstaller`

        """
        for dom in domains:
            try:
                self.installer.enhance(dom, "redirect")
            except errors.PluginError:
                # FIX: logger.warn is a deprecated alias; use logger.warning.
                # A failed redirect for one domain should not abort the rest.
                logger.warning("Unable to perform redirect for %s", dom)

        self.installer.save("Add Redirects")
        self.installer.restart()
def validate_key_csr(privkey, csr=None):
    """Validate Key and CSR files.

    Verifies that the client key and csr arguments are valid and correspond to
    one another. This does not currently check the names in the CSR due to
    the inability to read SANs from CSRs in python crypto libraries.

    If csr is left as None, only the key will be validated.

    :param privkey: Key associated with CSR
    :type privkey: :class:`letsencrypt.le_util.Key`

    :param .le_util.CSR csr: CSR

    :raises .errors.Error: when validation fails

    """
    # TODO: Handle all of these problems appropriately
    # The client can eventually do things like prompt the user
    # and allow the user to take more appropriate actions

    # Key must be readable and valid.
    if privkey.pem and not crypto_util.valid_privkey(privkey.pem):
        raise errors.Error("The provided key is not a valid key")

    if csr:
        if csr.form == "der":
            csr_obj = OpenSSL.crypto.load_certificate_request(
                OpenSSL.crypto.FILETYPE_ASN1, csr.data)
            # FIX: csr_obj is an X509Req (a CSR), so it must be serialized
            # with dump_certificate_request; dump_certificate expects an
            # X509 certificate and fails when handed a request object.
            csr = le_util.CSR(
                csr.file, OpenSSL.crypto.dump_certificate_request(
                    OpenSSL.crypto.FILETYPE_PEM, csr_obj), "pem")

        # If CSR is provided, it must be readable and valid.
        if csr.data and not crypto_util.valid_csr(csr.data):
            raise errors.Error("The provided CSR is not a valid CSR")

        # If both CSR and key are provided, the key must be the same key used
        # in the CSR.
        if csr.data and privkey.pem:
            if not crypto_util.csr_matches_pubkey(
                    csr.data, privkey.pem):
                raise errors.Error("The key and CSR do not match")
def rollback(default_installer, checkpoints, config, plugins):
    """Revert configuration the specified number of checkpoints.

    :param int checkpoints: Number of checkpoints to revert.

    :param config: Configuration.
    :type config: :class:`letsencrypt.interfaces.IConfig`

    """
    # Misconfigurations are only a slight problem... allow the user to
    # roll back. Ask which installer should perform the rollback.
    installer = display_ops.pick_installer(
        config, default_installer, plugins, question="Which installer "
        "should be used for rollback?")

    # No installer found means nothing was ever configured, so there is
    # nothing to roll back; bail out early.
    if installer is None:
        return

    installer.rollback_checkpoints(checkpoints)
    installer.restart()
def revoke(default_installer, config, plugins, no_confirm, cert, authkey):
    """Revoke certificates.

    :param config: Configuration.
    :type config: :class:`letsencrypt.interfaces.IConfig`

    """
    installer = display_ops.pick_installer(
        config, default_installer, plugins, question="Which installer "
        "should be used for certificate revocation?")

    revoker_obj = revoker.Revoker(installer, config, no_confirm)
    # Prefer the most selective input: an explicit certificate first,
    # then an authorized key pair, and finally an interactive menu.
    if cert is not None:
        revoker_obj.revoke_from_cert(cert[0])
    elif authkey is not None:
        key = le_util.Key(authkey[0], authkey[1])
        revoker_obj.revoke_from_key(key)
    else:
        revoker_obj.revoke_from_menu()
def view_config_changes(config):
    """View checkpoints and associated configuration changes.

    .. note:: This assumes that the installation is using a Reverter object.

    :param config: Configuration.
    :type config: :class:`letsencrypt.interfaces.IConfig`

    """
    config_reverter = reverter.Reverter(config)
    # Run recovery first so any interrupted operation is cleaned up
    # before the checkpoint history is displayed.
    config_reverter.recovery_routine()
    config_reverter.view_config_changes()
| apache-2.0 |
petemounce/ansible | lib/ansible/modules/web_infrastructure/deploy_helper.py | 41 | 20164 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Jasper N. Brouwer <jasper@nerdsweide.nl>
# (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: deploy_helper
version_added: "2.0"
author: "Ramon de la Fuente (@ramondelafuente)"
short_description: Manages some of the steps common in deploying projects.
description:
- The Deploy Helper manages some of the steps common in deploying software.
It creates a folder structure, manages a symlink for the current release
and cleans up old releases.
- "Running it with the C(state=query) or C(state=present) will return the C(deploy_helper) fact.
C(project_path), whatever you set in the path parameter,
C(current_path), the path to the symlink that points to the active release,
C(releases_path), the path to the folder to keep releases in,
C(shared_path), the path to the folder to keep shared resources in,
C(unfinished_filename), the file to check for to recognize unfinished builds,
C(previous_release), the release the 'current' symlink is pointing to,
C(previous_release_path), the full path to the 'current' symlink target,
C(new_release), either the 'release' parameter or a generated timestamp,
C(new_release_path), the path to the new release folder (not created by the module)."
options:
path:
required: True
aliases: ['dest']
description:
- the root path of the project. Alias I(dest).
Returned in the C(deploy_helper.project_path) fact.
state:
required: False
choices: [ present, finalize, absent, clean, query ]
default: present
description:
- the state of the project.
C(query) will only gather facts,
C(present) will create the project I(root) folder, and in it the I(releases) and I(shared) folders,
C(finalize) will remove the unfinished_filename file, create a symlink to the newly
deployed release and optionally clean old releases,
C(clean) will remove failed & old releases,
C(absent) will remove the project folder (synonymous to the M(file) module with C(state=absent))
release:
required: False
default: None
description:
- the release version that is being deployed. Defaults to a timestamp format %Y%m%d%H%M%S (i.e. '20141119223359').
This parameter is optional during C(state=present), but needs to be set explicitly for C(state=finalize).
You can use the generated fact C(release={{ deploy_helper.new_release }}).
releases_path:
required: False
default: releases
description:
- the name of the folder that will hold the releases. This can be relative to C(path) or absolute.
Returned in the C(deploy_helper.releases_path) fact.
shared_path:
required: False
default: shared
description:
- the name of the folder that will hold the shared resources. This can be relative to C(path) or absolute.
If this is set to an empty string, no shared folder will be created.
Returned in the C(deploy_helper.shared_path) fact.
current_path:
required: False
default: current
description:
- the name of the symlink that is created when the deploy is finalized. Used in C(finalize) and C(clean).
Returned in the C(deploy_helper.current_path) fact.
unfinished_filename:
required: False
default: DEPLOY_UNFINISHED
description:
- the name of the file that indicates a deploy has not finished. All folders in the releases_path that
contain this file will be deleted on C(state=finalize) with clean=True, or C(state=clean). This file is
automatically deleted from the I(new_release_path) during C(state=finalize).
clean:
required: False
default: True
description:
- Whether to run the clean procedure in case of C(state=finalize).
keep_releases:
required: False
default: 5
description:
- the number of old releases to keep when cleaning. Used in C(finalize) and C(clean). Any unfinished builds
will be deleted first, so only correct releases will count. The current version will not count.
notes:
- Facts are only returned for C(state=query) and C(state=present). If you use both, you should pass any overridden
parameters to both calls, otherwise the second call will overwrite the facts of the first one.
- When using C(state=clean), the releases are ordered by I(creation date). You should be able to switch to a
new naming strategy without problems.
- Because of the default behaviour of generating the I(new_release) fact, this module will not be idempotent
unless you pass your own release name with C(release). Due to the nature of deploying software, this should not
be much of a problem.
'''
EXAMPLES = '''
# General explanation, starting with an example folder structure for a project:
# root:
# releases:
# - 20140415234508
# - 20140415235146
# - 20140416082818
#
# shared:
# - sessions
# - uploads
#
# current: releases/20140416082818
# The 'releases' folder holds all the available releases. A release is a complete build of the application being
# deployed. This can be a clone of a repository for example, or a sync of a local folder on your filesystem.
# Having timestamped folders is one way of having distinct releases, but you could choose your own strategy like
# git tags or commit hashes.
#
# During a deploy, a new folder should be created in the releases folder and any build steps required should be
# performed. Once the new build is ready, the deploy procedure is 'finalized' by replacing the 'current' symlink
# with a link to this build.
#
# The 'shared' folder holds any resource that is shared between releases. Examples of this are web-server
# session files, or files uploaded by users of your application. It's quite common to have symlinks from a release
# folder pointing to a shared/subfolder, and creating these links would be automated as part of the build steps.
#
# The 'current' symlink points to one of the releases. Probably the latest one, unless a deploy is in progress.
# The web-server's root for the project will go through this symlink, so the 'downtime' when switching to a new
# release is reduced to the time it takes to switch the link.
#
# To distinguish between successful builds and unfinished ones, a file can be placed in the folder of the release
# that is currently in progress. The existence of this file will mark it as unfinished, and allow an automated
# procedure to remove it during cleanup.
# Typical usage
- name: Initialize the deploy root and gather facts
deploy_helper:
path: /path/to/root
- name: Clone the project to the new release folder
git:
repo: git://foosball.example.org/path/to/repo.git
dest: '{{ deploy_helper.new_release_path }}'
version: v1.1.1
- name: Add an unfinished file, to allow cleanup on successful finalize
file:
path: '{{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}'
state: touch
- name: Perform some build steps, like running your dependency manager for example
composer:
command: install
working_dir: '{{ deploy_helper.new_release_path }}'
- name: Create some folders in the shared folder
file:
path: '{{ deploy_helper.shared_path }}/{{ item }}'
state: directory
with_items:
- sessions
- uploads
- name: Add symlinks from the new release to the shared folder
file:
path: '{{ deploy_helper.new_release_path }}/{{ item.path }}'
src: '{{ deploy_helper.shared_path }}/{{ item.src }}'
state: link
with_items:
- path: app/sessions
src: sessions
- path: web/uploads
src: uploads
- name: Finalize the deploy, removing the unfinished file and switching the symlink
deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: finalize
# Retrieving facts before running a deploy
- name: Run 'state=query' to gather facts without changing anything
deploy_helper:
path: /path/to/root
state: query
# Remember to set the 'release' parameter when you actually call 'state=present' later
- name: Initialize the deploy root
deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: present
# all paths can be absolute or relative (to the 'path' parameter)
- deploy_helper:
path: /path/to/root
releases_path: /var/www/project/releases
shared_path: /var/www/shared
current_path: /var/www/active
# Using your own naming strategy for releases (a version tag in this case):
- deploy_helper:
path: /path/to/root
release: v1.1.1
state: present
- deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: finalize
# Using a different unfinished_filename:
- deploy_helper:
path: /path/to/root
unfinished_filename: README.md
release: '{{ deploy_helper.new_release }}'
state: finalize
# Postponing the cleanup of older builds:
- deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: finalize
clean: False
- deploy_helper:
path: /path/to/root
state: clean
# Or running the cleanup ahead of the new deploy
- deploy_helper:
path: /path/to/root
state: clean
- deploy_helper:
path: /path/to/root
state: present
# Keeping more old releases:
- deploy_helper:
path: /path/to/root
release: '{{ deploy_helper.new_release }}'
state: finalize
keep_releases: 10
# Or, if you use 'clean=false' on finalize:
- deploy_helper:
path: /path/to/root
state: clean
keep_releases: 10
# Removing the entire project root folder
- deploy_helper:
path: /path/to/root
state: absent
# Debugging the facts returned by the module
- deploy_helper:
path: /path/to/root
- debug:
var: deploy_helper
'''
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
class DeployHelper(object):
    """State holder and worker for the deploy_helper module.

    Wraps the AnsibleModule and its validated parameters; each public
    method performs one piece of releases/shared/current folder
    management and returns whether (or how many) changes were made,
    honouring check mode throughout.
    """

    def __init__(self, module):
        self.module = module
        self.file_args = module.load_file_common_arguments(module.params)

        # Cache validated module parameters as attributes.
        self.clean = module.params['clean']
        self.current_path = module.params['current_path']
        self.keep_releases = module.params['keep_releases']
        self.path = module.params['path']
        self.release = module.params['release']
        self.releases_path = module.params['releases_path']
        self.shared_path = module.params['shared_path']
        self.state = module.params['state']
        self.unfinished_filename = module.params['unfinished_filename']

    def gather_facts(self):
        """Build the ``deploy_helper`` fact dict (resolved paths and names)."""
        current_path = os.path.join(self.path, self.current_path)
        releases_path = os.path.join(self.path, self.releases_path)
        if self.shared_path:
            shared_path = os.path.join(self.path, self.shared_path)
        else:
            # An empty shared_path disables the shared folder entirely.
            shared_path = None

        previous_release, previous_release_path = self._get_last_release(current_path)

        # Generate a timestamped release name only for query/present;
        # state=finalize requires the caller to pass 'release' explicitly.
        if not self.release and (self.state == 'query' or self.state == 'present'):
            self.release = time.strftime("%Y%m%d%H%M%S")

        if self.release:
            new_release_path = os.path.join(releases_path, self.release)
        else:
            new_release_path = None

        return {
            'project_path': self.path,
            'current_path': current_path,
            'releases_path': releases_path,
            'shared_path': shared_path,
            'previous_release': previous_release,
            'previous_release_path': previous_release_path,
            'new_release': self.release,
            'new_release_path': new_release_path,
            'unfinished_filename': self.unfinished_filename
        }

    def delete_path(self, path):
        """Recursively delete *path*; return True if a change was (or would be) made."""
        if not os.path.lexists(path):
            return False

        if not os.path.isdir(path):
            self.module.fail_json(msg="%s exists but is not a directory" % path)

        # In check mode we only report that a deletion would happen.
        if not self.module.check_mode:
            try:
                shutil.rmtree(path, ignore_errors=False)
            except Exception:
                e = get_exception()
                self.module.fail_json(msg="rmtree failed: %s" % str(e))

        return True

    def create_path(self, path):
        """Ensure directory *path* exists with the requested file attributes."""
        changed = False

        if not os.path.lexists(path):
            changed = True
            if not self.module.check_mode:
                os.makedirs(path)

        elif not os.path.isdir(path):
            self.module.fail_json(msg="%s exists but is not a directory" % path)

        # Also count attribute (owner/mode/...) changes from the common
        # file arguments.
        changed += self.module.set_directory_attributes_if_different(self._get_file_args(path), changed)

        return changed

    def check_link(self, path):
        """Fail early if *path* exists but is not a symlink."""
        if os.path.lexists(path):
            if not os.path.islink(path):
                self.module.fail_json(msg="%s exists but is not a symbolic link" % path)

    def create_link(self, source, link_name):
        """Point *link_name* at *source*, swapping atomically via a temp link."""
        changed = False

        if os.path.islink(link_name):
            norm_link = os.path.normpath(os.path.realpath(link_name))
            norm_source = os.path.normpath(os.path.realpath(source))
            if norm_link == norm_source:
                # Already pointing at the right release.
                changed = False
            else:
                changed = True
                if not self.module.check_mode:
                    if not os.path.lexists(source):
                        self.module.fail_json(msg="the symlink target %s doesn't exists" % source)
                    # Create a temp link next to the real one, then rename:
                    # rename() is atomic, so 'current' is never missing.
                    tmp_link_name = link_name + '.' + self.unfinished_filename
                    if os.path.islink(tmp_link_name):
                        os.unlink(tmp_link_name)
                    os.symlink(source, tmp_link_name)
                    os.rename(tmp_link_name, link_name)
        else:
            changed = True
            if not self.module.check_mode:
                os.symlink(source, link_name)

        return changed

    def remove_unfinished_file(self, new_release_path):
        """Delete the unfinished-marker file from the new release, if present."""
        changed = False
        unfinished_file_path = os.path.join(new_release_path, self.unfinished_filename)
        if os.path.lexists(unfinished_file_path):
            changed = True
            if not self.module.check_mode:
                os.remove(unfinished_file_path)

        return changed

    def remove_unfinished_builds(self, releases_path):
        """Delete every release folder still containing the unfinished marker."""
        changes = 0

        for release in os.listdir(releases_path):
            if os.path.isfile(os.path.join(releases_path, release, self.unfinished_filename)):
                if self.module.check_mode:
                    changes += 1
                else:
                    changes += self.delete_path(os.path.join(releases_path, release))

        return changes

    def remove_unfinished_link(self, path):
        """Remove a leftover temporary symlink from an aborted finalize.

        NOTE(review): the check_mode guard also suppresses the 'changed'
        report here (unlike the other methods) — confirm intentional.
        """
        changed = False

        tmp_link_name = os.path.join(path, self.release + '.' + self.unfinished_filename)
        if not self.module.check_mode and os.path.exists(tmp_link_name):
            changed = True
            os.remove(tmp_link_name)

        return changed

    def cleanup(self, releases_path, reserve_version):
        """Delete old releases beyond keep_releases, sparing *reserve_version*."""
        changes = 0

        if os.path.lexists(releases_path):
            releases = [f for f in os.listdir(releases_path) if os.path.isdir(os.path.join(releases_path, f))]
            try:
                # The release being finalized never counts toward the limit.
                releases.remove(reserve_version)
            except ValueError:
                pass

            if not self.module.check_mode:
                # Newest first by creation time; everything past the
                # keep_releases cut-off is removed.
                releases.sort(key=lambda x: os.path.getctime(os.path.join(releases_path, x)), reverse=True)
                for release in releases[self.keep_releases:]:
                    changes += self.delete_path(os.path.join(releases_path, release))
            elif len(releases) > self.keep_releases:
                changes += (len(releases) - self.keep_releases)

        return changes

    def _get_file_args(self, path):
        """Return a copy of the common file args targeted at *path*."""
        file_args = self.file_args.copy()
        file_args['path'] = path
        return file_args

    def _get_last_release(self, current_path):
        """Resolve the 'current' symlink to (release name, absolute path)."""
        previous_release = None
        previous_release_path = None

        if os.path.lexists(current_path):
            previous_release_path = os.path.realpath(current_path)
            previous_release = os.path.basename(previous_release_path)

        return previous_release, previous_release_path
def main():
    """Module entry point: parse parameters and dispatch on ``state``."""
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(aliases=['dest'], required=True, type='path'),
            release=dict(required=False, type='str', default=None),
            releases_path=dict(required=False, type='str', default='releases'),
            shared_path=dict(required=False, type='path', default='shared'),
            current_path=dict(required=False, type='path', default='current'),
            keep_releases=dict(required=False, type='int', default=5),
            clean=dict(required=False, type='bool', default=True),
            unfinished_filename=dict(required=False, type='str', default='DEPLOY_UNFINISHED'),
            state=dict(required=False, choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present')
        ),
        add_file_common_args=True,
        supports_check_mode=True
    )

    deploy_helper = DeployHelper(module)
    facts = deploy_helper.gather_facts()

    result = {
        'state': deploy_helper.state
    }

    # Count of changes; any positive value marks the task as 'changed'.
    changes = 0

    if deploy_helper.state == 'query':
        # Facts only; nothing is touched on disk.
        result['ansible_facts'] = {'deploy_helper': facts}

    elif deploy_helper.state == 'present':
        deploy_helper.check_link(facts['current_path'])
        changes += deploy_helper.create_path(facts['project_path'])
        changes += deploy_helper.create_path(facts['releases_path'])
        if deploy_helper.shared_path:
            changes += deploy_helper.create_path(facts['shared_path'])

        result['ansible_facts'] = {'deploy_helper': facts}

    elif deploy_helper.state == 'finalize':
        # finalize needs an explicit release and a sane retention count.
        if not deploy_helper.release:
            module.fail_json(msg="'release' is a required parameter for state=finalize (try the 'deploy_helper.new_release' fact)")
        if deploy_helper.keep_releases <= 0:
            module.fail_json(msg="'keep_releases' should be at least 1")

        changes += deploy_helper.remove_unfinished_file(facts['new_release_path'])
        changes += deploy_helper.create_link(facts['new_release_path'], facts['current_path'])
        if deploy_helper.clean:
            changes += deploy_helper.remove_unfinished_link(facts['project_path'])
            changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
            changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])

    elif deploy_helper.state == 'clean':
        changes += deploy_helper.remove_unfinished_link(facts['project_path'])
        changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
        changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])

    elif deploy_helper.state == 'absent':
        # destroy the facts
        result['ansible_facts'] = {'deploy_helper': []}
        changes += deploy_helper.delete_path(facts['project_path'])

    if changes > 0:
        result['changed'] = True
    else:
        result['changed'] = False

    module.exit_json(**result)

if __name__ == '__main__':
    main()
| gpl-3.0 |
Jollytown/Garuda | server/garuda/lib/python2.7/site-packages/setuptools/tests/test_egg_info.py | 148 | 2744 | import os
import stat
import pytest
from . import environment
from .textwrap import DALS
from . import contexts
class TestEggInfo:
    """Tests for the egg_info command's handling of an alternate egg-base."""

    # Minimal project: one module, one console script, not zip-safe.
    setup_script = DALS("""
        from setuptools import setup
        setup(
            name='foo',
            py_modules=['hello'],
            entry_points={'console_scripts': ['hi = hello.run']},
            zip_safe=False,
        )
        """)

    def _create_project(self):
        """Write setup.py and hello.py into the current directory."""
        with open('setup.py', 'w') as f:
            f.write(self.setup_script)

        with open('hello.py', 'w') as f:
            f.write(DALS("""
                def run():
                    print('hello')
                """))

    @pytest.yield_fixture
    def env(self):
        """Provide an isolated install layout plus a .pydistutils.cfg that
        points egg-base at a dedicated directory."""
        class Environment(str): pass

        with contexts.tempdir(prefix='setuptools-test.') as env_dir:
            env = Environment(env_dir)
            os.chmod(env_dir, stat.S_IRWXU)
            subs = 'home', 'lib', 'scripts', 'data', 'egg-base'
            env.paths = dict(
                (dirname, os.path.join(env_dir, dirname))
                for dirname in subs
            )
            list(map(os.mkdir, env.paths.values()))
            config = os.path.join(env.paths['home'], '.pydistutils.cfg')
            with open(config, 'w') as f:
                f.write(DALS("""
                    [egg_info]
                    egg-base = %(egg-base)s
                    """ % env.paths
                ))
            yield env

    def test_egg_base_installed_egg_info(self, tmpdir_cwd, env):
        self._create_project()

        # Bug fix: dict.update() returns None, so the previous
        # `os.environ.copy().update(...)` passed env=None to run_setup_py.
        environ = os.environ.copy()
        environ.update(
            HOME=env.paths['home'],
        )
        cmd = [
            'install',
            '--home', env.paths['home'],
            '--install-lib', env.paths['lib'],
            '--install-scripts', env.paths['scripts'],
            '--install-data', env.paths['data'],
        ]
        code, data = environment.run_setup_py(
            cmd=cmd,
            pypath=os.pathsep.join([env.paths['lib'], str(tmpdir_cwd)]),
            data_stream=1,
            env=environ,
        )
        if code:
            raise AssertionError(data)

        actual = self._find_egg_info_files(env.paths['lib'])
        expected = [
            'PKG-INFO',
            'SOURCES.txt',
            'dependency_links.txt',
            'entry_points.txt',
            'not-zip-safe',
            'top_level.txt',
        ]
        assert sorted(actual) == expected

    def _find_egg_info_files(self, root):
        """Return the file names inside the single EGG-INFO dir under root."""
        results = (
            filenames
            for dirpath, dirnames, filenames in os.walk(root)
            if os.path.basename(dirpath) == 'EGG-INFO'
        )
        # expect exactly one result
        result, = results
        return result
| mit |
coronary/RandomEpisode | depends/Lib/site-packages/requests/packages/chardet/big5prober.py | 2931 | 1684 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import Big5DistributionAnalysis
from .mbcssm import Big5SMModel
class Big5Prober(MultiByteCharSetProber):
    # Probes for the Big5 (Traditional Chinese) encoding: a coding state
    # machine validates byte sequences while a character-distribution
    # analyser scores how plausible the decoded characters are.
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(Big5SMModel)
        self._mDistributionAnalyzer = Big5DistributionAnalysis()
        self.reset()

    def get_charset_name(self):
        # Canonical name reported to callers when this prober wins detection.
        return "Big5"
| mit |
dvitme/odoo-addons | partner_internal_code/res_partner.py | 5 | 1356 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api
class partner(models.Model):
    """res.partner extension adding a unique, sequence-generated internal code."""
    _inherit = 'res.partner'

    internal_code = fields.Char(
        'Internal Code')

    def name_search(self, cr, uid, name, args=None,
                    operator='ilike', context=None, limit=100):
        """Match partners by internal code first, then fall back to the
        standard name search (results are concatenated)."""
        args = args or []
        res = []
        if name:
            recs = self.search(
                cr, uid, [('internal_code', operator, name)] + args,
                limit=limit, context=context)
            res = self.name_get(cr, uid, recs)
        res += super(partner, self).name_search(
            cr, uid,
            name=name, args=args, operator=operator, limit=limit)
        return res

    @api.model
    def create(self, vals):
        """Assign the next code from the partner.internal.code sequence when
        the caller did not provide one ('/' as a last resort)."""
        if not vals.get('internal_code', False):
            vals['internal_code'] = self.env[
                'ir.sequence'].get('partner.internal.code') or '/'
        return super(partner, self).create(vals)

    # Fix: Odoo expects a *list* of (name, sql, message) tuples here, not a
    # set literal; also corrected the "mast" typo in the user-visible message.
    _sql_constraints = [
        ('internal_code_uniq', 'unique(internal_code)',
         'Internal Code must be unique!'),
    ]
| agpl-3.0 |
fw1121/danmaku-project | server-data/web2py/applications/wanketv/controllers/live.py | 16 | 15346 | import json
import time
dal = DAL('sqlite://wanke.sqlite3.sqlite')
"""
获取推荐页顶部互动的广告信息
http://54.64.105.44/wanketv/live/ads
"""
def ads():
    """Return the banner ads shown on the recommendation page as JSON."""
    parseRequest()
    db = DAL('sqlite://wanke.sqlite3.sqlite')
    rows = db.executesql("select * from ads order by _id")
    data = []
    for row in rows:
        # Column layout of the ads table: _id, id, title, cover, roomId.
        data.append({
            "id": row[1],
            "title": row[2],
            "cover": row[3],
            "roomId": row[4],
        })
    result = {}
    result["error"] = 0
    result["data"] = data
    return json.dumps(result)
"""
获取当前支持直播的游戏列表
http://54.64.105.44/wanketv/live/games
"""
def games():
    """Return the list of games that currently support live streaming."""
    db = DAL('sqlite://wanke.sqlite3.sqlite')
    rows = db.executesql("select * from games order by gameIndex")
    # Columns used: [1]=gameId, [2]=gameName, [4]=gameCover.
    data = [
        {"gameId": r[1], "gameName": r[2], "gameCover": r[4]}
        for r in rows
    ]
    result = {}
    result["error"] = 0
    result["data"] = data
    return json.dumps(result)
"""
获取某款游戏的热门直播列表
http://54.64.105.44/wanketv/live/recommend?gameId=2&offset=0&limit=4
http://54.64.105.44/wanketv/live/recommend?offset=1&limit=20
recommend?gameId=2&offset=0&limit=4
获取room id的第offset页,每页4个
limit 默认值为20
offset 默认值为0
gameId 如果没有,不按照游戏进行分类返回
"""
def recommend():
    """Return one page of hot live rooms, optionally filtered by game.

    Request vars: gameId (optional filter), limit (page size, default 20),
    offset (page index, default 0), debug (non-empty clears gameId).
    """
    parseRequest()
    gameId = request.vars.get("gameId", "")
    limit = int(request.vars.get("limit", 20))
    offset = int(request.vars.get("offset", 0))
    debug = request.vars.get("debug", "")
    if len(debug) > 0:
        gameId = ""
    db = DAL('sqlite://wanke.sqlite3.sqlite')
    if len(gameId) == 0:
        # 返回所有游戏中最热门的直播频道
        allRooms = db.executesql("select * from live_channels order by _id")
    else:
        # Parameterized query: gameId comes from the client and must not be
        # interpolated into the SQL string.
        allRooms = db.executesql(
            "select * from live_channels where gameId=? order by _id",
            (gameId,))
    result = {}
    result["error"] = 0
    jsonRooms = []
    # Plain slice replaces the previous manual index-walk pagination loop.
    for room in allRooms[limit * offset:limit * (offset + 1)]:
        jsonRoom = {}
        jsonRoom["roomId"] = room[1]
        jsonRoom["roomName"] = room[2]
        jsonRoom["roomCover"] = room[3]
        jsonRoom["gameId"] = room[5]
        jsonRoom["gameName"] = room[6]
        jsonRoom["ownerNickname"] = room[11]
        jsonRoom["online"] = room[12]
        jsonRoom["fans"] = room[13]
        jsonRooms.append(jsonRoom)
    result["data"] = jsonRooms
    return json.dumps(result)
"""
获取某个房间的详细信息
http://54.64.105.44/wanketv/live/channel?roomId=7
http://54.64.105.44/wanketv/live/channel?roomId=7&uid=1
channel?roomId=7&uid=1
roomId: 如果没有,返回空
uid: 如果有,在返回结果中加入该用户是否订阅了该房间的字段
"""
def channel():
    """Return detailed info for one live room as JSON.

    Request vars: roomId (required; empty response when missing/unknown),
    uid (optional; when given, adds a 'subscribed' flag for that user).
    """
    parseRequest()
    roomId = request.vars.get("roomId", "")
    uid = request.vars.get("uid", "")
    db = DAL('sqlite://wanke.sqlite3.sqlite')
    allRooms = []
    result = ""
    if len(roomId) > 0:
        # NOTE(review): roomId is interpolated straight into the SQL --
        # injection risk; should use executesql placeholders.
        sql = "select * from live_channels where roomId=%s order by _id"%roomId
        allRooms = db.executesql(sql)
    room = None
    if len(allRooms) >= 1:
        room = allRooms[0]
    if room != None:
        # Map raw row columns onto the public JSON field names.
        jsonRoom = {}
        jsonRoom["roomId"] = room[1]
        jsonRoom["roomName"] = room[2]
        jsonRoom["roomCover"] = room[3]
        jsonRoom["gameId"] = room[5]
        jsonRoom["gameName"] = room[6]
        jsonRoom["ownerUid"] = room[9]
        jsonRoom["ownerNickname"] = room[11]
        jsonRoom["online"] = room[12]
        jsonRoom["fans"] = room[13]
        jsonRoom["detail"] = room[14]
        if len(uid) > 0:
            # The subscribe table stores a colon-separated list of room ids.
            sql = "select subscribes from subscribe where uid=%s"%uid
            subscribes = db.executesql(sql)
            subscribe = None
            if len(subscribes) >= 1:
                subscribe = subscribes[0][0]
            if subscribe != None:
                sset = set(subscribe.split(":"))
                if roomId in sset:
                    jsonRoom["subscribed"] = True
                else:
                    jsonRoom["subscribed"] = False
        result = json.dumps(jsonRoom)
    return result
import os
"""
http://54.64.105.44/wanketv/live/version?type=android
获取当前版本更新信息
type: ios或者android
"""
def version():
    """Return (stub) client-update information for the given platform.

    Request var 'type' should be 'ios' or 'android'. When it is missing the
    handler bails out with a bare return (empty response body), matching the
    original behaviour.
    """
    parseRequest()
    platform = request.vars.get("type", "")
    if len(platform) == 0:
        # Fix: the old code built a result dict and set result["error"]
        # twice before this early return -- all of it dead work.
        return
    result = {}
    result["error"] = 0
    result["version"] = "1.0.1"
    result["forceUpdate"] = False
    result["updateShortLog"] = "这是一次假的更新。\nhaha ~~"
    result["updateDetailLog"] = "http://www.baidu.com"
    result["downUrl"] = "http://www.baidu.com"
    return json.dumps(result)
"""
取消对房间的订阅
http://54.64.105.44/wanketv/live/unsubscribe?roomId=7&uid=1
unsubscribe?roomId=7&uid=1
roomId: 需要取消订阅的房间号
uid: 用户uid,不能为空
roomIds: 使用;对roomId进行分割,同时取消订阅多个房间号使用,优先级高于roomId
all: true or false, 这个优先级高于roomIds, 如果该值被设置为true, 删除该用户的所有订阅消息
"""
def unsubscribe():
parseRequest()
uid = request.vars.get("uid", "")
roomId = request.vars.get("roomId", "")
roomIds = request.vars.get("roomIds", "").split(":")
unsubscribeAll = request.vars.get("all", "false")
print roomIds
result = {}
result["error"] = 1
if unsubscribeAll.lower() == "true":
# 删除所有的订阅
try:
sql = 'update subscribe set subscribes="%s" where uid=%s'%("", uid)
dal.executesql(sql)
result["error"] = 0
except Exception, e:
result["msg"] = e.message
else:
if len(roomIds) == 0:
roomIds.add(roomId)
sql = "select subscribes from subscribe where uid=%s"%uid
subscribes = dal.executesql(sql)
subscribe = None
if len(subscribes) >= 1:
subscribe = subscribes[0][0]
if subscribe != None:
sset = set(subscribe.split(":"))
try:
for tempId in roomIds:
try:
sset.remove(tempId)
except Exception, e:
pass
# update
sql = 'update subscribe set subscribes="%s" where uid=%s'%(":".join(list(sset)), uid)
dal.executesql(sql)
result["error"] = 0
except Exception, e:
result["msg"] = e.message
return json.dumps(result)
"""
订阅某个房间
http://54.64.105.44/wanketv/live/subscribe?roomId=7&uid=1
subscribe?roomId=7&uid=1
roomId: 需要订阅的房间号
uid: 用户uid
"""
def subscribe():
    """Subscribe a user (uid) to a room (roomId).

    Returns '' on missing parameters, otherwise {"error": 0}.
    """
    parseRequest()
    uid = request.vars.get("uid", "")
    roomId = request.vars.get("roomId", "")
    if len(uid) == 0 or len(roomId) == 0:
        return ""
    db = DAL('sqlite://wanke.sqlite3.sqlite')
    # Parameterized queries: uid/roomId are client input and must not be
    # interpolated into the SQL string.
    subscribes = db.executesql(
        "select subscribes from subscribe where uid=?", (uid,))
    subscribe = None
    if len(subscribes) >= 1:
        subscribe = subscribes[0][0]
    if subscribe != None:
        # Existing row: merge the new room into the colon-separated set.
        sset = set(subscribe.split(":"))
        sset.add(roomId)
        db.executesql('update subscribe set subscribes=? where uid=?',
                      (":".join(list(sset)), uid))
    else:
        # First subscription for this user.
        db.executesql('insert into subscribe (uid, subscribes) VALUES (?, ?)',
                      (uid, roomId))
    result = {}
    result["error"] = 0
    return json.dumps(result)
"""
登录操作
http://54.64.105.44/wanketv/live/login?username=root&password=1
login?username=root&password=1
username: 注册用户名
passwrod: 登录密码
"""
def login():
time.sleep(2)
parseRequest()
username = request.vars.get("username", "")
password = request.vars.get("password", "")
"""
返回值:
0: 登录成功
1: 用户名或密码错误
"""
result = {}
result["error"] = 1
result["msg"] = "用户名或密码错误"
if len(username) > 0 and len(password) > 0:
try:
sql = 'select password, uid from account where username="%s"'%username
selectResults = dal.executesql(sql)
if len(selectResults) > 0:
dbPassword = selectResults[0][0]
if dbPassword == password:
result["error"] = 0
result["msg"] = ""
result["username"] = username
result["uid"] = selectResults[0][1]
result["avatar"] = "album_"+str(selectResults[0][1])+'.png'
except Exception, e:
result["error"] = 1
return json.dumps(result)
"""
注册新用户
http://54.64.105.44/wanketv/live/register?username=2121&password=1&email=123123@gmail.com
register?username=2121&password=1&email=123123@gmail.com
username: 注册用户名
passwrod: 登录密码
email: 注册邮箱
"""
def register():
time.sleep(2)
parseRequest()
username = request.vars.get("username", "")
password = request.vars.get("password", "")
email = request.vars.get("email", "")
"""
返回值:
0: 注册成功
1: 参数格式错误
2: 用户名已存在
"""
result = {}
if len(username) == 0 or len(password) == 0 or len(email) == 0:
result["error"] = 1
result["msg"] = "参数格式错误!"
else:
try:
sql = 'insert into account (username, password, email) VALUES ("%s", "%s", "%s")'%(username, password, email)
dal.executesql(sql)
result["error"] = 0
result["msg"] = ""
except Exception, e:
result["error"] = 2
result["msg"] = "用户名已存在!"
return json.dumps(result)
"""
获取某人的资料信息
http://54.64.105.44/wanketv/live/userInfo?uid=2121
userInfo?uid=2121
uid: 注册用户的uid
"""
def userInfo():
parseRequest()
uid = request.vars.get("uid", "")
result = {}
result["error"] = 0
result["msg"] = ""
if len(uid) == 0:
result["error"] = 1
result["msg"] = "参数格式错误!"
else:
try:
sql = 'select * from account where uid="%s"'%uid
selectResults = dal.executesql(sql)
if len(selectResults) > 0:
info = selectResults[0]
result["uid"] = info[0]
result["username"] = info[1]
# result["password"] = info[2]
result["email"] = info[3]
result["exp"] = info[4]
result["fans"] = info[5]
result["gender"] = info[6]
except Exception, e:
result["error"] = 2
result["msg"] = "用户名已存在!"
return json.dumps(result)
"""
意见反馈接口
http://54.64.105.44/wanketv/live/feedback?uid=2121&content=fasfasfasfasfasdf
feedback?uid=2121&content=fasfasfasfasfasdf
uid: 注册用户的uid,可选
content: 用户反馈的意见,不能为空
"""
def feedback():
    """Accept user feedback; rejects content shorter than 10 characters.

    Request vars: uid (optional), content (required).
    """
    parseRequest()
    uid = request.vars.get("uid", "")
    content = request.vars.get("content", "")
    result = {}
    result["error"] = 0
    result["msg"] = ""
    time.sleep(2)
    if len(content) < 10:
        result["error"] = 1
        result["msg"] = "意见字数太少!"
    return json.dumps(result)
"""
获取用户关注的直播频道数据
http://54.64.105.44/wanketv/live/fav?uid=1
fav?uid=2121
"""
def fav():
    """Return the live channels the given user (uid) has subscribed to.

    On any lookup failure the handler degrades to an empty success response
    (error 0 with no 'data' key).
    """
    parseRequest()
    uid = request.vars.get("uid", "")
    result = {}
    result["error"] = 0
    result["msg"] = ""
    if len(uid) > 0:
        try:
            # NOTE(review): uid is interpolated into the SQL -- injection risk.
            sql = 'select subscribes from subscribe where uid="%s"'%uid
            selectResults = dal.executesql(sql)
            if len(selectResults) > 0:
                # subscribes column is a colon-separated list of room ids.
                subscribesInfo = selectResults[0][0]
                data = []
                for roomId in subscribesInfo.split(":"):
                    subscribe = {}
                    subscribe["roomId"] = roomId
                    data.append(subscribe)
                    """
                    uid, avatar, username, fans
                    """
                    try:
                        # NOTE(review): `selectResults` is re-bound here,
                        # shadowing the outer query result (harmless because
                        # the outer value was already consumed above).
                        sql = 'select ownerUid, ownerNickname, fans from live_channels where roomId=%s'%roomId
                        selectResults = dal.executesql(sql)
                        if len(selectResults) > 0:
                            subscribe["uid"] = selectResults[0][0]
                            subscribe["avatar"] = str(selectResults[0][0])+".png"
                            subscribe["username"] = selectResults[0][1]
                            subscribe["fans"] = selectResults[0][2]
                    except Exception, e:
                        raise e
                result["data"] = data
        except Exception, e:
            # Best-effort: swallow errors and return the empty success body.
            pass
    return json.dumps(result)
"""
获取当前弹幕热词
"""
def danmaku():
    """Get the current hot danmaku phrases, or add one via the 'add' var.

    The table is created lazily on every request (cheap no-op once it
    exists).
    """
    # Fix: removed `global hotDanmakus` -- the name was never defined or used.
    parseRequest()
    dal.executesql('CREATE TABLE IF NOT EXISTS hot_danmakus (content TEXT NOT NULL )')
    result = {}
    result["error"] = 0
    result["msg"] = ""
    danmakuContent = request.vars.get("add", "")
    if len(danmakuContent) > 0:
        # Parameterized insert: the phrase is client input.
        dal.executesql('insert into hot_danmakus (content) VALUES (?)',
                       (danmakuContent,))
    else:
        selectResults = dal.executesql('select * from hot_danmakus')
        datas = []
        if len(selectResults) > 0:
            for i in selectResults:
                datas.append(i[0])
        result["data"] = datas
    return json.dumps(result)
"""
获取图片
"""
def imgfile():
    """Redirect to the static cover image named by the 'id' request var."""
    filename = request.vars.get("id", "")
    if len(filename) == 0:
        return ""
    # Fix: removed the dead local-filepath computation (built but never used)
    # and the debug print of the filename.
    # NOTE(review): the host below is hard-coded; consider deriving it from
    # the incoming request instead.
    url = "http://192.168.41.101:9257/wanketv/static/images/cover/"+filename
    redirect(url)
def parseRequest():
    """Extract common client metadata from the request vars.

    Returns (platform, os, version, token), each '' when absent.
    """
    # Idiom fix: dict.get(key, default) replaces the has_key()/get() pairs.
    # (The local name `os` intentionally mirrors the original; it shadows the
    # os module only inside this function, where the module is not used.)
    platform = request.vars.get("platform", "")
    os = request.vars.get("os", "")
    version = request.vars.get("version", "")
    token = request.vars.get("token", "")
    return platform, os, version, token
| apache-2.0 |
vortex-ape/scikit-learn | benchmarks/bench_covertype.py | 4 | 7382 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.utils import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
# (the joblib cache lives under scikit-learn's data home directory).
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
                mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
    """Load the data, then cache and memmap the train/test split"""
    ######################################################################
    # Load dataset
    print("Loading dataset...")
    data = fetch_covtype(download_if_missing=True, shuffle=True,
                         random_state=random_state)
    X = check_array(data['data'], dtype=dtype, order=order)
    # Binary target: class 1 (spruce/fir) vs. the rest.
    # Fix: `np.int` is a deprecated (and since NumPy 1.24, removed) alias of
    # the builtin `int`; use the builtin directly.
    y = (data['target'] != 1).astype(int)

    # Create train-test split (as [Joachims, 2006])
    print("Creating train-test split...")
    n_train = 522911
    X_train = X[:n_train]
    y_train = y[:n_train]
    X_test = X[n_train:]
    y_test = y[n_train:]

    # Standardize first 10 features (the numerical ones)
    mean = X_train.mean(axis=0)
    std = X_train.std(axis=0)
    mean[10:] = 0.0
    std[10:] = 1.0
    X_train = (X_train - mean) / std
    X_test = (X_test - mean) / std
    return X_train, X_test, y_train, y_test
# Candidate models, keyed by the CLI names accepted by --classifiers.
# NOTE(review): some constructor arguments (e.g. LinearSVC loss="l2") come
# from an older scikit-learn API -- verify against the installed version.
ESTIMATORS = {
    'GBRT': GradientBoostingClassifier(n_estimators=250),
    'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
    'RandomForest': RandomForestClassifier(n_estimators=20),
    'CART': DecisionTreeClassifier(min_samples_split=5),
    'SGD': SGDClassifier(alpha=0.001, max_iter=1000, tol=1e-3),
    'GaussianNB': GaussianNB(),
    'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
                           tol=1e-3),
    'SAG': LogisticRegression(solver='sag', max_iter=2, C=1000)
}
if __name__ == "__main__":
    # ---- CLI ------------------------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--classifiers', nargs="+",
                        choices=ESTIMATORS, type=str,
                        default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
                        help="list of classifiers to benchmark.")
    parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
                        help="Number of concurrently running workers for "
                             "models that support parallelism.")
    parser.add_argument('--order', nargs="?", default="C", type=str,
                        choices=["F", "C"],
                        help="Allow to choose between fortran and C ordered "
                             "data")
    parser.add_argument('--random-seed', nargs="?", default=13, type=int,
                        help="Common seed used by random number generator.")
    args = vars(parser.parse_args())

    print(__doc__)

    # ---- Data -----------------------------------------------------------
    X_train, X_test, y_train, y_test = load_data(
        order=args["order"], random_state=args["random_seed"])

    print("")
    print("Dataset statistics:")
    print("===================")
    print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
    print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
    print("%s %s" % ("data type:".ljust(25), X_train.dtype))
    print("%s %d (pos=%d, neg=%d, size=%dMB)"
          % ("number of train samples:".ljust(25),
             X_train.shape[0], np.sum(y_train == 1),
             np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
    print("%s %d (pos=%d, neg=%d, size=%dMB)"
          % ("number of test samples:".ljust(25),
             X_test.shape[0], np.sum(y_test == 1),
             np.sum(y_test == 0), int(X_test.nbytes / 1e6)))

    # ---- Fit / predict / score each requested classifier ----------------
    print()
    print("Training Classifiers")
    print("====================")
    error, train_time, test_time = {}, {}, {}
    for name in sorted(args["classifiers"]):
        print("Training %s ... " % name, end="")
        estimator = ESTIMATORS[name]
        estimator_params = estimator.get_params()

        # Seed every *_random_state parameter with the common seed.
        estimator.set_params(**{p: args["random_seed"]
                                for p in estimator_params
                                if p.endswith("random_state")})

        if "n_jobs" in estimator_params:
            estimator.set_params(n_jobs=args["n_jobs"])

        time_start = time()
        estimator.fit(X_train, y_train)
        train_time[name] = time() - time_start

        time_start = time()
        y_pred = estimator.predict(X_test)
        test_time[name] = time() - time_start

        error[name] = zero_one_loss(y_test, y_pred)

        print("done")

    # ---- Report, best (lowest error) first -------------------------------
    print()
    print("Classification performance:")
    print("===========================")
    print("%s %s %s %s"
          % ("Classifier  ", "train-time", "test-time", "error-rate"))
    print("-" * 44)
    for name in sorted(args["classifiers"], key=error.get):
        print("%s %s %s %s" % (name.ljust(12),
                               ("%.4fs" % train_time[name]).center(10),
                               ("%.4fs" % test_time[name]).center(10),
                               ("%.4f" % error[name]).center(10)))

    print()
| bsd-3-clause |
film42/heroku-buildpack-python-wkhtmltopdf | vendor/distribute-0.6.36/setuptools/tests/test_markerlib.py | 71 | 2237 | import os
import unittest
from setuptools.tests.py26compat import skipIf
try:
import ast
except ImportError:
pass
class TestMarkerlib(unittest.TestCase):
    """Tests for _markerlib's PEP 345 environment-marker interpreter."""

    @skipIf('ast' not in globals(),
        "ast not available (Python < 2.6?)")
    def test_markers(self):
        from _markerlib import interpret, default_environment, compile
        os_name = os.name
        # The empty marker is trivially true.
        self.assertTrue(interpret(""))
        self.assertTrue(interpret("os.name != 'buuuu'"))
        self.assertTrue(interpret("python_version > '1.0'"))
        self.assertTrue(interpret("python_version < '5.0'"))
        self.assertTrue(interpret("python_version <= '5.0'"))
        self.assertTrue(interpret("python_version >= '1.0'"))
        self.assertTrue(interpret("'%s' in os.name" % os_name))
        self.assertTrue(interpret("'buuuu' not in os.name"))

        self.assertFalse(interpret("os.name == 'buuuu'"))
        self.assertFalse(interpret("python_version < '1.0'"))
        self.assertFalse(interpret("python_version > '5.0'"))
        self.assertFalse(interpret("python_version >= '5.0'"))
        self.assertFalse(interpret("python_version <= '1.0'"))
        self.assertFalse(interpret("'%s' not in os.name" % os_name))
        self.assertFalse(interpret("'buuuu' in os.name and python_version >= '5.0'"))

        # A caller-supplied environment extends/overrides the defaults.
        environment = default_environment()
        environment['extra'] = 'test'
        self.assertTrue(interpret("extra == 'test'", environment))
        self.assertFalse(interpret("extra == 'doc'", environment))

        # Idiomatic fix: assertRaises replaces the hand-rolled try/except
        # helper functions, covering both "raises" and "wrong/no exception".
        self.assertRaises(NameError, interpret, "python.version == '42'")
        self.assertRaises(SyntaxError, interpret, "(x for x in (4,))")

        # compile() preserves the marker source as the code's docstring.
        statement = "python_version == '5'"
        self.assertEqual(compile(statement).__doc__, statement)
| mit |
gaocegege/treadmill | treadmill/vipfile.py | 3 | 4114 | """Manage Treadmill vIPs allocations"""
import errno
import glob
import logging
import os
from . import fs
_LOGGER = logging.getLogger(__name__)
class VipMgr(object):
    """VIP allocation manager.

    Allocations are recorded as symlinks under the base path: each link is
    named after the IP and points at the owner's entry under the owner path.

    :param basepath:
        Base directory that will contain all the allocated VIPs.
    :type basepath:
        ``str``
    """
    __slots__ = (
        '_base_path',
        '_owner_path',
    )

    def __init__(self, path, owner_path):
        # Make sure vips directory exists.
        fs.mkdir_safe(path)
        self._base_path = os.path.realpath(path)
        self._owner_path = os.path.realpath(owner_path)

    def initialize(self):
        """Initialize the vip folder, removing any stale allocations."""
        # Fix: the previous `map(os.unlink, glob.glob(...))` is lazy on
        # Python 3, so nothing was ever deleted; use an explicit loop.
        for stale in glob.glob(os.path.join(self._base_path, '*')):
            os.unlink(stale)

    def alloc(self, owner, picked_ip=None):
        """Atomically allocates virtual IP pair for the container.
        """
        if picked_ip is not None:
            if not self._alloc(owner, picked_ip):
                # Fix: Exception() does not %-format its arguments the way a
                # logger does; build the message explicitly (also fixes the
                # "Unabled" typo).
                raise Exception(
                    'Unable to assign IP %r for %r' % (picked_ip, owner))
            return picked_ip

        for index in range(0, 256**2):
            major, minor = (index >> 8), (index % 256)
            # NOTE(review): major/minor only ever reach 255, so the `256`
            # entries below are unreachable -- possibly meant to be 255?
            if major in [128, 256]:
                continue
            if minor in [0, 256]:
                continue
            ip = '192.168.{major}.{minor}'.format(major=major, minor=minor)
            if not self._alloc(owner, ip):
                continue
            # We were able to grab the IP.
            break
        else:
            raise Exception('Unable to find free IP for %r' % (owner,))
        return ip

    def free(self, owner, owned_ip):
        """Atomically frees virtual IP associated with the container.
        """
        path = os.path.join(self._base_path, owned_ip)
        try:
            ip_owner = os.path.basename(os.readlink(path))
            if ip_owner != owner:
                # Refuse to free an IP owned by someone else.
                _LOGGER.critical('%r tried to free %r that it does not own',
                                 owner, owned_ip)
                return
            os.unlink(path)
            _LOGGER.debug('Freed %r', owned_ip)

        except OSError as err:
            if err.errno == errno.ENOENT:
                _LOGGER.exception('Freed unallocated ip %r', owned_ip)
            else:
                raise

    def garbage_collect(self):
        """Garbage collect all VIPs without owner.
        """
        allocated = glob.glob(
            os.path.join(self._base_path, '*')
        )
        for link in allocated:
            try:
                # stat() follows the symlink: ENOENT means the owner entry
                # is gone and the allocation is dangling.
                _link_st = os.stat(link)  # noqa: F841
            except OSError as err:
                if err.errno == errno.ENOENT:
                    _LOGGER.warning('Reclaimed: %r', link)
                    try:
                        os.unlink(link)
                    except OSError as err:
                        if err.errno == errno.ENOENT:
                            pass
                        else:
                            raise
                else:
                    raise

    def list(self):
        """List all allocated IPs and their owner
        """
        ips = []
        allocated = glob.glob(
            os.path.join(self._base_path, '*')
        )
        for link in allocated:
            try:
                ip_owner = os.readlink(link)
            except OSError as err:
                if err.errno == errno.EINVAL:
                    # not a link
                    continue
                raise
            ips.append((os.path.basename(link), os.path.basename(ip_owner)))

        return ips

    def _alloc(self, owner, new_ip):
        """Atomically grab an IP for an owner; True on success, False if the
        IP is already taken (symlink creation races are resolved by EEXIST).
        """
        ip_file = os.path.join(self._base_path, new_ip)
        owner_file = os.path.join(self._owner_path, owner)
        try:
            os.symlink(os.path.relpath(owner_file, self._base_path), ip_file)
            _LOGGER.debug('Allocated %r for %r', new_ip, owner)
        except OSError as err:
            if err.errno == errno.EEXIST:
                return False
            raise
        return True
| apache-2.0 |
jakspacek/tableHIFU | Hardware/InstrumentManager.py | 1 | 8563 | from __future__ import print_function, absolute_import
import time
import logging
from threading import Thread
from Queue import Queue, Empty
logger = logging.getLogger(__name__)
import Hardware.ampfunctions as AmpLib
import Hardware.fcgenfunctions as FCGenLib
import Hardware.powermeterfunctions as PMeterLib
#import Hardware.motorfunctions as MotorsLib
# ***Add motors later - we dont have a compiled binary on this laptop.
# ORGANIZATION TIPS FOR MYSELF. -------------
# Server takes exactly one command via the socket. (Suppose it's a non-blocking one.)
# Delegates it here to the InstrumentManager. The InstrumentManager calls the appropriate
# function, then sends a return message (either instantly OR after the thing is complete.)
# It MUST run in a different thread so that the server is free to keep receiving stuff (Including halts).
class InstrumentManager(Thread):
    def __init__(self, backchannel, queue):
        """Worker thread that executes instrument commands posted to `queue`.

        backchannel -- object with a write() method used to acknowledge the
        client; queue -- Queue of (method-name, kwargs) tuples to dispatch.
        """
        Thread.__init__(self)
        self.bRunning = False
        self.queue = queue
        self.backchannel = backchannel
        # Connection flags, one per instrument:
        # **alphabetical. Amp, FC Gen, Motors, Powermeter.
        self.instrumentsconnected = [False, False, False, False]
        self.bRunning = True  # NOTE(review): makes the earlier False redundant
        self.start()
    def setSafetyRef(self, safetyobj):
        """Store a reference to the Safety monitor so handlers can update it."""
        self.Safety = safetyobj
def run(self):
while self.bRunning:
try:
function, kwargs = self.queue.get(timeout = 5.0)
eval('self.'+str(function)+'(**kwargs)')
except Empty: # just try again.
pass
    def issueHandshake(self, message):
        """Acknowledge a completed (or failed) command back to the client."""
        self.backchannel.write(message)
#===========================================================================
# Internal use functions. Mostly setup.
    def fcgen_init(self, **kwargs):
        """Connect to and initialise the function generator (fixed LAN address)."""
        self.FCGen = FCGenLib.FUNC_Connection('172.16.3.241')
        if not self.FCGen:
            logger.error('Function generator connection was not made. Sending error message to client.')
            self.issueHandshake('uh oh')
        else:
            # 1e6 / 110mV initial settings -- presumably safe idle output;
            # TODO confirm against FCGenLib docs.
            FCGenLib.FUNC_Initialization(self.FCGen, '1e6', '110mV')
            self.issueHandshake('sure i guess')
            self.instrumentsconnected[1]=True
    def amp_init(self, **kwargs):
        """Connect to the amplifier over serial, initialise it and report
        (gain, status) back to the client."""
        self.Amp = AmpLib.Amplifier_Connection('COM6', 19200, 5)
        if not self.Amp:
            logger.error('Amplifier connection was not made. Sending error message to client.')
            self.issueHandshake('uh oh')
        else:
            AmpLib.Amplifier_Initialization(self.Amp)
            AmpLib.Amplifier_SetGain(self.Amp)
            # Handshake payload is the stringified (gain, status) tuple.
            self.issueHandshake(str((AmpLib.Amplifier_GetGain(self.Amp),
                                AmpLib.Amplifier_GetStatus(self.Amp))))
            self.instrumentsconnected[0] = True
    def pmeter_init(self, **kwargs):
        """Connect to the power meter over serial and start safety polling."""
        self.PMeter = PMeterLib.Power_Meter_Connection('COM3', 9600, 2)
        if not self.PMeter:
            logger.error('Power meter connection was not made. Sending error message to client.')
            self.issueHandshake('uh oh')
        else:
            PMeterLib.Power_Meter_Initialization(self.PMeter)
            self.instrumentsconnected[3] = True
            # Tell the Safety thread it may start querying readings.
            self.Safety._queryagainflag = True
            self.issueHandshake('go ahead')
    def pmeter_calibrate_zeroboth(self, **kwargs):
        """Auto-zero both power-meter channels, then acknowledge."""
        self.PMeter.write('CAL1:ZERO:AUTO ONCE\n')
        time.sleep(12)  # fixed wait for zeroing to complete
        self.PMeter.write('CAL2:ZERO:AUTO ONCE\n')
        time.sleep(12)
        self.issueHandshake('go ahead')
    def pmeter_calibrate_cal1(self, **kwargs):
        """Run auto-calibration on power-meter channel 1, then acknowledge."""
        self.PMeter.write('CAL1:AUTO ONCE\n')
        time.sleep(12)  # fixed wait for calibration to complete
        self.issueHandshake('go ahead')
    def pmeter_calibrate_cal2(self, **kwargs):
        """Run auto-calibration on power-meter channel 2, then acknowledge."""
        self.PMeter.write('CAL2:AUTO ONCE\n')
        time.sleep(12)  # fixed wait for calibration to complete
        self.issueHandshake('go ahead')
    def pmeter_send_to_safety(self):
        """Push the latest power readings to the Safety monitor and mark
        them fresh so it re-evaluates."""
        vals = PMeterLib.Power_Meter_Reading(self.PMeter)
        self.Safety._lastpowerreadings = vals
        self.Safety._queryagainflag = True
    def motors_init(self, **kwargs):
        """Connect to the motor table and initialise its axes.

        NOTE(review): `motorcontrol` is not imported in this file (the motor
        import at the top is commented out), so this will raise NameError
        until that import is restored.
        """
        self.Motors = motorcontrol.TableMotorController()
        if not self.Motors:
            logger.error('Motor table connection was not made. Sending error message to client.')
            self.issueHandshake('uh oh')
        else:
            self.Motors.init_axes()
            self.issueHandshake('go ahead :)')
def motors_set_focus(self, xval, yval, **kwargs):
Motors.set_focus(xval, yval)
self.issueHandshake('Done')
def motors_homing(self, **kwargs):
Motors.stop_motors()
Motors.orientation('Supine', 'Head First')
p = Motors.read_current_position()
# move Y axis
Motors.set_initial_focus(p[0], p[1])
Motors.set_focus(p[0],-11.17)
Motors.move_to_target(p[0],0)
# move X axis
p = Motors.read_current_position()
Motors.set_focus(-25.3,p[1])
Motors.move_to_target(0,p[1])
Motors.stop_motors()
p = Motors.read_current_position()
if p[0] < 0.5 and p[0] > -0.5 and p[1] < 0.5 and p[1] > -0.5:
# in range.
self.issueHandshake('Homing done OK')
else:
self.issueHandshake('Homing NOT done OK')
    def emergency_close(self):
        """Best-effort emergency shutdown: re-open the amplifier link if it
        dropped, then force RF output off.

        NOTE(review): `newamp` is created (and printed) but the turn-off
        calls still use the old self.Amp handle -- confirm this is intended.
        """
        logger.info('emergency_close invoked.')
        #first try and reconnect to instruments in case there was a disrupted connection.
        if self.instrumentsconnected[0]:
            try:
                newamp = AmpLib.Amplifier_Connection('COM6', 19200, 5)
                print(newamp)
            except:
                raise
            #shut off the output.
            AmpLib.Amplifier_RfTurnOff(self.Amp)
            AmpLib.Amplifier_ConnectionOff(self.Amp)
    def clean_close(self):
        """Orderly shutdown: close each connected instrument, stop the loop.

        Slots: 0=amplifier, 1=function generator, 2=motors, 3=power meter
        (inferred from which attribute each branch touches).
        """
        logger.info('clean_close invoked.')
        if self.instrumentsconnected[0]:
            self.Amp.close()
        if self.instrumentsconnected[1]:
            self.FCGen.clear()
            self.FCGen.close()
        if self.instrumentsconnected[2]:
            self.Motors.stop_motors()
            # no real way to disconnect, weirdly.
        if self.instrumentsconnected[3]:
            self.PMeter.close()
        self.instrumentsconnected = [False, False, False, False]
        self.bRunning = False
#===========================================================================
# Exposed Functions.
    def move_blocking(self, xpos, ypos, **kwargs):
        """Blocking move request; currently only acknowledges the client."""
        logger.debug('move-blocking in the instrument manager.')
        self.issueHandshake('OK')
    def move_async(self, xpos, ypos, **kwargs):
        """Asynchronous move request; not yet implemented."""
        pass
def exposure_timed(self, amplitude, duration, **kwargs):
"Note that halting signal is generated by an InstruManager command, "
"So we can NOT run this in the main thread!!! or else the halt never gets"
"pushed. "
self.Safety._sonicating = True # turn on spike detection.
t = Thread(target = self.__doTimedExposure, args = (float(duration), amplitude, self.backchannel))
t.start()
    def __doTimedExposure(self, duration, amplitude, backchannel):
        """Worker: drive the function generator for ``duration`` seconds.

        Runs on its own thread (see exposure_timed).  Spins until the time
        elapses or ``self.bRunning`` is cleared externally, then turns the
        output off.
        """
        starttime = time.time()
        FCGenLib.FUNC_TurnOn(self.FCGen)
        FCGenLib.FUNC_SetAmplitude(self.FCGen, amplitude)
        # Busy-wait; a halt elsewhere clears bRunning to abort early.
        while self.bRunning and time.time() < starttime + duration:
            pass
        if not self.bRunning: print('stopped prematurely!')
        FCGenLib.FUNC_TurnOff(self.FCGen)
def update_voltage(self, amplitude, **kwargs):
"The PID controller has decided a new voltage. Update the "
"Note that we're relying on the Safety thread to catch issues - there "
"will be NO timing done here...!"
FCGenLib.FUNC_SetAmplitude(self.FCGen, amplitude)
self.issueHandshake('done')
    def request_meter_readings(self, **kwargs):
        "Ask the power meter, send the numbers by backchannel. I guess."
        # Not implemented yet.
        pass
    def request_table_position(self, **kwargs):
        "Send the answer by backchannel."
        # Replies with the raw position tuple from the motor controller.
        self.issueHandshake(self.Motors.read_current_position())
def request_instrument_statuses(self, **kwargs):
"Return the instruments that were actually configured."
"Send them by backchannel as JSON, I guess."
self.issueHandshake('they exist in the current universe.')
    def refresh(self, **kwargs):
        "This should actually be a NOP. The actual refreshing is done way earlier."
        pass
    def halt(self, **kwargs):
        """Stop the instrument-management thread and close all instruments."""
        # end the instrument management thread.
        # halt other things too.
        self.bRunning = False
        self.clean_close()
| gpl-3.0 |
soporteensitech/Asami | node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/tools/pretty_vcproj.py | 2637 | 9586 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Make the format of a vcproj really pretty.
This script normalize and sort an xml. It also fetches all the properties
inside linked vsprops and include them explicitly in the vcproj.
It outputs the resulting xml to stdout.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import sys
from xml.dom.minidom import parse
from xml.dom.minidom import Node
REPLACEMENTS = dict()
ARGUMENTS = None
class CmpTuple(object):
  """Compare function between 2 tuple."""
  # Python 2 style cmp-comparator (cmp() no longer exists in Python 3):
  # orders tuples by their first element only.
  def __call__(self, x, y):
    return cmp(x[0], y[0])
class CmpNode(object):
  """Compare function between 2 xml nodes."""
  # Python 2 cmp-comparator: orders nodes by a synthesized string key built
  # from tag name, node value, the Name attribute, then all attributes in
  # sorted order.
  def __call__(self, x, y):
    def get_string(node):
      node_string = "node"
      node_string += node.nodeName
      if node.nodeValue:
        node_string += node.nodeValue
      if node.attributes:
        # We first sort by name, if present.
        node_string += node.getAttribute("Name")
        all_nodes = []
        for (name, value) in node.attributes.items():
          all_nodes.append((name, value))
        all_nodes.sort(CmpTuple())
        for (name, value) in all_nodes:
          node_string += name
          node_string += value
      return node_string
    return cmp(get_string(x), get_string(y))
def PrettyPrintNode(node, indent=0):
  """Recursively print *node* as indented XML to stdout (Python 2 prints)."""
  if node.nodeType == Node.TEXT_NODE:
    if node.data.strip():
      print '%s%s' % (' '*indent, node.data.strip())
    return
  if node.childNodes:
    node.normalize()
  # Get the number of attributes
  attr_count = 0
  if node.attributes:
    attr_count = node.attributes.length
  # Print the main tag
  if attr_count == 0:
    print '%s<%s>' % (' '*indent, node.nodeName)
  else:
    print '%s<%s' % (' '*indent, node.nodeName)
    all_attributes = []
    for (name, value) in node.attributes.items():
      all_attributes.append((name, value))
    all_attributes.sort(CmpTuple())
    for (name, value) in all_attributes:
      # One attribute per line, aligned under the tag.
      print '%s %s="%s"' % (' '*indent, name, value)
    print '%s>' % (' '*indent)
  if node.nodeValue:
    print '%s %s' % (' '*indent, node.nodeValue)
  for sub_node in node.childNodes:
    PrettyPrintNode(sub_node, indent=indent+2)
  print '%s</%s>' % (' '*indent, node.nodeName)
def FlattenFilter(node):
  """Returns a list of all the node and sub nodes.

  Filter elements are expanded recursively into their children; the special
  '_excluded_files' filter is dropped entirely.
  """
  if (node.attributes and
      node.getAttribute('Name') == '_excluded_files'):
    # We don't add the "_excluded_files" filter.
    return []
  flattened = []
  for child in node.childNodes:
    if child.nodeName == 'Filter':
      flattened += FlattenFilter(child)
    else:
      flattened.append(child)
  return flattened
def FixFilenames(filenames, current_directory):
  """Apply REPLACEMENTS to each filename and make non-$ paths absolute.

  Paths starting with '$' (VS macros) are kept verbatim; empty entries are
  dropped.  Relative paths are resolved against *current_directory*.
  """
  new_list = []
  for filename in filenames:
    if filename:
      for key in REPLACEMENTS:
        filename = filename.replace(key, REPLACEMENTS[key])
      # NOTE(review): chdir per-iteration is loop-invariant and mutates
      # process-wide state so that abspath() resolves against
      # current_directory - confirm callers tolerate the cwd change.
      os.chdir(current_directory)
      filename = filename.strip('"\' ')
      if filename.startswith('$'):
        new_list.append(filename)
      else:
        new_list.append(os.path.abspath(filename))
  return new_list
def AbsoluteNode(node):
  """Makes all the properties we know about in this node absolute."""
  if node.attributes:
    for (name, value) in node.attributes.items():
      if name in ['InheritedPropertySheets', 'RelativePath',
                  'AdditionalIncludeDirectories',
                  'IntermediateDirectory', 'OutputDirectory',
                  'AdditionalLibraryDirectories']:
        # We want to fix up these paths
        path_list = value.split(';')
        new_list = FixFilenames(path_list, os.path.dirname(ARGUMENTS[1]))
        node.setAttribute(name, ';'.join(new_list))
      if not value:
        # Drop attributes whose original value was empty.
        node.removeAttribute(name)
def CleanupVcproj(node):
  """Recursively absolutize, normalize, dedupe and reorder the node tree.

  Called for each sub node, then cleans up this node: strips whitespace
  text, sorts/dedupes semicolon-separated attributes, flattens Filter
  children, and re-inserts children in canonical sorted order.
  """
  for sub_node in node.childNodes:
    AbsoluteNode(sub_node)
    CleanupVcproj(sub_node)
  # Normalize the node, and remove all extranous whitespaces.
  for sub_node in node.childNodes:
    if sub_node.nodeType == Node.TEXT_NODE:
      sub_node.data = sub_node.data.replace("\r", "")
      sub_node.data = sub_node.data.replace("\n", "")
      sub_node.data = sub_node.data.rstrip()
  # Fix all the semicolon separated attributes to be sorted, and we also
  # remove the dups.
  if node.attributes:
    for (name, value) in node.attributes.items():
      sorted_list = sorted(value.split(';'))
      unique_list = []
      for i in sorted_list:
        if not unique_list.count(i):
          unique_list.append(i)
      node.setAttribute(name, ';'.join(unique_list))
      if not value:
        node.removeAttribute(name)
  if node.childNodes:
    node.normalize()
  # For each node, take a copy, and remove it from the list.
  node_array = []
  while node.childNodes and node.childNodes[0]:
    # Take a copy of the node and remove it from the list.
    current = node.childNodes[0]
    node.removeChild(current)
    # If the child is a filter, we want to append all its children
    # to this same list.
    if current.nodeName == 'Filter':
      node_array.extend(FlattenFilter(current))
    else:
      node_array.append(current)
  # Sort the list.
  node_array.sort(CmpNode())
  # Insert the nodes in the correct order.
  for new_node in node_array:
    # But don't append empty tool node.
    if new_node.nodeName == 'Tool':
      if new_node.attributes and new_node.attributes.length == 1:
        # This one was empty.
        continue
    if new_node.nodeName == 'UserMacro':
      continue
    node.appendChild(new_node)
def GetConfiguationNodes(vcproj):
  """Return every Configuration element found under a Configurations node."""
  #TODO(nsylvain): Find a better way to navigate the xml.
  return [child
          for node in vcproj.childNodes if node.nodeName == "Configurations"
          for child in node.childNodes if child.nodeName == "Configuration"]
def GetChildrenVsprops(filename):
  """Return absolute paths of the property sheets inherited by *filename*."""
  dom = parse(filename)
  if dom.documentElement.attributes:
    vsprops = dom.documentElement.getAttribute('InheritedPropertySheets')
    return FixFilenames(vsprops.split(';'), os.path.dirname(filename))
  return []
def SeekToNode(node1, child2):
  """Find node1's child matching child2's tag name and Name attribute.

  Returns None for text nodes, for unnamed nodes, or when no counterpart
  exists under node1.
  """
  # A text node does not have properties.
  if child2.nodeType == Node.TEXT_NODE:
    return None
  target_name = child2.getAttribute("Name")
  if not target_name:
    # There is no name. We don't know how to merge.
    return None
  # Scan node1's children for a same-tag, same-name match.
  for candidate in node1.childNodes:
    if (candidate.nodeName == child2.nodeName and
        candidate.getAttribute("Name") == target_name):
      return candidate
  # No match. We give up.
  return None
def MergeAttributes(node1, node2):
  """Merge node2's attributes into node1 (concatenating on conflict)."""
  # No attributes to merge?
  if not node2.attributes:
    return
  for (name, value2) in node2.attributes.items():
    # Don't merge the 'Name' attribute.
    if name == 'Name':
      continue
    value1 = node1.getAttribute(name)
    if value1:
      # The attribute exist in the main node. If it's equal, we leave it
      # untouched, otherwise we concatenate it.
      if value1 != value2:
        node1.setAttribute(name, ';'.join([value1, value2]))
    else:
      # The attribute does not exist in the main node. We append this one.
      node1.setAttribute(name, value2)
    # If the attribute was a property sheet attributes, we remove it, since
    # they are useless.
    if name == 'InheritedPropertySheets':
      node1.removeAttribute(name)
def MergeProperties(node1, node2):
  """Recursively merge node2's attributes and children into node1."""
  MergeAttributes(node1, node2)
  for child2 in node2.childNodes:
    child1 = SeekToNode(node1, child2)
    if child1:
      # A counterpart exists: merge into it in place.
      MergeProperties(child1, child2)
    else:
      # No counterpart: adopt a deep copy of the child.
      node1.appendChild(child2.cloneNode(True))
def main(argv):
  """Main function of this vcproj prettifier.

  argv[1] is the vcproj path; remaining arguments are key=value string
  replacements applied to every path (stored in REPLACEMENTS).
  """
  global ARGUMENTS
  ARGUMENTS = argv
  # check if we have exactly 1 parameter.
  if len(argv) < 2:
    print ('Usage: %s "c:\\path\\to\\vcproj.vcproj" [key1=value1] '
           '[key2=value2]' % argv[0])
    return 1
  # Parse the keys
  for i in range(2, len(argv)):
    (key, value) = argv[i].split('=')
    REPLACEMENTS[key] = value
  # Open the vcproj and parse the xml.
  dom = parse(argv[1])
  # First thing we need to do is find the Configuration Node and merge them
  # with the vsprops they include.
  for configuration_node in GetConfiguationNodes(dom.documentElement):
    # Get the property sheets associated with this configuration.
    vsprops = configuration_node.getAttribute('InheritedPropertySheets')
    # Fix the filenames to be absolute.
    vsprops_list = FixFilenames(vsprops.strip().split(';'),
                                os.path.dirname(argv[1]))
    # Extend the list of vsprops with all vsprops contained in the current
    # vsprops.
    for current_vsprops in vsprops_list:
      vsprops_list.extend(GetChildrenVsprops(current_vsprops))
    # Now that we have all the vsprops, we need to merge them.
    for current_vsprops in vsprops_list:
      MergeProperties(configuration_node,
                      parse(current_vsprops).documentElement)
  # Now that everything is merged, we need to cleanup the xml.
  CleanupVcproj(dom.documentElement)
  # Finally, we use the pretty xml function to print the vcproj back to the
  # user.
  #print dom.toprettyxml(newl="\n")
  PrettyPrintNode(dom.documentElement)
  return 0
if __name__ == '__main__':
  # Entry point: exit with main()'s status code.
  sys.exit(main(sys.argv))
| unlicense |
jskDr/jamespy_py3 | wireless/nb_polar_r12.py | 1 | 20207 | import numpy as np
import numba as nb
import matplotlib.pyplot as plt
def calc_ber(e_array):
    """Bit error rate: the mean absolute value of the error array."""
    return np.abs(e_array).mean()
# Imitate static variable for a python function using decorate and setattr
def static_vars(**kwargs):
    '''
    Attach the given keyword arguments to a function as attributes,
    imitating C-style static variables.

    @static_vars(counter=0)
    def foo():
        foo.counter += 1
        print("Counter is %d" % foo.counter)
    '''
    def decorate(func):
        for name, value in kwargs.items():
            setattr(func, name, value)
        return func
    return decorate
@nb.jit
def encode(u1, u2):
    # Length-2 polar kernel G2: x1 = u1 XOR u2, x2 = u2.
    return (u1 + u2) % 2, u2
@nb.jit
def f_neg(a, b):
    # Check-node LLR combine for the upper branch.
    # NOTE(review): exp() here can overflow for large LLR magnitudes -
    # confirm inputs stay small or switch to a stable log-domain form.
    return np.log((np.exp(a + b) + 1)/(np.exp(a)+np.exp(b)))
@nb.jit
def f_pos(a, b, u):
    # Variable-node LLR combine for the lower branch, given hard bit u.
    return (-1)**u*a + b
@nb.jit
def decode(y1, y2):
    # Successive-cancellation decode of the length-2 code; positive LLR -> 0.
    l1 = f_neg(y1, y2)
    u1_hard = 0 if l1 > 0 else 1
    l2 = f_pos(y1, y2, u1_hard)
    u2_hard = 0 if l2 > 0 else 1
    return u1_hard, u2_hard, l1, l2
@nb.jit
def channel(x1, x2):
    # Noiseless BPSK mapping: bit 0 -> +1.0, bit 1 -> -1.0.
    y1 = 1.0 - x1*2
    y2 = 1.0 - x2*2
    return y1, y2
@nb.jit
def coding(u1, u2):
    # One end-to-end pass; returns per-bit errors (0 means correct).
    x1, x2 = encode(u1, u2)
    y1, y2 = channel(x1, x2)
    u1_, u2_, _, _ = decode(y1, y2)
    e1, e2 = u1 - u1_, u2 - u2_
    return e1, e2
@nb.jit
def coding_array(u_array, e_array):
    # Fill e_array in place with the errors for every 2-bit row of u_array.
    for i in range(len(u_array)):
        e_array[i,0], e_array[i,1] = coding(u_array[i,0], u_array[i,1])
def run_coding():
    """Run all four 2-bit inputs through the noiseless chain; print errors."""
    u_array = np.array([(1,1), (1,0), (0,1), (0,0)])
    e_array = np.zeros_like(u_array)
    coding_array(u_array, e_array)
    print(e_array)
@nb.jit
def channel_awgn(x1, x2, SNRdB):
    # BPSK over AWGN; noise sigma = 1/sqrt(SNR) assuming unit signal power.
    SNR = np.power(10, SNRdB/10)
    Nsig = 1/np.sqrt(SNR)
    n1 = np.random.normal(0) * Nsig
    n2 = np.random.normal(0) * Nsig
    y1 = 1.0 - x1*2 + n1
    y2 = 1.0 - x2*2 + n2
    return y1, y2
@nb.jit
def coding_awgn(u1, u2, SNRdB):
    # End-to-end pass through the AWGN channel; returns per-bit errors.
    x1, x2 = encode(u1, u2)
    y1, y2 = channel_awgn(x1, x2, SNRdB)
    u1_, u2_, _, _ = decode(y1, y2)
    e1, e2 = u1 - u1_, u2 - u2_
    return e1, e2
@nb.jit
def coding_array_awgn(u_array, e_array, SNRdB):
    # Fill e_array in place, one AWGN pass per 2-bit row.
    for i in range(len(u_array)):
        e_array[i,0], e_array[i,1] = coding_awgn(u_array[i,0], u_array[i,1], SNRdB)
def run_coding_awgn(SNRdB=10):
    """Run all four 2-bit inputs once at the given SNR; return the BER."""
    u_array = np.array([(1,1), (1,0), (0,1), (0,0)])
    e_array = np.zeros_like(u_array)
    coding_array_awgn(u_array, e_array, SNRdB)
    # print(e_array)
    BER = np.sum(np.abs(e_array)) / np.prod(e_array.shape)
    return BER
def main_run_coding_awgn(SNRdB_list=list(range(10))):
    """Sweep SNR, collect BER, and plot the waterfall curve."""
    BER_list = []
    for SNRdB in SNRdB_list:
        BER = run_coding_awgn(SNRdB)
        BER_list.append(BER)
    plt.semilogy(SNRdB_list, BER_list)
    plt.grid()
    plt.xlabel('SNR(dB)')
    plt.ylabel('BER')
    plt.title('Performance of Polar Code')
    plt.show()
def run_coding_awgn_tile(SNRdB=10, Ntile=1):
    """Like run_coding_awgn but repeats the 4 input patterns Ntile times."""
    u_array_unit = np.array([(1,1), (1,0), (0,1), (0,0)])
    u_array = np.tile(u_array_unit, (Ntile, 1))
    e_array = np.zeros_like(u_array)
    coding_array_awgn(u_array, e_array, SNRdB)
    BER = np.sum(np.abs(e_array)) / np.prod(e_array.shape)
    return BER
def main_run_coding_awgn_tile(SNRdB_list=list(range(10)), Ntile=1, flag_fig=False):
    """SNR sweep over the tiled input set; optionally plot the BER curve."""
    BER_list = []
    for SNRdB in SNRdB_list:
        BER = run_coding_awgn_tile(SNRdB, Ntile)
        BER_list.append(BER)
    if flag_fig:
        plt.semilogy(SNRdB_list, BER_list)
        plt.grid()
        plt.xlabel('SNR(dB)')
        plt.ylabel('BER')
        plt.title('Performance of Polar Code')
        plt.show()
@nb.jit
def encode_array(u_array):
    # Encode every 2-bit row.
    x_array = np.zeros_like(u_array)
    for i in range(len(u_array)):
        x_array[i,0], x_array[i,1] = encode(u_array[i,0], u_array[i,1])
    return x_array
@nb.jit
def channel_array(x_array):
    # Noiseless BPSK mapping for every row.
    y_array = np.zeros(x_array.shape, dtype=nb.float_)
    for i in range(len(x_array)):
        y_array[i,0], y_array[i,1] = channel(x_array[i,0], x_array[i,1])
    return y_array
@nb.jit
def decode_array(y_array):
    # Hard-decision SC decode for every row.
    ud_array = np.zeros(y_array.shape, dtype=nb.int_)
    for i in range(len(y_array)):
        ud_array[i,0], ud_array[i,1], _, _ = decode(y_array[i,0], y_array[i,1])
    return ud_array
@nb.jit
def coding_array_all(u_array):
    # Whole-array end-to-end chain over the noiseless channel.
    e_array = np.zeros_like(u_array)
    x_array = encode_array(u_array)
    y_array = channel_array(x_array)
    ud_array = decode_array(y_array)
    e_array = u_array - ud_array
    return e_array
def run_coding_array_all():
    """Run the noiseless chain on all four 2-bit inputs and print errors."""
    u_array = np.array([(1,1), (1,0), (0,1), (0,0)])
    e_array = coding_array_all(u_array)
    print(e_array)
@nb.jit
def channel_array_awgn(x_array, SNRdB):
    # Per-row AWGN channel.
    y_array = np.zeros(x_array.shape, dtype=nb.float_)
    for i in range(len(x_array)):
        y_array[i,0], y_array[i,1] = channel_awgn(x_array[i,0], x_array[i,1], SNRdB)
    return y_array
@nb.jit
def _coding_array_all_awgn(u_array, SNRdB=10):
    # Older loop-based AWGN chain; superseded by coding_array_all_awgn below.
    e_array = np.zeros_like(u_array)
    x_array = encode_array(u_array)
    y_array = channel_array_awgn(x_array, SNRdB)
    ud_array = decode_array(y_array)
    e_array = u_array - ud_array
    return e_array
def run_coding_array_all_awgn(SNRdB=10):
    """Run the AWGN chain on the four 2-bit inputs and print the errors."""
    u_array = np.array([(1,1), (1,0), (0,1), (0,0)])
    e_array = coding_array_all_awgn(u_array, SNRdB=SNRdB)
    print(e_array)
def run_coding_array_all_awgn_tile(SNRdB=10, Ntile=1):
    """AWGN chain on Ntile copies of the 4 input patterns; return the BER."""
    u_array_unit = np.array([(1,1), (1,0), (0,1), (0,0)])
    u_array = np.tile(u_array_unit, (Ntile, 1))
    e_array = coding_array_all_awgn(u_array, SNRdB=SNRdB)
    BER = np.sum(np.abs(e_array)) / np.prod(e_array.shape)
    # print(BER)
    return BER
def main_run_coding_array_all_awgn_tile(SNRdB_list=list(range(10)), Ntile=1, flag_fig=False):
    """SNR sweep of the tiled AWGN chain; optionally plot the BER curve."""
    BER_list = []
    for SNRdB in SNRdB_list:
        BER = run_coding_array_all_awgn_tile(SNRdB, Ntile)
        BER_list.append(BER)
    if flag_fig:
        plt.semilogy(SNRdB_list, BER_list)
        plt.grid()
        plt.xlabel('SNR(dB)')
        plt.ylabel('BER')
        plt.title('Performance of Polar Code')
        plt.show()
@nb.jit
def coding_array_all_awgn(u_array, SNRdB=10):
    # Preferred AWGN chain: vectorized channel via channel_numpy_awgn.
    e_array = np.zeros_like(u_array)
    x_array = encode_array(u_array)
    y_array = channel_numpy_awgn(x_array, SNRdB)
    ud_array = decode_array(y_array)
    e_array = u_array - ud_array
    return e_array
@nb.jit
def channel_numpy_awgn(x_array, SNRdB):
    """
    Map bits (0,1) -> (+1,-1) and add Gaussian noise (fully vectorized).
    """
    #y_array = np.zeros(x_array.shape, dtype=nb.float_)
    SNR = np.power(10, SNRdB/10)
    noise_sig = 1/np.sqrt(SNR)
    n_array = np.random.normal(0.0, noise_sig, size=x_array.shape)
    y_array = 1.0 - x_array*2 + n_array
    return y_array
# Usage list
# main_run_coding_awgn()
# run_coding_awgn()
# run_coding()
# N >= 2 Polar coding (Generalized)
@nb.jit
def encode_n(u):
    """
    Recursive polar encode: x = uBF(xn) where n = log2(N), N = len(u),
    B is the bit-reversal permutation.

    Bug fix: ``L/2`` is float division in Python 3 and cannot be used as a
    slice index (TypeError); use integer division ``L//2``.
    """
    x = np.copy(u)
    L = len(u)
    if L != 1:
        u1 = u[0::2]
        u2 = u[1::2]
        u1u2 = np.mod(u1 + u2, 2)
        x[:L//2] = encode_n(u1u2)
        x[L//2:] = encode_n(u2)
    return x
@nb.jit
def encode_array_n(u_array):
    # Encode every length-N row with the recursive encoder.
    x_array = np.zeros_like(u_array)
    for i in range(len(u_array)):
        x_array[i] = encode_n(u_array[i])
    return x_array
@nb.jit
def f_neg_n(a, b):
    # Vector check-node LLR combine (upper branch).
    # NOTE(review): exp() can overflow for large LLRs - confirm input range.
    return np.log((np.exp(a + b) + 1)/(np.exp(a)+np.exp(b)))
@nb.jit
def f_pos_n(a, b, u):
    # Vector variable-node LLR combine (lower branch), given hard bits u.
    return (-1)**u*a + b
@nb.jit
def decode_n_r0(y_array):
    """
    Old-revision recursive SC decoder (kept for reference; no de-interleave).

    u_hard: input hard decision
    x_hard: output hard decision

    Bug fix: slice bounds use integer division ``L//2``; ``L/2`` is a float
    in Python 3 and raises TypeError when used as a slice index.
    """
    u_hard = np.zeros(y_array.shape, dtype=nb.int_)
    x_hard = np.zeros(y_array.shape, dtype=nb.int_)
    L = len(y_array)
    if L == 1:
        u_hard[0] = 0 if y_array[0] > 0 else 1
        x_hard[0] = u_hard[0]
    else:
        y1 = y_array[0::2]
        y2 = y_array[1::2]
        l1 = f_neg_n(y1, y2)
        # NOTE: delegates recursion to decode_n (the current revision).
        u_hard[:L//2], x_hard[:L//2] = decode_n(l1)
        l2 = f_pos_n(y1, y2, x_hard[:L//2])
        u_hard[L//2:], x_hard[L//2:] = decode_n(l2)
        x_hard[:L//2] = np.mod(x_hard[:L//2] + x_hard[L//2:], 2)
    return u_hard, x_hard
@nb.jit
def decode_n(y_array):
    """
    Recursive successive-cancellation decoder.

    u_hard: input hard decision
    x_hard: output hard decision (partial sums re-interleaved)

    Bug fix: slice bounds use integer division ``L//2``; ``L/2`` is a float
    in Python 3 and raises TypeError when used as a slice index.
    """
    u_hard = np.zeros(y_array.shape, dtype=nb.int_)
    x_hard = np.zeros(y_array.shape, dtype=nb.int_)
    x_temp = np.zeros(y_array.shape, dtype=nb.int_)
    L = len(y_array)
    if L == 1:
        u_hard[0] = 0 if y_array[0] > 0 else 1
        x_hard[0] = u_hard[0]
    else:
        y1 = y_array[0::2]
        y2 = y_array[1::2]
        l1 = f_neg_n(y1, y2)
        u_hard[:L//2], x_temp[:L//2] = decode_n(l1)
        l2 = f_pos_n(y1, y2, x_temp[:L//2])
        u_hard[L//2:], x_temp[L//2:] = decode_n(l2)
        x_temp[:L//2] = np.mod(x_temp[:L//2] + x_temp[L//2:], 2)
        # De-interleave partial sums back into natural bit order.
        x_hard[0::2] = x_temp[:L//2]
        x_hard[1::2] = x_temp[L//2:]
    return u_hard, x_hard
@nb.jit
def decode_array_n(y_array):
    # SC-decode every length-N row (x_hard output is discarded).
    ud_array = np.zeros(y_array.shape, dtype=nb.int_) #nb.int_)
    for i in range(len(y_array)):
        ud_array[i], _ = decode_n(y_array[i])
    return ud_array
@nb.jit
def coding_array_all_awgn_n(u_array, SNRdB=10):
    # Length-N end-to-end chain: encode -> AWGN -> SC decode -> errors.
    e_array = np.zeros_like(u_array)
    x_array = encode_array_n(u_array)
    y_array = channel_numpy_awgn(x_array, SNRdB)
    ud_array = decode_array_n(y_array)
    e_array = u_array - ud_array
    return e_array
class PolarCode:
    """Monte-Carlo BER simulator for an (unfrozen) length-N polar code."""
    def __init__(self, N_code=2, K_code=2):
        """
        N_code: Code block size
        K_code: Information bit size
        """
        self.N_code = N_code
        self.K_code = K_code
    def plot(self, SNRdB_list, BER_list):
        """Plot BER vs SNR on a semilog-y axis."""
        plt.semilogy(SNRdB_list, BER_list)
        plt.grid()
        plt.xlabel('SNR(dB)')
        plt.ylabel('BER')
        plt.title('Performance of Polar Code')
        plt.show()
    def run(self,
            SNRdB_list=list(range(10)), N_iter=1, flag_fig=False):
        """Simulate N_iter random blocks per SNR point; store BERs on self."""
        u_array = np.random.randint(2, size=(N_iter, self.N_code))
        BER_list = []
        for SNRdB in SNRdB_list:
            e_array = coding_array_all_awgn_n(u_array, SNRdB=SNRdB)
            BER = np.sum(np.abs(e_array)) / np.prod(e_array.shape)
            BER_list.append(BER)
        if flag_fig:
            self.plot(SNRdB_list, BER_list)
        self.BER_list = BER_list
# ====================================================================
# Frozen을 고려하는 Polar Coding 시스템
# ====================================================================
@nb.jit
def _decode_frozen_n(y_array, frozen_flag_n):
    """
    Old-revision frozen-aware SC decoder (kept for reference); it forces
    the frozen *input* bit to 0 instead of the re-encoded partial sum.

    u_hard: input hard decision
    x_hard: output hard decision

    Bug fix: slice bounds use integer division ``L//2``; ``L/2`` is a float
    in Python 3 and raises TypeError when used as a slice index.
    """
    u_hard = np.zeros(y_array.shape, dtype=nb.int_)
    x_hard = np.zeros(y_array.shape, dtype=nb.int_)
    x_temp = np.zeros(y_array.shape, dtype=nb.int_)
    L = len(y_array)
    if L == 1:
        if frozen_flag_n[0]:
            u_hard[0] = 0
        else:
            u_hard[0] = 0 if y_array[0] > 0 else 1
        x_hard[0] = u_hard[0]
    else:
        y1 = y_array[0::2]
        y2 = y_array[1::2]
        l1 = f_neg_n(y1, y2)
        # NOTE: delegates recursion to decode_frozen_n (current revision).
        u_hard[:L//2], x_temp[:L//2] = decode_frozen_n(l1, frozen_flag_n[:L//2])
        l2 = f_pos_n(y1, y2, x_temp[:L//2])
        u_hard[L//2:], x_temp[L//2:] = decode_frozen_n(l2, frozen_flag_n[L//2:])
        x_temp[:L//2] = np.mod(x_temp[:L//2] + x_temp[L//2:], 2)
        x_hard[0::2] = x_temp[:L//2]
        x_hard[1::2] = x_temp[L//2:]
    return u_hard, x_hard
@nb.jit
def decode_frozen_n(y_array, frozen_flag_n):
    """
    Recursive SC decoder honoring frozen-bit flags.

    u_hard: input hard decision
    x_hard: output hard decision (frozen leaves pinned to 0)

    Bug fix: slice bounds use integer division ``L//2``; ``L/2`` is a float
    in Python 3 and raises TypeError when used as a slice index.
    """
    u_hard = np.zeros(y_array.shape, dtype=nb.int_)
    x_hard = np.zeros(y_array.shape, dtype=nb.int_)
    x_temp = np.zeros(y_array.shape, dtype=nb.int_)
    L = len(y_array)
    if L == 1:
        u_hard[0] = 0 if y_array[0] > 0 else 1
        if frozen_flag_n[0]:
            # Frozen leaf: the re-encoded partial sum is pinned to 0.
            x_hard[0] = 0
        else:
            x_hard[0] = u_hard[0]
    else:
        y1 = y_array[0::2]
        y2 = y_array[1::2]
        l1 = f_neg_n(y1, y2)
        u_hard[:L//2], x_temp[:L//2] = decode_frozen_n(l1, frozen_flag_n[:L//2])
        l2 = f_pos_n(y1, y2, x_temp[:L//2])
        u_hard[L//2:], x_temp[L//2:] = decode_frozen_n(l2, frozen_flag_n[L//2:])
        x_temp[:L//2] = np.mod(x_temp[:L//2] + x_temp[L//2:], 2)
        # De-interleave partial sums back into natural bit order.
        x_hard[0::2] = x_temp[:L//2]
        x_hard[1::2] = x_temp[L//2:]
    return u_hard, x_hard
@nb.jit
def decode_frozen_array_n(y_array, frozen_flag_n):
    # Frozen-aware SC decode for every received row.
    ud_array = np.zeros(y_array.shape, dtype=nb.int_)
    for i in range(len(y_array)):
        ud_array[i], _ = decode_frozen_n(y_array[i], frozen_flag_n)
    return ud_array
@nb.jit
def frozen_encode_n(uf, u, f):
    """
    Scatter information bits into a code block, zeroing frozen positions.

    Input:
        uf: coding block of length N_code (written in place)
        u: information block of length K_code
        f: length-N_code vector flagging whether each bit is frozen
    """
    k = 0
    for n in range(len(uf)):
        if f[n]:
            uf[n] = 0
        else:
            uf[n] = u[k]
            k += 1
@nb.jit
def frozen_encode_array_n(u_array, frozen_flag_n):
    # Apply frozen_encode_n row by row -> (N_iter, N_code) array.
    N_iter = len(u_array)
    N_code = len(frozen_flag_n)
    uf_array = np.zeros(shape=(N_iter,N_code), dtype=nb.int_)
    for i in range(N_iter):
        frozen_encode_n(uf_array[i], u_array[i], frozen_flag_n)
    return uf_array
@nb.jit
def frozen_decode_n(ud, ufd, f):
    """
    Gather the information bits back out of a decoded code block.

    Input:
        ufd: decoded block of length N_code
        ud: detected information block of length K_code (written in place)
        f: length-N_code vector flagging whether each bit is frozen
    """
    k = 0
    for n in range(len(f)):
        if f[n] == 0:
            ud[k] = ufd[n]
            k += 1
@nb.jit
def frozen_decode_array_n(ufd_array, frozen_flag_n):
    # Apply frozen_decode_n row by row -> (N_iter, K_code) array.
    N_iter = len(ufd_array)
    N_code = len(frozen_flag_n)
    K_code = N_code - np.sum(frozen_flag_n)
    ud_array = np.zeros(shape=(N_iter,K_code), dtype=nb.int_)
    for i in range(N_iter):
        frozen_decode_n(ud_array[i], ufd_array[i], frozen_flag_n)
    return ud_array
@nb.jit
def coding_array_all_awgn_frozen_n(u_array, frozen_flag_n, SNRdB=10):
    # End-to-end chain with frozen bits: encode -> AWGN -> decode -> errors.
    e_array = np.zeros_like(u_array)
    # Frozen handling wraps the chain: before encode and after decode.
    # u_array rows have length K_code; uf_array rows have length N_code.
    uf_array = frozen_encode_array_n(u_array, frozen_flag_n)
    # encode_array_n() itself does not need to know about frozen bits.
    x_array = encode_array_n(uf_array)
    y_array = channel_numpy_awgn(x_array, SNRdB)
    ufd_array = decode_frozen_array_n(y_array, frozen_flag_n)  # frozen-aware decoder
    # ufd_array = decode_array_n(y_array)
    ud_array = frozen_decode_array_n(ufd_array, frozen_flag_n)
    e_array = u_array - ud_array
    return e_array
class PolarCodeFrozen:
    """Monte-Carlo BER simulator for a polar code with frozen bits."""
    # NOTE(review): frozen_flag_n's default is a mutable ndarray shared
    # across calls - harmless while it is never mutated, but worth fixing.
    def __init__(self, N_code=2, K_code=2, frozen_flag='manual', frozen_flag_n=np.zeros(2,dtype=int)):
        """
        N_code=4: Code block size
        K_code=2: Information bit size
        frozen_flag_n=[1,1,0,0]: marks each bit of the code block as frozen
            (1) or information-carrying (0).  The number of non-frozen bits
            must equal K_code.  With frozen_flag='auto' the flags are
            designed for a BEC via polar_design_bec().
        """
        if frozen_flag == 'auto':
            frozen_flag_n = polar_design_bec(N_code=N_code, K_code=K_code)
            print('Auto: frozen_flag_n =', frozen_flag_n)
        assert N_code == len(frozen_flag_n)
        assert N_code - K_code == np.sum(frozen_flag_n)
        self.N_code = N_code
        self.K_code = K_code
        self.frozen_flag_n = frozen_flag_n
    def plot(self, SNRdB_list, BER_list):
        """Plot BER vs SNR on a semilog-y axis."""
        plt.semilogy(SNRdB_list, BER_list)
        plt.grid()
        plt.xlabel('SNR(dB)')
        plt.ylabel('BER')
        plt.title('Performance of Polar Code')
        plt.show()
    def run(self,
            SNRdB_list=list(range(10)), N_iter=1, flag_fig=False):
        """Simulate N_iter random K_code-bit blocks per SNR point."""
        # The information block length is K_code; frozen positions are
        # zero-filled according to frozen_flag_n inside the coding chain.
        u_array = np.random.randint(2, size=(N_iter, self.K_code))
        BER_list = []
        for SNRdB in SNRdB_list:
            e_array = coding_array_all_awgn_frozen_n(u_array, frozen_flag_n=self.frozen_flag_n, SNRdB=SNRdB)
            BER = np.sum(np.abs(e_array)) / np.prod(e_array.shape)
            BER_list.append(BER)
        if flag_fig:
            self.plot(SNRdB_list, BER_list)
        print("SNRdB_list, BER_list")
        print(SNRdB_list, BER_list)
        self.BER_list = BER_list
def _polar_bsc(N_code=4, p=0.11, N_iter=1000):
    """
    Per-bit channel reliability estimate over a BSC (old revision).

    The original (0,1)-domain design was mapped to (1,-1), using 2*p in
    place of p.
    Input:
        p=0.11: crossover probability; every symbol is flipped with
            probability p, regardless of its value.
    Comments:
        ud_hat holds the hard decisions with every bit frozen; it is not a
        BER as such, but - since freezing prevents error propagation - it
        scores the reliability of each synthetic bit channel.
    """
    # Freeze every bit.
    f = np.ones(N_code, dtype=int)
    biterrd = np.zeros(N_code)
    for _ in range(N_iter):
        # Assume the transmitted input is all zeros.
        y = np.ones(N_code) - 2*p
        y[np.random.rand(N_code)<p] = -1 + 2*p
        ud_hat, _ = decode_frozen_n(y, f)
        biterrd += ud_hat
    biterrd /= N_iter
    return biterrd
def polar_bsc(N_code=4, p=0.11, N_iter=1000):
    """
    Per-bit channel reliability estimate over a BSC.

    The original (0,1)-domain design was mapped to (1,-1), using 2*p in
    place of p.
    Input:
        p=0.11: crossover probability; every symbol is flipped with
            probability p, regardless of its value.
    Comments:
        ud_hat holds the hard decisions with every bit frozen; with no
        error propagation it scores the reliability of each bit channel.
    """
    # Freeze every bit.
    f = np.ones(N_code, dtype=int)
    biterrd = np.zeros(N_code)
    for _ in range(N_iter):
        # Assume the transmitted input is all zeros.
        y_bin = np.zeros(N_code) + p
        y_bin[np.random.rand(N_code)<p] = 1 - p
        ud_hat, _ = decode_frozen_n(1-2*y_bin, f)
        biterrd += ud_hat
    biterrd /= N_iter
    return biterrd
@nb.jit
def polar_bec(N_code=4, erase_prob=0.5):
    """
    Predicted per-bit erasure probabilities of a polar code over a BEC,
    assuming per-bit erasure probability erase_prob.
    """
    n = int(np.log2(N_code))
    E = np.zeros(N_code)
    # E_out = np.zeros(N_code)
    E[0] = erase_prob
    for i in range(n):
        LN = 2**i
        # print(i, LN)
        # print('E in:', E)
        # At stage i the upper slice bound must be LN*2; otherwise the
        # assignment would broadcast over the whole tail.
        E[LN:LN*2] = E[:LN] * E[:LN]
        E[:LN] = 1-(1-E[:LN])*(1-E[:LN])
        # print('E out:', E)
    return E
def polar_design_bec(N_code=4, K_code=2, erase_prob=0.5):
    """
    For a BEC, compute each bit channel's error rate and build the
    frozen_flag: the K_code most reliable positions carry information.
    """
    biterrd = polar_bec(N_code=N_code, erase_prob=erase_prob)
    idx = np.argsort(biterrd)
    frozen_flag_n = np.ones(N_code, dtype=int)
    frozen_flag_n[idx[:K_code]] = 0
    print('BER for each bit', biterrd)
    return frozen_flag_n
if __name__ == '__main__':
    # main_run_coding_awgn()
    # main_run_coding_array_all_awgn_tile(Ntile=100000, flag_fig=True)
    # Demo: design a rate-1/2, length-2 polar code over a BEC, print flags.
    f = polar_design_bec(2,1)
    print(f)
sekikn/incubator-airflow | tests/providers/google/cloud/hooks/test_functions.py | 7 | 15650 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from unittest.mock import PropertyMock
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.functions import CloudFunctionsHook
from tests.providers.google.cloud.utils.base_gcp_mock import (
GCP_PROJECT_ID_HOOK_UNIT_TEST,
get_open_mock,
mock_base_gcp_hook_default_project_id,
mock_base_gcp_hook_no_default_project_id,
)
GCF_LOCATION = 'location'
GCF_FUNCTION = 'function'
class TestFunctionHookNoDefaultProjectId(unittest.TestCase):
    """CloudFunctionsHook tests with a connection lacking a default project id."""
    def setUp(self):
        # Patch the base hook __init__ so no real GCP credentials are needed.
        with mock.patch(
            'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__',
            new=mock_base_gcp_hook_no_default_project_id,
        ):
            self.gcf_function_hook_no_project_id = CloudFunctionsHook(gcp_conn_id='test', api_version='v1')
    @mock.patch("airflow.providers.google.cloud.hooks.functions.CloudFunctionsHook._authorize")
    @mock.patch("airflow.providers.google.cloud.hooks.functions.build")
    def test_gcf_client_creation(self, mock_build, mock_authorize):
        # get_conn() must build a v1 cloudfunctions client and cache it.
        result = self.gcf_function_hook_no_project_id.get_conn()
        mock_build.assert_called_once_with(
            'cloudfunctions', 'v1', http=mock_authorize.return_value, cache_discovery=False
        )
        self.assertEqual(mock_build.return_value, result)
        self.assertEqual(self.gcf_function_hook_no_project_id._conn, result)
    @mock.patch('airflow.providers.google.cloud.hooks.functions.CloudFunctionsHook.get_conn')
    @mock.patch(
        'airflow.providers.google.cloud.hooks.functions.CloudFunctionsHook._wait_for_operation_to_complete'
    )
    def test_create_new_function_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
        # An explicitly passed project_id must be used to build the parent path.
        create_method = (
            get_conn.return_value.projects.return_value.locations.return_value.functions.return_value.create
        )
        execute_method = create_method.return_value.execute
        execute_method.return_value = {"name": "operation_id"}
        wait_for_operation_to_complete.return_value = None
        res = self.gcf_function_hook_no_project_id.create_new_function(
            project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST, location=GCF_LOCATION, body={}
        )
        self.assertIsNone(res)
        create_method.assert_called_once_with(body={}, location='projects/example-project/locations/location')
        execute_method.assert_called_once_with(num_retries=5)
        wait_for_operation_to_complete.assert_called_once_with(operation_name='operation_id')
    @mock.patch('requests.put')
    @mock.patch('airflow.providers.google.cloud.hooks.functions.CloudFunctionsHook.get_conn')
    def test_upload_function_zip_overridden_project_id(self, get_conn, requests_put):
        # The hook should request an upload URL, then PUT the zip body to it.
        mck, open_module = get_open_mock()
        with mock.patch(f'{open_module}.open', mck):
            # fmt: off
            generate_upload_url_method = get_conn.return_value.projects.return_value.locations. \
                return_value.functions.return_value.generateUploadUrl
            # fmt: on
            execute_method = generate_upload_url_method.return_value.execute
            execute_method.return_value = {"uploadUrl": "http://uploadHere"}
            requests_put.return_value = None
            res = self.gcf_function_hook_no_project_id.upload_function_zip(
                project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST, location=GCF_LOCATION, zip_path="/tmp/path.zip"
            )
            self.assertEqual("http://uploadHere", res)
            generate_upload_url_method.assert_called_once_with(
                parent='projects/example-project/locations/location'
            )
            execute_method.assert_called_once_with(num_retries=5)
            requests_put.assert_called_once_with(
                data=mock.ANY,
                headers={'Content-type': 'application/zip', 'x-goog-content-length-range': '0,104857600'},
                url='http://uploadHere',
            )
class TestFunctionHookDefaultProjectId(unittest.TestCase):
    def setUp(self):
        # Patch the base hook __init__ so the hook picks up the mocked
        # connection with a default project id and needs no credentials.
        with mock.patch(
            'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__',
            new=mock_base_gcp_hook_default_project_id,
        ):
            self.gcf_function_hook = CloudFunctionsHook(gcp_conn_id='test', api_version='v1')
    @mock.patch("airflow.providers.google.cloud.hooks.functions.CloudFunctionsHook._authorize")
    @mock.patch("airflow.providers.google.cloud.hooks.functions.build")
    def test_gcf_client_creation(self, mock_build, mock_authorize):
        # get_conn() must build a v1 cloudfunctions client and cache it.
        result = self.gcf_function_hook.get_conn()
        mock_build.assert_called_once_with(
            'cloudfunctions', 'v1', http=mock_authorize.return_value, cache_discovery=False
        )
        self.assertEqual(mock_build.return_value, result)
        self.assertEqual(self.gcf_function_hook._conn, result)
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch('airflow.providers.google.cloud.hooks.functions.CloudFunctionsHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.functions.CloudFunctionsHook._wait_for_operation_to_complete'
)
def test_create_new_function(self, wait_for_operation_to_complete, get_conn, mock_project_id):
create_method = (
get_conn.return_value.projects.return_value.locations.return_value.functions.return_value.create
)
execute_method = create_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gcf_function_hook.create_new_function(
location=GCF_LOCATION,
body={},
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
self.assertIsNone(res)
create_method.assert_called_once_with(body={}, location='projects/example-project/locations/location')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(operation_name='operation_id')
@mock.patch('airflow.providers.google.cloud.hooks.functions.CloudFunctionsHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.functions.CloudFunctionsHook._wait_for_operation_to_complete'
)
def test_create_new_function_override_project_id(self, wait_for_operation_to_complete, get_conn):
create_method = (
get_conn.return_value.projects.return_value.locations.return_value.functions.return_value.create
)
execute_method = create_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gcf_function_hook.create_new_function(
project_id='new-project', location=GCF_LOCATION, body={}
)
self.assertIsNone(res)
create_method.assert_called_once_with(body={}, location='projects/new-project/locations/location')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(operation_name='operation_id')
@mock.patch('airflow.providers.google.cloud.hooks.functions.CloudFunctionsHook.get_conn')
def test_get_function(self, get_conn):
get_method = (
get_conn.return_value.projects.return_value.locations.return_value.functions.return_value.get
)
execute_method = get_method.return_value.execute
execute_method.return_value = {"name": "function"}
res = self.gcf_function_hook.get_function(name=GCF_FUNCTION)
self.assertIsNotNone(res)
self.assertEqual('function', res['name'])
get_method.assert_called_once_with(name='function')
execute_method.assert_called_once_with(num_retries=5)
@mock.patch('airflow.providers.google.cloud.hooks.functions.CloudFunctionsHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.functions.CloudFunctionsHook._wait_for_operation_to_complete'
)
def test_delete_function(self, wait_for_operation_to_complete, get_conn):
delete_method = (
get_conn.return_value.projects.return_value.locations.return_value.functions.return_value.delete
)
execute_method = delete_method.return_value.execute
wait_for_operation_to_complete.return_value = None
execute_method.return_value = {"name": "operation_id"}
res = self.gcf_function_hook.delete_function( # pylint: disable=assignment-from-no-return
name=GCF_FUNCTION
)
self.assertIsNone(res)
delete_method.assert_called_once_with(name='function')
execute_method.assert_called_once_with(num_retries=5)
@mock.patch('airflow.providers.google.cloud.hooks.functions.CloudFunctionsHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.functions.CloudFunctionsHook._wait_for_operation_to_complete'
)
def test_update_function(self, wait_for_operation_to_complete, get_conn):
patch_method = (
get_conn.return_value.projects.return_value.locations.return_value.functions.return_value.patch
)
execute_method = patch_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gcf_function_hook.update_function( # pylint: disable=assignment-from-no-return
update_mask=['a', 'b', 'c'], name=GCF_FUNCTION, body={}
)
self.assertIsNone(res)
patch_method.assert_called_once_with(body={}, name='function', updateMask='a,b,c')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(operation_name='operation_id')
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch('requests.put')
@mock.patch('airflow.providers.google.cloud.hooks.functions.CloudFunctionsHook.get_conn')
def test_upload_function_zip(self, get_conn, requests_put, mock_project_id):
mck, open_module = get_open_mock()
with mock.patch(f'{open_module}.open', mck):
# fmt: off
generate_upload_url_method = get_conn.return_value.projects.return_value.locations. \
return_value.functions.return_value.generateUploadUrl
# fmt: on
execute_method = generate_upload_url_method.return_value.execute
execute_method.return_value = {"uploadUrl": "http://uploadHere"}
requests_put.return_value = None
res = self.gcf_function_hook.upload_function_zip(
location=GCF_LOCATION,
zip_path="/tmp/path.zip",
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
self.assertEqual("http://uploadHere", res)
generate_upload_url_method.assert_called_once_with(
parent='projects/example-project/locations/location'
)
execute_method.assert_called_once_with(num_retries=5)
requests_put.assert_called_once_with(
data=mock.ANY,
headers={'Content-type': 'application/zip', 'x-goog-content-length-range': '0,104857600'},
url='http://uploadHere',
)
@mock.patch('requests.put')
@mock.patch('airflow.providers.google.cloud.hooks.functions.CloudFunctionsHook.get_conn')
def test_upload_function_zip_overridden_project_id(self, get_conn, requests_put):
mck, open_module = get_open_mock()
with mock.patch(f'{open_module}.open', mck):
# fmt: off
generate_upload_url_method = get_conn.return_value.projects.return_value.locations. \
return_value.functions.return_value.generateUploadUrl
# fmt: on
execute_method = generate_upload_url_method.return_value.execute
execute_method.return_value = {"uploadUrl": "http://uploadHere"}
requests_put.return_value = None
res = self.gcf_function_hook.upload_function_zip(
project_id='new-project', location=GCF_LOCATION, zip_path="/tmp/path.zip"
)
self.assertEqual("http://uploadHere", res)
generate_upload_url_method.assert_called_once_with(
parent='projects/new-project/locations/location'
)
execute_method.assert_called_once_with(num_retries=5)
requests_put.assert_called_once_with(
data=mock.ANY,
headers={'Content-type': 'application/zip', 'x-goog-content-length-range': '0,104857600'},
url='http://uploadHere',
)
@mock.patch('airflow.providers.google.cloud.hooks.functions.CloudFunctionsHook.get_conn')
def test_call_function(self, mock_get_conn):
payload = {'executionId': 'wh41ppcyoa6l', 'result': 'Hello World!'}
# fmt: off
call = mock_get_conn.return_value.projects.return_value. \
locations.return_value.functions.return_value.call
# fmt: on
call.return_value.execute.return_value = payload
function_id = "function1234"
input_data = {'key': 'value'}
name = "projects/{project_id}/locations/{location}/functions/{function_id}".format(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST, location=GCF_LOCATION, function_id=function_id
)
result = self.gcf_function_hook.call_function(
function_id=function_id,
location=GCF_LOCATION,
input_data=input_data,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
call.assert_called_once_with(body=input_data, name=name)
self.assertDictEqual(result, payload)
@mock.patch('airflow.providers.google.cloud.hooks.functions.CloudFunctionsHook.get_conn')
def test_call_function_error(self, mock_get_conn):
payload = {'error': 'Something very bad'}
# fmt: off
call = mock_get_conn.return_value.projects.return_value. \
locations.return_value.functions.return_value.call
# fmt: on
call.return_value.execute.return_value = payload
function_id = "function1234"
input_data = {'key': 'value'}
with self.assertRaises(AirflowException):
self.gcf_function_hook.call_function(
function_id=function_id,
location=GCF_LOCATION,
input_data=input_data,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
| apache-2.0 |
fedorpatlin/ansible | lib/ansible/modules/network/nxos/nxos_nxapi.py | 53 | 9907 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = """
---
module: nxos_nxapi
extends_documentation_fragment: nxos
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Manage NXAPI configuration on an NXOS device.
description:
- Configures the NXAPI feature on devices running Cisco NXOS. The
NXAPI feature is absent from the configuration by default. Since
this module manages the NXAPI feature it only supports the use
of the C(Cli) transport.
options:
http_port:
description:
- Configure the port with which the HTTP server will listen on
for requests. By default, NXAPI will bind the HTTP service
to the standard HTTP port 80. This argument accepts valid
port values in the range of 1 to 65535.
required: false
default: 80
http:
description:
- Controls the operating state of the HTTP protocol as one of the
underlying transports for NXAPI. By default, NXAPI will enable
the HTTP transport when the feature is first configured. To
disable the use of the HTTP transport, set the value of this
argument to False.
required: false
default: yes
choices: ['yes', 'no']
aliases: ['enable_http']
https_port:
description:
- Configure the port with which the HTTPS server will listen on
for requests. By default, NXAPI will bind the HTTPS service
to the standard HTTPS port 443. This argument accepts valid
port values in the range of 1 to 65535.
required: false
default: 443
https:
description:
- Controls the operating state of the HTTPS protocol as one of the
underlying transports for NXAPI. By default, NXAPI will disable
the HTTPS transport when the feature is first configured. To
enable the use of the HTTPS transport, set the value of this
argument to True.
required: false
default: no
choices: ['yes', 'no']
aliases: ['enable_https']
sandbox:
description:
- The NXAPI feature provides a web base UI for developers for
entering commands. This feature is initially disabled when
the NXAPI feature is configured for the first time. When the
C(sandbox) argument is set to True, the developer sandbox URL
will accept requests and when the value is set to False, the
sandbox URL is unavailable.
required: false
default: no
choices: ['yes', 'no']
aliases: ['enable_sandbox']
state:
description:
- The C(state) argument controls whether or not the NXAPI
feature is configured on the remote device. When the value
is C(present) the NXAPI feature configuration is present in
the device running-config. When the values is C(absent) the
feature configuration is removed from the running-config.
choices: ['present', 'absent']
required: false
default: present
"""
EXAMPLES = """
- name: Enable NXAPI access with default configuration
nxos_nxapi:
state: present
- name: Enable NXAPI with no HTTP, HTTPS at port 9443 and sandbox disabled
nxos_nxapi:
enable_http: false
https_port: 9443
https: yes
enable_sandbox: no
- name: remove NXAPI configuration
nxos_nxapi:
state: absent
"""
RETURN = """
updates:
description:
- Returns the list of commands that need to be pushed into the remote
device to satisfy the arguments
returned: always
type: list
sample: ['no feature nxapi']
"""
import re
from functools import partial
from ansible.module_utils.nxos import run_commands, load_config
from ansible.module_utils.nxos import nxos_argument_spec
from ansible.module_utils.nxos import check_args as nxos_check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import NetworkConfig
from ansible.module_utils.six import iteritems
def check_args(module, warnings):
    """Validate module parameters and translate deprecated argument values.

    Fails the module outright when it is being run over the ``nxapi``
    transport (this module manages the NXAPI feature itself, so it must
    use the CLI transport). Deprecated ``state`` values are rewritten in
    place and a deprecation warning is appended to *warnings*.

    :param module: the AnsibleModule instance being validated
    :param warnings: mutable list that accumulates warning strings
    :returns: the (mutated) warnings list
    """
    transport = module.params['transport']
    provider_transport = (module.params['provider'] or {}).get('transport')
    if 'nxapi' in (transport, provider_transport):
        # Fixed message typo: was "is not supporting when configuring nxapi".
        # This check also subsumes the former redundant second check of
        # module.params['transport'] == 'nxapi' further down.
        module.fail_json(msg='transport=nxapi is not supported when configuring nxapi')
    nxos_check_args(module, warnings)

    state = module.params['state']
    if state == 'started':
        module.params['state'] = 'present'
        # Fixed duplicated article: the old split literal produced
        # "removed in a a future release".
        warnings.append('state=started is deprecated and will be removed in '
                        'a future release. Please use state=present instead')
    elif state == 'stopped':
        module.params['state'] = 'absent'
        warnings.append('state=stopped is deprecated and will be removed in '
                        'a future release. Please use state=absent instead')

    for key in ['config']:
        if module.params[key]:
            warnings.append('argument %s is deprecated and will be ignored' % key)

    return warnings
def map_obj_to_commands(updates, module):
    """Build the NX-OS command list that moves the device from have to want.

    :param updates: tuple of (want, have) dicts describing desired and
        current NXAPI configuration
    :param module: AnsibleModule instance (unused; kept for API symmetry
        with the other map_* helpers)
    :returns: list of configuration command strings (possibly empty)
    """
    commands = list()
    want, have = updates

    # PEP 8 (E731): use a def instead of assigning a lambda to a name.
    def needs_update(x):
        # Only act when the desired value is explicitly set and differs
        # from the device's current state.
        return want.get(x) is not None and (want.get(x) != have.get(x))

    if needs_update('state'):
        if want['state'] == 'absent':
            # Removing the feature implicitly removes all NXAPI config,
            # so no other commands are needed.
            return ['no feature nxapi']
        commands.append('feature nxapi')

    if any((needs_update('http'), needs_update('http_port'))):
        if want['http'] is True or (want['http'] is None and have['http'] is True):
            port = want['http_port'] or 80
            commands.append('nxapi http port %s' % port)
        elif want['http'] is False:
            commands.append('no nxapi http')

    if any((needs_update('https'), needs_update('https_port'))):
        if want['https'] is True or (want['https'] is None and have['https'] is True):
            port = want['https_port'] or 443
            commands.append('nxapi https port %s' % port)
        elif want['https'] is False:
            commands.append('no nxapi https')

    if needs_update('sandbox'):
        cmd = 'nxapi sandbox'
        if not want['sandbox']:
            cmd = 'no %s' % cmd
        commands.append(cmd)

    return commands
def parse_http(data):
    """Extract the HTTP transport state from `show nxapi` output.

    :param data: raw text output of the `show nxapi` command
    :returns: dict with 'http' (bool) and 'http_port' (int or None)
    """
    # Raw string fixes the invalid escape sequences ('\s', '\d') that
    # modern Python flags in non-raw regex literals.
    match = re.search(r'HTTP Port:\s+(\d+)', data, re.M)
    if match:
        return {'http': True, 'http_port': int(match.group(1))}
    return {'http': False, 'http_port': None}
def parse_https(data):
    """Extract the HTTPS transport state from `show nxapi` output.

    :param data: raw text output of the `show nxapi` command
    :returns: dict with 'https' (bool) and 'https_port' (int or None)
    """
    # Raw string fixes the invalid escape sequences ('\s', '\d') that
    # modern Python flags in non-raw regex literals.
    match = re.search(r'HTTPS Port:\s+(\d+)', data, re.M)
    if match:
        return {'https': True, 'https_port': int(match.group(1))}
    return {'https': False, 'https_port': None}
def parse_sandbox(data):
    """Extract the developer-sandbox state from `show nxapi` output.

    :param data: raw text output of the `show nxapi` command
    :returns: dict with 'sandbox' set to True, False, or None when the
        line is absent from the output
    """
    # Raw string fixes the invalid escape sequence ('\s') that modern
    # Python flags in non-raw regex literals.
    match = re.search(r'Sandbox:\s+(.+)$', data, re.M)
    value = None
    if match:
        value = match.group(1) == 'Enabled'
    return {'sandbox': value}
def map_config_to_obj(module):
    """Read the device's current NXAPI configuration into a state dict.

    Runs `show nxapi` on the device; empty output means the feature is
    not enabled at all.
    """
    output = run_commands(module, ['show nxapi'], check_rc=False)
    if output[0] == '':
        # Feature is not configured on the device.
        return {'state': 'absent'}

    text = str(output[0]).strip()
    obj = {'state': 'present'}
    for parser in (parse_http, parse_https, parse_sandbox):
        obj.update(parser(text))
    return obj
def validate_http_port(value, module):
    """Fail the module unless http_port is a valid TCP port (1-65535)."""
    port = module.params['http_port']
    if port < 1 or port > 65535:
        module.fail_json(msg='http_port must be between 1 and 65535')
def validate_https_port(value, module):
    """Fail the module unless https_port is a valid TCP port (1-65535)."""
    port = module.params['https_port']
    if port < 1 or port > 65535:
        module.fail_json(msg='https_port must be between 1 and 65535')
def map_params_to_obj(module):
    """Collect the desired NXAPI configuration from module parameters.

    For every truthy value, a module-level ``validate_<key>`` function is
    looked up via globals() and, if present, invoked so invalid values
    fail the module before any commands are generated.

    :param module: the AnsibleModule instance
    :returns: dict describing the desired ("want") state
    """
    obj = {
        'http': module.params['http'],
        'http_port': module.params['http_port'],
        'https': module.params['https'],
        'https_port': module.params['https_port'],
        'sandbox': module.params['sandbox'],
        'state': module.params['state']
    }

    # dict.items() replaces six.iteritems(): behavior-identical and the
    # idiomatic form on Python 3, where this module runs.
    for key, value in obj.items():
        if value:
            validator = globals().get('validate_%s' % key)
            if validator:
                validator(value, module)

    return obj
def main():
    """ main entry point for module execution
    """
    argument_spec = dict(
        http=dict(aliases=['enable_http'], type='bool'),
        http_port=dict(type='int'),
        https=dict(aliases=['enable_https'], type='bool'),
        https_port=dict(type='int'),
        sandbox=dict(aliases=['enable_sandbox'], type='bool'),

        # deprecated (Ansible 2.3) arguments
        config=dict(),

        state=dict(default='present', choices=['started', 'stopped', 'present', 'absent'])
    )

    # Merge in the shared nxos provider/transport arguments.
    argument_spec.update(nxos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    result = {'changed': False}

    warnings = list()
    check_args(module, warnings)
    result['warnings'] = warnings

    # Compute desired state, read current state, then diff into commands.
    want = map_params_to_obj(module)
    have = map_config_to_obj(module)

    commands = map_obj_to_commands((want, have), module)
    result['commands'] = commands

    if commands:
        # In check mode report the commands without pushing them.
        if not module.check_mode:
            load_config(module, commands)
        result['changed'] = True

    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
CiscoUcs/Ironic | ironic/tests/conductor/test_conductor_utils.py | 8 | 15831 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import uuidutils
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.conductor import utils as conductor_utils
from ironic import objects
from ironic.tests import base as tests_base
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base
from ironic.tests.db import utils
from ironic.tests.objects import utils as obj_utils
class NodeSetBootDeviceTestCase(base.DbTestCase):
    """Tests for conductor_utils.node_set_boot_device."""

    def test_node_set_boot_device_non_existent_device(self):
        """An unsupported boot device name raises InvalidParameterValue."""
        mgr_utils.mock_the_extension_manager(driver="fake_ipmitool")
        self.driver = driver_factory.get_driver("fake_ipmitool")
        ipmi_info = utils.get_test_ipmi_info()
        node = obj_utils.create_test_node(self.context,
                                          uuid=uuidutils.generate_uuid(),
                                          driver='fake_ipmitool',
                                          driver_info=ipmi_info)
        task = task_manager.TaskManager(self.context, node.uuid)
        self.assertRaises(exception.InvalidParameterValue,
                          conductor_utils.node_set_boot_device,
                          task,
                          device='fake')

    def test_node_set_boot_device_valid(self):
        """A valid device is forwarded to the driver, non-persistent by default."""
        mgr_utils.mock_the_extension_manager(driver="fake_ipmitool")
        self.driver = driver_factory.get_driver("fake_ipmitool")
        ipmi_info = utils.get_test_ipmi_info()
        node = obj_utils.create_test_node(self.context,
                                          uuid=uuidutils.generate_uuid(),
                                          driver='fake_ipmitool',
                                          driver_info=ipmi_info)
        task = task_manager.TaskManager(self.context, node.uuid)
        with mock.patch.object(self.driver.management,
                               'set_boot_device') as mock_sbd:
            conductor_utils.node_set_boot_device(task,
                                                 device='pxe')
            mock_sbd.assert_called_once_with(task,
                                             device='pxe',
                                             persistent=False)
class NodePowerActionTestCase(base.DbTestCase):
    """Tests for conductor_utils.node_power_action using the fake driver."""

    def setUp(self):
        super(NodePowerActionTestCase, self).setUp()
        mgr_utils.mock_the_extension_manager()
        self.driver = driver_factory.get_driver("fake")

    def test_node_power_action_power_on(self):
        """Test node_power_action to turn node power on."""
        node = obj_utils.create_test_node(self.context,
                                          uuid=uuidutils.generate_uuid(),
                                          driver='fake',
                                          power_state=states.POWER_OFF)
        task = task_manager.TaskManager(self.context, node.uuid)

        with mock.patch.object(self.driver.power,
                               'get_power_state') as get_power_mock:
            get_power_mock.return_value = states.POWER_OFF

            conductor_utils.node_power_action(task, states.POWER_ON)

            node.refresh()
            get_power_mock.assert_called_once_with(mock.ANY)
            self.assertEqual(states.POWER_ON, node['power_state'])
            self.assertIsNone(node['target_power_state'])
            self.assertIsNone(node['last_error'])

    def test_node_power_action_power_off(self):
        """Test node_power_action to turn node power off."""
        node = obj_utils.create_test_node(self.context,
                                          uuid=uuidutils.generate_uuid(),
                                          driver='fake',
                                          power_state=states.POWER_ON)
        task = task_manager.TaskManager(self.context, node.uuid)

        with mock.patch.object(self.driver.power,
                               'get_power_state') as get_power_mock:
            get_power_mock.return_value = states.POWER_ON

            conductor_utils.node_power_action(task, states.POWER_OFF)

            node.refresh()
            get_power_mock.assert_called_once_with(mock.ANY)
            self.assertEqual(states.POWER_OFF, node['power_state'])
            self.assertIsNone(node['target_power_state'])
            self.assertIsNone(node['last_error'])

    def test_node_power_action_power_reboot(self):
        """Test for reboot a node."""
        node = obj_utils.create_test_node(self.context,
                                          uuid=uuidutils.generate_uuid(),
                                          driver='fake',
                                          power_state=states.POWER_ON)
        task = task_manager.TaskManager(self.context, node.uuid)

        with mock.patch.object(self.driver.power, 'reboot') as reboot_mock:
            conductor_utils.node_power_action(task, states.REBOOT)

            node.refresh()
            reboot_mock.assert_called_once_with(mock.ANY)
            self.assertEqual(states.POWER_ON, node['power_state'])
            self.assertIsNone(node['target_power_state'])
            self.assertIsNone(node['last_error'])

    def test_node_power_action_invalid_state(self):
        """Test for exception when changing to an invalid power state."""
        node = obj_utils.create_test_node(self.context,
                                          uuid=uuidutils.generate_uuid(),
                                          driver='fake',
                                          power_state=states.POWER_ON)
        task = task_manager.TaskManager(self.context, node.uuid)

        with mock.patch.object(self.driver.power,
                               'get_power_state') as get_power_mock:
            get_power_mock.return_value = states.POWER_ON

            self.assertRaises(exception.InvalidParameterValue,
                              conductor_utils.node_power_action,
                              task,
                              "INVALID_POWER_STATE")

            node.refresh()
            get_power_mock.assert_called_once_with(mock.ANY)
            self.assertEqual(states.POWER_ON, node['power_state'])
            self.assertIsNone(node['target_power_state'])
            self.assertIsNotNone(node['last_error'])

            # last_error is cleared when a new transaction happens
            conductor_utils.node_power_action(task, states.POWER_OFF)
            node.refresh()
            self.assertEqual(states.POWER_OFF, node['power_state'])
            self.assertIsNone(node['target_power_state'])
            self.assertIsNone(node['last_error'])

    def test_node_power_action_already_being_processed(self):
        """Test node power action after aborted power action.

        The target_power_state is expected to be None so it isn't
        checked in the code. This is what happens if it is not None.
        (Eg, if a conductor had died during a previous power-off
        attempt and left the target_power_state set to states.POWER_OFF,
        and the user is attempting to power-off again.)
        """
        node = obj_utils.create_test_node(self.context,
                                          uuid=uuidutils.generate_uuid(),
                                          driver='fake',
                                          power_state=states.POWER_ON,
                                          target_power_state=states.POWER_OFF)
        task = task_manager.TaskManager(self.context, node.uuid)

        conductor_utils.node_power_action(task, states.POWER_OFF)

        node.refresh()
        self.assertEqual(states.POWER_OFF, node['power_state'])
        self.assertEqual(states.NOSTATE, node['target_power_state'])
        self.assertIsNone(node['last_error'])

    def test_node_power_action_in_same_state(self):
        """Test setting node state to its present state.

        Test that we don't try to set the power state if the requested
        state is the same as the current state.
        """
        node = obj_utils.create_test_node(self.context,
                                          uuid=uuidutils.generate_uuid(),
                                          driver='fake',
                                          last_error='anything but None',
                                          power_state=states.POWER_ON)
        task = task_manager.TaskManager(self.context, node.uuid)

        with mock.patch.object(self.driver.power,
                               'get_power_state') as get_power_mock:
            get_power_mock.return_value = states.POWER_ON

            with mock.patch.object(self.driver.power,
                                   'set_power_state') as set_power_mock:
                conductor_utils.node_power_action(task, states.POWER_ON)

                node.refresh()
                get_power_mock.assert_called_once_with(mock.ANY)
                self.assertFalse(set_power_mock.called,
                                 "set_power_state unexpectedly called")
                self.assertEqual(states.POWER_ON, node['power_state'])
                self.assertIsNone(node['target_power_state'])
                self.assertIsNone(node['last_error'])

    def test_node_power_action_in_same_state_db_not_in_sync(self):
        """Test setting node state to its present state if DB is out of sync.

        Under rare conditions (see bug #1403106) database might contain stale
        information, make sure we fix it.
        """
        node = obj_utils.create_test_node(self.context,
                                          uuid=uuidutils.generate_uuid(),
                                          driver='fake',
                                          last_error='anything but None',
                                          power_state=states.POWER_ON)
        task = task_manager.TaskManager(self.context, node.uuid)

        with mock.patch.object(self.driver.power,
                               'get_power_state') as get_power_mock:
            # Device reports POWER_OFF even though the DB says POWER_ON.
            get_power_mock.return_value = states.POWER_OFF

            with mock.patch.object(self.driver.power,
                                   'set_power_state') as set_power_mock:
                conductor_utils.node_power_action(task, states.POWER_OFF)

                node.refresh()
                get_power_mock.assert_called_once_with(mock.ANY)
                self.assertFalse(set_power_mock.called,
                                 "set_power_state unexpectedly called")
                self.assertEqual(states.POWER_OFF, node['power_state'])
                self.assertIsNone(node['target_power_state'])
                self.assertIsNone(node['last_error'])

    def test_node_power_action_failed_getting_state(self):
        """Test for exception when we can't get the current power state."""
        node = obj_utils.create_test_node(self.context,
                                          uuid=uuidutils.generate_uuid(),
                                          driver='fake',
                                          power_state=states.POWER_ON)
        task = task_manager.TaskManager(self.context, node.uuid)

        with mock.patch.object(self.driver.power,
                               'get_power_state') as get_power_state_mock:
            get_power_state_mock.side_effect = (
                exception.InvalidParameterValue('failed getting power state'))

            self.assertRaises(exception.InvalidParameterValue,
                              conductor_utils.node_power_action,
                              task,
                              states.POWER_ON)

            node.refresh()
            get_power_state_mock.assert_called_once_with(mock.ANY)
            # Power state must be unchanged and the failure recorded.
            self.assertEqual(states.POWER_ON, node['power_state'])
            self.assertIsNone(node['target_power_state'])
            self.assertIsNotNone(node['last_error'])

    def test_node_power_action_set_power_failure(self):
        """Test if an exception is thrown when the set_power call fails."""
        node = obj_utils.create_test_node(self.context,
                                          uuid=uuidutils.generate_uuid(),
                                          driver='fake',
                                          power_state=states.POWER_OFF)
        task = task_manager.TaskManager(self.context, node.uuid)

        with mock.patch.object(self.driver.power,
                               'get_power_state') as get_power_mock:
            with mock.patch.object(self.driver.power,
                                   'set_power_state') as set_power_mock:
                get_power_mock.return_value = states.POWER_OFF
                set_power_mock.side_effect = exception.IronicException()

                self.assertRaises(
                    exception.IronicException,
                    conductor_utils.node_power_action,
                    task,
                    states.POWER_ON)

                node.refresh()
                get_power_mock.assert_called_once_with(mock.ANY)
                set_power_mock.assert_called_once_with(mock.ANY,
                                                       states.POWER_ON)
                # Power state must be unchanged and the failure recorded.
                self.assertEqual(states.POWER_OFF, node['power_state'])
                self.assertIsNone(node['target_power_state'])
                self.assertIsNotNone(node['last_error'])
class CleanupAfterTimeoutTestCase(tests_base.TestCase):
    """Tests for conductor_utils.cleanup_after_timeout."""

    def setUp(self):
        super(CleanupAfterTimeoutTestCase, self).setUp()
        # Build a fully mocked task: exclusive lock, mocked driver/node.
        self.task = mock.Mock(spec=task_manager.TaskManager)
        self.task.context = mock.sentinel.context
        self.task.driver = mock.Mock(spec_set=['deploy'])
        self.task.shared = False
        self.task.node = mock.Mock(spec_set=objects.Node)
        self.node = self.task.node

    def test_cleanup_after_timeout(self):
        """Happy path: node saved, deploy cleaned up, timeout error recorded."""
        conductor_utils.cleanup_after_timeout(self.task)

        self.node.save.assert_called_once_with()
        self.task.driver.deploy.clean_up.assert_called_once_with(self.task)
        self.assertIn('Timeout reached', self.node.last_error)

    def test_cleanup_after_timeout_shared_lock(self):
        """A shared (non-exclusive) task lock is rejected."""
        self.task.shared = True

        self.assertRaises(exception.ExclusiveLockRequired,
                          conductor_utils.cleanup_after_timeout,
                          self.task)

    def test_cleanup_after_timeout_cleanup_ironic_exception(self):
        """An IronicException from clean_up is recorded in last_error."""
        clean_up_mock = self.task.driver.deploy.clean_up
        clean_up_mock.side_effect = exception.IronicException('moocow')

        conductor_utils.cleanup_after_timeout(self.task)

        self.task.driver.deploy.clean_up.assert_called_once_with(self.task)
        # Node is saved twice: once for the timeout, once for the failure.
        self.assertEqual([mock.call()] * 2, self.node.save.call_args_list)
        self.assertIn('moocow', self.node.last_error)

    def test_cleanup_after_timeout_cleanup_random_exception(self):
        """An unexpected exception from clean_up yields a generic error message."""
        clean_up_mock = self.task.driver.deploy.clean_up
        clean_up_mock.side_effect = Exception('moocow')

        conductor_utils.cleanup_after_timeout(self.task)

        self.task.driver.deploy.clean_up.assert_called_once_with(self.task)
        self.assertEqual([mock.call()] * 2, self.node.save.call_args_list)
        self.assertIn('Deploy timed out', self.node.last_error)
| apache-2.0 |
minhphung171093/GreenERP_V7 | openerp/addons/event_sale/event_sale.py | 23 | 4880 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class product(osv.osv):
    """Extend product.product with event-subscription fields."""
    _inherit = 'product.product'
    _columns = {
        'event_ok': fields.boolean('Event Subscription', help='Determine if a product needs to create automatically an event registration at the confirmation of a sales order line.'),
        'event_type_id': fields.many2one('event.type', 'Type of Event', help='Select event types so when we use this product in sales order lines, it will filter events of this type only.'),
    }

    def onchange_event_ok(self, cr, uid, ids, event_ok, context=None):
        # An event product is always a service; clear the type otherwise.
        return {'value': {'type': event_ok and 'service' or False}}

# Legacy OpenERP 6-style model registration.
product()
class sale_order_line(osv.osv):
    """Extend sale.order.line to create event registrations on confirmation."""
    _inherit = 'sale.order.line'
    _columns = {
        'event_id': fields.many2one('event.event', 'Event', help="Choose an event and it will automatically create a registration for this event."),
        # those 2 fields are used for dynamic domains and filled by onchange
        'event_type_id': fields.related('product_id','event_type_id', type='many2one', relation="event.type", string="Event Type"),
        'event_ok': fields.related('product_id', 'event_ok', string='event_ok', type='boolean'),
    }

    def product_id_change(self, cr, uid, ids,
                          pricelist,
                          product, qty=0,
                          uom=False,
                          qty_uos=0,
                          uos=False,
                          name='',
                          partner_id=False,
                          lang=False,
                          update_tax=True,
                          date_order=False,
                          packaging=False,
                          fiscal_position=False,
                          flag=False, context=None):
        """
        check product if event type
        """
        res = super(sale_order_line,self).product_id_change(cr, uid, ids, pricelist, product, qty=qty, uom=uom, qty_uos=qty_uos, uos=uos, name=name, partner_id=partner_id, lang=lang, update_tax=update_tax, date_order=date_order, packaging=packaging, fiscal_position=fiscal_position, flag=flag, context=context)
        if product:
            product_res = self.pool.get('product.product').browse(cr, uid, product, context=context)
            # Propagate the event flags so the view can show/filter events.
            if product_res.event_ok:
                res['value'].update(event_type_id=product_res.event_type_id.id,
                                    event_ok=product_res.event_ok)
            else:
                res['value'].update(event_type_id=False,
                                    event_ok=False)
        return res

    def button_confirm(self, cr, uid, ids, context=None):
        '''
        create registration with sales order
        '''
        registration_obj = self.pool.get('event.registration')
        sale_obj = self.pool.get('sale.order')
        for order_line in self.browse(cr, uid, ids, context=context):
            if order_line.event_id.id:
                # One registration per line, with as many attendees as
                # the ordered quantity.
                dic = {
                    'name': order_line.order_id.partner_invoice_id.name,
                    'partner_id': order_line.order_id.partner_id.id,
                    'nb_register': int(order_line.product_uom_qty),
                    'email': order_line.order_id.partner_id.email,
                    'phone': order_line.order_id.partner_id.phone,
                    'origin': order_line.order_id.name,
                    'event_id': order_line.event_id.id,
                }
                registration_id = registration_obj.create(cr, uid, dic, context=context)
                # Log the creation in the registration's chatter.
                message = _("The registration %s has been created from the Sales Order %s.") % (registration_id, order_line.order_id.name)
                registration_obj.message_post(cr, uid, [registration_id], body=message, context=context)
        return super(sale_order_line, self).button_confirm(cr, uid, ids, context=context)
| agpl-3.0 |
gicsi/aap | src/machine_learning/nltk-trainer-master/analyze_tagged_corpus.py | 6 | 3565 | #!/usr/bin/env python
import argparse
import collections
import nltk.corpus
from nltk.corpus.util import LazyCorpusLoader
from nltk_trainer import basestring, load_corpus_reader, simplify_wsj_tag
########################################
## command options & argument parsing ##
########################################
parser = argparse.ArgumentParser(description='Analyze a part-of-speech tagged corpus',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('corpus',
help='''The name of a tagged corpus included with NLTK, such as treebank,
brown, cess_esp, floresta, or the root path to a corpus directory,
which can be either an absolute path or relative to a nltk_data directory.''')
parser.add_argument('--trace', default=1, type=int,
help='How much trace output you want, defaults to %(default)d. 0 is no trace output.')
corpus_group = parser.add_argument_group('Corpus Reader Options')
corpus_group.add_argument('--reader', default=None,
help='''Full module path to a corpus reader class, such as
nltk.corpus.reader.tagged.TaggedCorpusReader''')
corpus_group.add_argument('--fileids', default=None,
help='Specify fileids to load from corpus')
if simplify_wsj_tag:
corpus_group.add_argument('--simplify_tags', action='store_true', default=False,
help='Use simplified tags')
else:
corpus_group.add_argument('--tagset', default=None,
help='Map tags to a given tagset, such as "universal"')
# Options controlling how the final tag/count table is ordered.
sort_group = parser.add_argument_group('Tag Count Sorting Options')
sort_group.add_argument('--sort', default='tag', choices=['tag', 'count'],
	help='Sort key, defaults to %(default)s')
sort_group.add_argument('--reverse', action='store_true', default=False,
	help='Sort in reverse order')  # fixed help-text typo: "revere" -> "reverse"
args = parser.parse_args()
###################
## corpus reader ##
###################
# Load the tagged corpus named on the command line (built-in NLTK corpus
# name or a filesystem path), failing loudly if it cannot be resolved.
tagged_corpus = load_corpus_reader(args.corpus, reader=args.reader, fileids=args.fileids)

if not tagged_corpus:
	# Bug fix: the original raised ValueError('%s is an unknown corpus')
	# without ever substituting the corpus name into the message.
	raise ValueError('%s is an unknown corpus' % args.corpus)

if args.trace:
	print('loading %s' % args.corpus)
##############
## counting ##
##############
wc = 0
tag_counts = collections.defaultdict(int)
taglen = 7
word_set = set()
if simplify_wsj_tag and args.simplify_tags and args.corpus not in ['conll2000', 'switchboard']:
kwargs = {'simplify_tags': True}
elif not simplify_wsj_tag and args.tagset:
kwargs = {'tagset': args.tagset}
else:
kwargs = {}
for word, tag in tagged_corpus.tagged_words(fileids=args.fileids, **kwargs):
if not tag:
continue
if len(tag) > taglen:
taglen = len(tag)
if args.corpus in ['conll2000', 'switchboard'] and simplify_wsj_tag and args.simplify_tags:
tag = simplify_wsj_tag(tag)
wc += 1
# loading corpora/treebank/tagged with ChunkedCorpusReader produces None tags
if not isinstance(tag, basestring): tag = str(tag)
tag_counts[tag] += 1
word_set.add(word)
############
## output ##
############
print('%d total words\n%d unique words\n%d tags\n' % (wc, len(word_set), len(tag_counts)))
if args.sort == 'tag':
sort_key = lambda tc: tc[0]
elif args.sort == 'count':
sort_key = lambda tc: tc[1]
else:
raise ValueError('%s is not a valid sort option' % args.sort)
sorted_tag_counts = sorted(tag_counts.items(), key=sort_key, reverse=args.reverse)
countlen = max(len(str(sorted_tag_counts[0][1])) + 2, 9)
# simple reSt table format
print(' '.join(['Tag'.center(taglen), 'Count'.center(countlen)]))
print(' '.join(['='*taglen, '='*(countlen)]))
for tag, count in sorted_tag_counts:
print(' '.join([tag.ljust(taglen), str(count).rjust(countlen)]))
print(' '.join(['='*taglen, '='*(countlen)])) | gpl-3.0 |
wwright2/dcim3-angstrom1 | sources/openembedded-core/meta/lib/oe/distro_check.py | 8 | 15275 | def get_links_from_url(url):
"Return all the href links found on the web location"
import urllib, sgmllib
class LinksParser(sgmllib.SGMLParser):
def parse(self, s):
"Parse the given string 's'."
self.feed(s)
self.close()
def __init__(self, verbose=0):
"Initialise an object passing 'verbose' to the superclass."
sgmllib.SGMLParser.__init__(self, verbose)
self.hyperlinks = []
def start_a(self, attributes):
"Process a hyperlink and its 'attributes'."
for name, value in attributes:
if name == "href":
self.hyperlinks.append(value.strip('/'))
def get_hyperlinks(self):
"Return the list of hyperlinks."
return self.hyperlinks
sock = urllib.urlopen(url)
webpage = sock.read()
sock.close()
linksparser = LinksParser()
linksparser.parse(webpage)
return linksparser.get_hyperlinks()
def find_latest_numeric_release(url):
    """Find the latest listed numeric release on the given url.

    Scans every hyperlink on the index page and returns the link text with
    the highest numeric (float) value, e.g. "20" among Fedora release
    directories.  Returns "" when no link parses as a number.
    """
    best = 0
    best_link = ""
    for link in get_links_from_url(url):
        try:
            release = float(link)
        except ValueError:
            # Non-numeric links (README, parent dirs, ...) are skipped.
            # (The original used a bare 'except:' and shadowed builtin max.)
            continue
        if release > best:
            best = release
            best_link = link
    return best_link
def is_src_rpm(name):
    """Return True when *name* is a source RPM file name (*.src.rpm)."""
    return name.endswith(".src.rpm")
def package_name_from_srpm(srpm):
    """Strip out the package name from the src.rpm filename.

    The name is the first dash-separated chunk plus every following chunk
    (excluding the last, which carries release + '.src.rpm') that does not
    start with a digit, e.g. "perl-File-Path-2.08-1.src.rpm" -> "perl-File-Path".
    """
    chunks = srpm.split('-')
    kept = [chunks[0]]
    for chunk in chunks[1:-1]:
        if not chunk[0].isdigit():
            kept.append(chunk)
    return '-'.join(kept)
def clean_package_list(package_list):
    """Remove duplicate entries from the package list and return it sorted.

    The original used the py2-only eager-map trick
    ``map(set.__setitem__, package_list, [])`` (which also shadowed the
    builtins ``set``), never actually sorted despite its docstring, and
    silently returns an empty list under py3 where ``map`` is lazy.
    ``sorted(set(...))`` does both jobs correctly on py2 and py3.
    """
    return sorted(set(package_list))
def get_latest_released_meego_source_package_list():
    """Return (release, package list) for the latest MeeGo distro.

    Reads package names from a locally prepared file rather than the
    network; a missing file simply yields an empty list.
    """
    package_names = []
    try:
        # NOTE(review): reads "Meego-1.1" yet reports release "1.0" below —
        # looks inconsistent; confirm which release the list actually is.
        f = open("/tmp/Meego-1.1", "r")
        for line in f:
            package_names.append(line[:-1] + ":" + "main") # Also strip the '\n' at the end
    except IOError: pass
    package_list=clean_package_list(package_names)
    return "1.0", package_list
def get_source_package_list_from_url(url, section):
    """Return a sectioned list of package names from a URL list.

    Every "*.src.rpm" link on the page is turned into "<pkgname>:<section>".
    """
    bb.note("Reading %s: %s" % (url, section))
    return [package_name_from_srpm(link) + ":" + section
            for link in get_links_from_url(url)
            if is_src_rpm(link)]
def get_latest_released_fedora_source_package_list():
"Returns list of all the name os packages in the latest fedora distro"
latest = find_latest_numeric_release("http://archive.fedoraproject.org/pub/fedora/linux/releases/")
package_names = get_source_package_list_from_url("http://archive.fedoraproject.org/pub/fedora/linux/releases/%s/Fedora/source/SRPMS/" % latest, "main")
# package_names += get_source_package_list_from_url("http://download.fedora.redhat.com/pub/fedora/linux/releases/%s/Everything/source/SPRMS/" % latest, "everything")
package_names += get_source_package_list_from_url("http://archive.fedoraproject.org/pub/fedora/linux/updates/%s/SRPMS/" % latest, "updates")
package_list=clean_package_list(package_names)
return latest, package_list
def get_latest_released_opensuse_source_package_list():
"Returns list of all the name os packages in the latest opensuse distro"
latest = find_latest_numeric_release("http://download.opensuse.org/source/distribution/")
package_names = get_source_package_list_from_url("http://download.opensuse.org/source/distribution/%s/repo/oss/suse/src/" % latest, "main")
package_names += get_source_package_list_from_url("http://download.opensuse.org/update/%s/rpm/src/" % latest, "updates")
package_list=clean_package_list(package_names)
return latest, package_list
def get_latest_released_mandriva_source_package_list():
"Returns list of all the name os packages in the latest mandriva distro"
latest = find_latest_numeric_release("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/")
package_names = get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/release/" % latest, "main")
# package_names += get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/contrib/release/" % latest, "contrib")
package_names += get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/updates/" % latest, "updates")
package_list=clean_package_list(package_names)
return latest, package_list
def find_latest_debian_release(url):
    """Find the latest listed debian release on the given url.

    Links are expected to look like "Debian<version>"; the
    lexicographically greatest one (after sorting) is returned with the
    "Debian" prefix stripped.  Returns "_NotFound_" when no such link
    exists.
    """
    releases = []
    for link in get_links_from_url(url):
        if link[:6] == "Debian":
            if ';' not in link:
                releases.append(link)
    releases.sort()
    try:
        return releases.pop()[6:]
    except IndexError:
        # No "Debian*" link found on the page.  (The original bare
        # 'except:' would also have swallowed KeyboardInterrupt etc.)
        return "_NotFound_"
def get_debian_style_source_package_list(url, section):
"Return the list of package-names stored in the debian style Sources.gz file"
import urllib
sock = urllib.urlopen(url)
import tempfile
tmpfile = tempfile.NamedTemporaryFile(mode='wb', prefix='oecore.', suffix='.tmp', delete=False)
tmpfilename=tmpfile.name
tmpfile.write(sock.read())
sock.close()
tmpfile.close()
import gzip
bb.note("Reading %s: %s" % (url, section))
f = gzip.open(tmpfilename)
package_names = []
for line in f:
if line[:9] == "Package: ":
package_names.append(line[9:-1] + ":" + section) # Also strip the '\n' at the end
os.unlink(tmpfilename)
return package_names
def get_latest_released_debian_source_package_list():
"Returns list of all the name os packages in the latest debian distro"
latest = find_latest_debian_release("http://ftp.debian.org/debian/dists/")
url = "http://ftp.debian.org/debian/dists/stable/main/source/Sources.gz"
package_names = get_debian_style_source_package_list(url, "main")
# url = "http://ftp.debian.org/debian/dists/stable/contrib/source/Sources.gz"
# package_names += get_debian_style_source_package_list(url, "contrib")
url = "http://ftp.debian.org/debian/dists/stable-proposed-updates/main/source/Sources.gz"
package_names += get_debian_style_source_package_list(url, "updates")
package_list=clean_package_list(package_names)
return latest, package_list
def find_latest_ubuntu_release(url):
"Find the latest listed ubuntu release on the given url"
url += "?C=M;O=D" # Descending Sort by Last Modified
for link in get_links_from_url(url):
if link[-8:] == "-updates":
return link[:-8]
return "_NotFound_"
def get_latest_released_ubuntu_source_package_list():
"Returns list of all the name os packages in the latest ubuntu distro"
latest = find_latest_ubuntu_release("http://archive.ubuntu.com/ubuntu/dists/")
url = "http://archive.ubuntu.com/ubuntu/dists/%s/main/source/Sources.gz" % latest
package_names = get_debian_style_source_package_list(url, "main")
# url = "http://archive.ubuntu.com/ubuntu/dists/%s/multiverse/source/Sources.gz" % latest
# package_names += get_debian_style_source_package_list(url, "multiverse")
# url = "http://archive.ubuntu.com/ubuntu/dists/%s/universe/source/Sources.gz" % latest
# package_names += get_debian_style_source_package_list(url, "universe")
url = "http://archive.ubuntu.com/ubuntu/dists/%s-updates/main/source/Sources.gz" % latest
package_names += get_debian_style_source_package_list(url, "updates")
package_list=clean_package_list(package_names)
return latest, package_list
def create_distro_packages_list(distro_check_dir):
    """(Re)generate the per-distro source package list files.

    For each supported distro, fetch the latest release's source package
    names and write them to
    <distro_check_dir>/package_lists/<Distro>-<release>, one
    "package:section" entry per line.  Previously generated lists are
    removed first.
    """
    pkglst_dir = os.path.join(distro_check_dir, "package_lists")
    if not os.path.isdir (pkglst_dir):
        os.makedirs(pkglst_dir)
    # first clear old stuff
    for file in os.listdir(pkglst_dir):
        os.unlink(os.path.join(pkglst_dir, file))

    per_distro_functions = [
        ("Debian", get_latest_released_debian_source_package_list),
        ("Ubuntu", get_latest_released_ubuntu_source_package_list),
        ("Fedora", get_latest_released_fedora_source_package_list),
        ("OpenSuSE", get_latest_released_opensuse_source_package_list),
        ("Mandriva", get_latest_released_mandriva_source_package_list),
        ("Meego", get_latest_released_meego_source_package_list),
    ]

    from datetime import datetime
    begin = datetime.now()
    for name, fetch_list in per_distro_functions:
        release, package_list = fetch_list()
        bb.note("Distro: %s, Latest Release: %s, # src packages: %d" % (name, release, len(package_list)))
        package_list_file = os.path.join(pkglst_dir, name + "-" + release)
        f = open(package_list_file, "w+b")
        try:
            for pkg in package_list:
                f.write(pkg + "\n")
        finally:
            # close even if a write fails, so the fd is not leaked
            f.close()
    end = datetime.now()
    delta = end - begin
    # fixed log typo: "generatiosn" -> "generation"
    bb.note("package_list generation took this much time: %d seconds" % delta.seconds)
def update_distro_data(distro_check_dir, datetime):
    """
    If the distro packages list data is old then rebuild it.
    The operation has to be protected by a lock so that
    only one thread performs it at a time.
    """
    if not os.path.isdir (distro_check_dir):
        try:
            bb.note ("Making new directory: %s" % distro_check_dir)
            os.makedirs (distro_check_dir)
        except OSError:
            raise Exception('Unable to create directory %s' % (distro_check_dir))

    datetime_file = os.path.join(distro_check_dir, "build_datetime")
    saved_datetime = "_invalid_"
    import fcntl
    try:
        if not os.path.exists(datetime_file):
            open(datetime_file, 'w+b').close() # touch the file so that the next open won't fail
        f = open(datetime_file, "r+b")
    except (IOError, OSError):
        # Bug fix: the original caught only OSError inside one big try,
        # but py2 open() raises IOError — and if open failed, 'f' was
        # unbound when the finally block tried to unlock it (NameError).
        raise Exception('Unable to read/write this file: %s' % (datetime_file))
    try:
        fcntl.lockf(f, fcntl.LOCK_EX)
        saved_datetime = f.read()
        if saved_datetime[0:8] != datetime[0:8]:
            # Datestamp (first 8 chars, i.e. the date part) changed:
            # regenerate all lists and record the new build datetime.
            bb.note("The build datetime did not match: saved:%s current:%s" % (saved_datetime, datetime))
            bb.note("Regenerating distro package lists")
            create_distro_packages_list(distro_check_dir)
            f.seek(0)
            f.write(datetime)
    except (IOError, OSError):
        raise Exception('Unable to read/write this file: %s' % (datetime_file))
    finally:
        fcntl.lockf(f, fcntl.LOCK_UN)
        f.close()
def compare_in_distro_packages_list(distro_check_dir, d):
if not os.path.isdir(distro_check_dir):
raise Exception("compare_in_distro_packages_list: invalid distro_check_dir passed")
localdata = bb.data.createCopy(d)
pkglst_dir = os.path.join(distro_check_dir, "package_lists")
matching_distros = []
pn = d.getVar('PN', True)
recipe_name = d.getVar('PN', True)
bb.note("Checking: %s" % pn)
trim_dict = dict({"-native":"-native", "-cross":"-cross", "-initial":"-initial"})
if pn.find("-native") != -1:
pnstripped = pn.split("-native")
localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
bb.data.update_data(localdata)
recipe_name = pnstripped[0]
if pn.startswith("nativesdk-"):
pnstripped = pn.split("nativesdk-")
localdata.setVar('OVERRIDES', "pn-" + pnstripped[1] + ":" + d.getVar('OVERRIDES', True))
bb.data.update_data(localdata)
recipe_name = pnstripped[1]
if pn.find("-cross") != -1:
pnstripped = pn.split("-cross")
localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
bb.data.update_data(localdata)
recipe_name = pnstripped[0]
if pn.find("-initial") != -1:
pnstripped = pn.split("-initial")
localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
bb.data.update_data(localdata)
recipe_name = pnstripped[0]
bb.note("Recipe: %s" % recipe_name)
tmp = localdata.getVar('DISTRO_PN_ALIAS', True)
distro_exceptions = dict({"OE-Core":'OE-Core', "OpenedHand":'OpenedHand', "Intel":'Intel', "Upstream":'Upstream', "Windriver":'Windriver', "OSPDT":'OSPDT Approved', "Poky":'poky'})
if tmp:
list = tmp.split(' ')
for str in list:
if str and str.find("=") == -1 and distro_exceptions[str]:
matching_distros.append(str)
distro_pn_aliases = {}
if tmp:
list = tmp.split(' ')
for str in list:
if str.find("=") != -1:
(dist, pn_alias) = str.split('=')
distro_pn_aliases[dist.strip().lower()] = pn_alias.strip()
for file in os.listdir(pkglst_dir):
(distro, distro_release) = file.split("-")
f = open(os.path.join(pkglst_dir, file), "rb")
for line in f:
(pkg, section) = line.split(":")
if distro.lower() in distro_pn_aliases:
pn = distro_pn_aliases[distro.lower()]
else:
pn = recipe_name
if pn == pkg:
matching_distros.append(distro + "-" + section[:-1]) # strip the \n at the end
f.close()
break
f.close()
if tmp != None:
list = tmp.split(' ')
for item in list:
matching_distros.append(item)
bb.note("Matching: %s" % matching_distros)
return matching_distros
def create_log_file(d, logname):
    """Create a timestamped log file under LOG_DIR and a stable symlink.

    The real file is "<logfn>.<DATETIME><suffix>"; "<logname>" is kept as
    a symlink pointing at the latest timestamped file.  Stores the path in
    the LOG_FILE variable and returns it.
    """
    logpath = d.getVar('LOG_DIR', True)
    bb.utils.mkdirhier(logpath)
    logfn, logsuffix = os.path.splitext(logname)
    logfile = os.path.join(logpath, "%s.%s%s" % (logfn, d.getVar('DATETIME', True), logsuffix))
    if not os.path.exists(logfile):
        slogfile = os.path.join(logpath, logname)
        if os.path.exists(slogfile):
            os.remove(slogfile)
        # Create the empty file directly instead of the original
        # subprocess.call("touch %s", shell=True), which interpolated an
        # unquoted path into a shell command line.
        open(logfile, 'a').close()
        os.symlink(logfile, slogfile)
    d.setVar('LOG_FILE', logfile)
    return logfile
def save_distro_check_result(result, datetime, result_file, d):
    """Append one CSV line "PN,<result items...>" to *result_file*.

    The file is locked with fcntl so concurrent tasks do not interleave
    their lines.  If LOG_DIR is unset the result is silently dropped
    (with an error logged).
    """
    pn = d.getVar('PN', True)
    logdir = d.getVar('LOG_DIR', True)
    if not logdir:
        bb.error("LOG_DIR variable is not defined, can't write the distro_check results")
        return
    if not os.path.isdir(logdir):
        os.makedirs(logdir)
    line = ",".join([pn] + [i for i in result])
    f = open(result_file, "a")
    import fcntl
    try:
        fcntl.lockf(f, fcntl.LOCK_EX)
        f.seek(0, os.SEEK_END) # seek to the end of file
        f.write(line + "\n")
    finally:
        # Original never released the lock / closed the file if the write
        # raised; always unlock and close.
        fcntl.lockf(f, fcntl.LOCK_UN)
        f.close()
| mit |
michaelhabeck/isdhic | tests/test_hmc_chromosome.py | 1 | 2824 | import isdhic
import numpy as np
from isdhic import utils
from isdhic.core import take_time
from scipy import optimize
class HamiltonianMonteCarlo(isdhic.HamiltonianMonteCarlo):
    # Thin subclass that records every stepsize the adaptive scheme sets
    # and prints progress every 20 samples.
    # NOTE: 'stepsizes' is a class-level list, deliberately shared by all
    # instances so the whole run's stepsize history is kept in one place.
    stepsizes = []
    @property
    def stepsize(self):
        # Expose the leapfrog integrator's stepsize as an attribute.
        return self.leapfrog.stepsize
    @stepsize.setter
    def stepsize(self, value):
        self.leapfrog.stepsize = float(value)
        # Log each stepsize assignment on the class-wide history list.
        HamiltonianMonteCarlo.stepsizes.append(self.leapfrog.stepsize)
    def next(self):
        result = super(HamiltonianMonteCarlo, self).next()
        # Report acceptance history, stepsize and energy every 20 draws.
        if len(self.history) and not len(self.history) % 20:
            print '{0}, stepsize = {1:.3e}, log_prob = {2:.3e}'.format(
                self.history, self.stepsize, self.state.potential_energy)
        return result
if __name__ == '__main__':
pymol = utils.ChainViewer()
## set up X chromosome simulation at 500 kb / 50 kb resolution
resolution = 500 ## Kb
filename = '../scripts/chrX_cell1_{0}kb.py'.format(resolution)
with open(filename) as script:
exec script
## start from stretched out chromosome structure
extended = np.multiply.outer(np.arange(n_particles), np.eye(3)[0]) * diameter
coords.set(extended)
## use Hamiltonian Monte Carlo generate X chromosome structures
## from the posterior distribution
hmc = HamiltonianMonteCarlo(posterior,stepsize=1e-3)
hmc.leapfrog.n_steps = 100
hmc.adapt_until = int(1e6)
hmc.activate()
samples = []
with take_time('running HMC'):
while len(samples) < 1e3:
samples.append(hmc.next())
X = np.array([state.positions for state in samples]).reshape(len(samples),-1,3)
E = np.array([state.potential_energy for state in samples])
K = np.array([state.kinetic_energy for state in samples])
if False:
from isd import utils
prior = posterior.priors[0]
E_isd = utils.Load('/tmp/E')
X = E_isd.torsion_angles.copy()
if False:
x = coords.get().copy()
## check gradient
energy = hmc.leapfrog.hamiltonian.potential_energy
gradient = hmc.leapfrog.hamiltonian.gradient_positions
with take_time('calculating energy'):
E = energy(x)
print E
a = gradient(x)
b = optimize.approx_fprime(x, energy, 1e-6)
print np.fabs(a-b).max(), round(100*np.corrcoef(a,b)[0,1])
if False:
from scipy import optimize
class Target(object):
def __init__(self, posterior):
self.posterior = posterior
def __call__(self, x):
self.posterior.params['coordinates'].set(x)
return self.posterior.log_prob()
target = Target(posterior)
posterior.update_forces()
a = posterior.params['forces'].get()
x = posterior.params['coordinates'].get().copy()
b = optimize.approx_fprime(x, target, 1e-5)
| mit |
adaxi/couchpotato | libs/pyutil/repeatable_random.py | 106 | 3622 | """
If you execute force_repeatability() then the following things are changed in the runtime:
1. random.random() and its sibling functions, and random.Random.seed() in the random module are seeded with a known seed so that they will return the same sequence on each run.
2. os.urandom() is replaced by a fake urandom that returns a pseudorandom sequence.
3. time.time() is replaced by a fake time that returns an incrementing number. (Original time.time is available as time.realtime.)
Which seed will be used?
If the environment variable REPEATABLE_RANDOMNESS_SEED is set, then it will use that. Else, it will use the current real time. In either case it logs the seed that it used.
Caveats:
1. If some code has acquired a random.Random object before force_repeatability() is executed, then that Random object will produce non-reproducible results. For example, the tempfile module in the Python Standard Library does this.
2. Likewise if some code called time.time() before force_repeatability() was called, then it will have gotten a real time stamp. For example, trial does this. (Then it later subtracts that real timestamp from a faketime timestamp to calculate elapsed time, resulting in a large negative elapsed time.)
3. Fake urandom has an added constraint for performance reasons -- you can't ask it for more than 64 bytes of randomness at a time. (I couldn't figure out how to generate large fake random strings efficiently.)
"""
import os, random, time
if not hasattr(time, "realtime"):
time.realtime = time.time
if not hasattr(os, "realurandom"):
os.realurandom = os.urandom
if not hasattr(random, "realseed"):
random.realseed = random.seed
tdelta = 0
seeded = False
def force_repeatability():
    """Make time.time(), os.urandom() and the random module deterministic.

    Installs a fake incrementing clock, a pseudorandom os.urandom (max 64
    bytes per call), and — on first use — seeds the random module from
    REPEATABLE_RANDOMNESS_SEED (or the current real time), then disables
    re-seeding.  See the module docstring for caveats.
    """
    now = 1043659734.0
    def faketime():
        # Monotonically increasing fake clock: each call advances 1 second.
        global tdelta
        tdelta += 1
        return now + tdelta
    time.faketime = faketime
    time.time = faketime

    from idlib import i2b
    def fakeurandom(n):
        if n > 64:
            # Bug fix: the original did `raise ("...")`, i.e. raised a
            # plain string, which itself raises TypeError at runtime.
            raise ValueError("Can't produce more than 64 bytes of pseudorandomness efficiently.")
        elif n == 0:
            return ''
        else:
            z = i2b(random.getrandbits(n*8))
            # right-pad with '0' characters up to the requested length
            x = z + "0" * (n-len(z))
            assert len(x) == n
            return x
    os.fakeurandom = fakeurandom
    os.urandom = fakeurandom

    global seeded
    if not seeded:
        SEED = os.environ.get('REPEATABLE_RANDOMNESS_SEED', None)
        if SEED is None:
            # Generate a seed which is integral and fairly short (to ease cut-and-paste, writing it down, etc.).
            t = time.realtime()
            subsec = t % 1
            t += (subsec * 1000000)
            t %= 1000000
            SEED = long(t)
        import sys
        sys.stdout.write("REPEATABLE_RANDOMNESS_SEED: %s\n" % SEED) ; sys.stdout.flush()
        sys.stdout.write("In order to reproduce this run of the code, set the environment variable \"REPEATABLE_RANDOMNESS_SEED\" to %s before executing.\n" % SEED) ; sys.stdout.flush()
        random.seed(SEED)
        def seed_which_refuses(a):
            # After the one-time seeding, reject any further reseed attempts
            # so the sequence stays reproducible for the whole run.
            sys.stdout.write("I refuse to reseed to %s. Go away!\n" % (a,)) ; sys.stdout.flush()
            return
        random.realseed = random.seed
        random.seed = seed_which_refuses
        seeded = True

    import setutil
    setutil.RandomSet.DETERMINISTIC = True
def restore_real_clock():
    # Put back the real time.time saved at import as time.realtime.
    time.time = time.realtime
def restore_real_urandom():
    # Put back the real os.urandom saved at import as os.realurandom.
    os.urandom = os.realurandom
def restore_real_seed():
    # Put back the real random.seed saved at import as random.realseed.
    random.seed = random.realseed
def restore_non_repeatability():
    # Undo everything force_repeatability() patched.
    restore_real_seed()
    restore_real_urandom()
    restore_real_clock()
| gpl-3.0 |
vinay94185vinay/Hybrid | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    # Count events that have no dedicated handler; totals are printed by
    # print_unhandled() at trace end.
    try:
        unhandled[event_name] += 1
    except TypeError:
        # 'unhandled' is an autodict: a fresh key auto-creates a nested
        # dict, so += 1 raises TypeError on first sight — presumably that
        # is the intended "initialize to 1" path (TODO confirm Core.autodict).
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
cldershem/osf.io | website/addons/figshare/views/auth.py | 12 | 4131 | # -*- coding: utf-8 -*-
import os
import httplib as http
from flask import request
from framework.flask import redirect # VOL-aware redirect
from framework.exceptions import HTTPError
from framework.auth.decorators import collect_auth
from framework.auth.decorators import must_be_logged_in
from website import models
from website.util import web_url_for
from website.project.decorators import must_have_addon
from website.project.decorators import must_have_permission
from ..auth import oauth_start_url, oauth_get_token
@must_be_logged_in
def figshare_oauth_start(auth, **kwargs):
user = auth.user
nid = kwargs.get('nid') or kwargs.get('pid')
node = models.Node.load(nid) if nid else None
if node and not node.is_contributor(user):
raise HTTPError(http.FORBIDDEN)
user.add_addon('figshare')
figshare_user = user.get_addon('figshare')
if node:
figshare_node = node.get_addon('figshare')
figshare_node.user_settings = figshare_user
figshare_node.save()
request_token, request_token_secret, authorization_url = oauth_start_url(user, node)
figshare_user.oauth_request_token = request_token
figshare_user.oauth_request_token_secret = request_token_secret
figshare_user.save()
return redirect(authorization_url)
@must_have_permission('write')
@must_have_addon('figshare', 'node')
def figshare_oauth_delete_node(auth, node_addon, **kwargs):
    """Unlink the node's figshare content and record a log entry.

    Clears the addon's user settings and figshare id/type/title, then
    logs a 'figshare_content_unlinked' event describing what was removed.
    """
    node = kwargs['node'] or kwargs['project']
    # Bug fix: capture the linked content info BEFORE clearing it — the
    # original read node_addon.figshare_type/figshare_id after setting
    # them to None, so the log always recorded None values.
    figshare_type = node_addon.figshare_type
    figshare_id = node_addon.figshare_id
    node_addon.user_settings = None
    node_addon.figshare_id = None
    node_addon.figshare_type = None
    node_addon.figshare_title = None
    node_addon.save()
    node.add_log(
        action='figshare_content_unlinked',
        params={
            'project': node.parent_id,
            'node': node._id,
            'figshare': {
                'type': figshare_type,
                'id': figshare_id,
            }
        },
        auth=auth,
    )
    return {}
@collect_auth
def figshare_oauth_callback(auth, **kwargs):
user = auth.user
nid = kwargs.get('nid') or kwargs.get('pid')
node = models.Node.load(nid) if nid else None
# Fail if node provided and user not contributor
if node and not node.is_contributor(user):
raise HTTPError(http.FORBIDDEN)
if user is None:
raise HTTPError(http.NOT_FOUND)
if kwargs.get('nid') and not node:
raise HTTPError(http.NOT_FOUND)
figshare_user = user.get_addon('figshare')
verifier = request.args.get('oauth_verifier')
access_token, access_token_secret = oauth_get_token(
figshare_user.oauth_request_token,
figshare_user.oauth_request_token_secret,
verifier
)
# Handle request cancellations from FigShare's API
if not access_token or not access_token_secret:
if node:
return redirect(node.web_url_for('node_setting'))
return redirect(web_url_for('user_addons'))
figshare_user.oauth_request_token = None
figshare_user.oauth_request_token_secret = None
figshare_user.oauth_access_token = access_token
figshare_user.oauth_access_token_secret = access_token_secret
figshare_user.save()
if node:
figshare_node = node.get_addon('figshare')
figshare_node.user_settings = figshare_user
figshare_node.save()
if node:
return redirect(os.path.join(node.url, 'settings'))
return redirect(web_url_for('user_addons'))
@must_have_permission('write')
@must_have_addon('figshare', 'node')
def figshare_add_user_auth(auth, **kwargs):
user = auth.user
node = kwargs['node'] or kwargs['project']
figshare_node = node.get_addon('figshare')
figshare_user = user.get_addon('figshare')
if figshare_node is None or figshare_user is None:
raise HTTPError(http.BAD_REQUEST)
figshare_node.user_settings = figshare_user
# ensure api url is correct
figshare_node.save()
return {}
@must_be_logged_in
@must_have_addon('figshare', 'user')
def figshare_oauth_delete_user(user_addon, **kwargs):
user_addon.remove_auth(save=True)
return {}
| apache-2.0 |
adamwwt/chvac | venv/lib/python2.7/site-packages/gunicorn/sock.py | 31 | 6594 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import errno
import os
import socket
import stat
import sys
import time
from gunicorn import util
from gunicorn.six import string_types
SD_LISTEN_FDS_START = 3
class BaseSocket(object):
    """Common behaviour for gunicorn listener sockets.

    Subclasses supply FAMILY and may override bind()/set_options()/
    __str__().  When ``fd`` is given (socket activation / GUNICORN_FD
    re-exec), the descriptor is wrapped and assumed to be already bound.
    """

    def __init__(self, address, conf, log, fd=None):
        self.log = log
        self.conf = conf
        self.cfg_addr = address
        if fd is None:
            sock = socket.socket(self.FAMILY, socket.SOCK_STREAM)
        else:
            # Duplicate an inherited descriptor into a socket object.
            sock = socket.fromfd(fd, self.FAMILY, socket.SOCK_STREAM)
        self.sock = self.set_options(sock, bound=(fd is not None))

    def __str__(self):
        # Bug fix: the original declared __str__(self, name) with a bogus
        # extra parameter, so str(sock) raised TypeError.
        return "<socket %d>" % self.sock.fileno()

    def __getattr__(self, name):
        # Delegate everything else (accept, fileno, ...) to the raw socket.
        return getattr(self.sock, name)

    def set_options(self, sock, bound=False):
        # Allow fast restarts; bind only when we created the socket
        # ourselves, then switch to non-blocking listen mode.
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        if not bound:
            self.bind(sock)
        sock.setblocking(0)
        sock.listen(self.conf.backlog)
        return sock

    def bind(self, sock):
        sock.bind(self.cfg_addr)

    def close(self):
        try:
            self.sock.close()
        except socket.error as e:
            self.log.info("Error while closing socket %s", str(e))
        # Short grace period before dropping the reference.
        time.sleep(0.3)
        del self.sock
class TCPSocket(BaseSocket):
FAMILY = socket.AF_INET
def __str__(self):
if self.conf.is_ssl:
scheme = "https"
else:
scheme = "http"
addr = self.sock.getsockname()
return "%s://%s:%d" % (scheme, addr[0], addr[1])
def set_options(self, sock, bound=False):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
return super(TCPSocket, self).set_options(sock, bound=bound)
class TCP6Socket(TCPSocket):
FAMILY = socket.AF_INET6
def __str__(self):
(host, port, fl, sc) = self.sock.getsockname()
return "http://[%s]:%d" % (host, port)
class UnixSocket(BaseSocket):
FAMILY = socket.AF_UNIX
def __init__(self, addr, conf, log, fd=None):
if fd is None:
try:
st = os.stat(addr)
except OSError as e:
if e.args[0] != errno.ENOENT:
raise
else:
if stat.S_ISSOCK(st.st_mode):
os.remove(addr)
else:
raise ValueError("%r is not a socket" % addr)
super(UnixSocket, self).__init__(addr, conf, log, fd=fd)
def __str__(self):
return "unix:%s" % self.cfg_addr
def bind(self, sock):
old_umask = os.umask(self.conf.umask)
sock.bind(self.cfg_addr)
util.chown(self.cfg_addr, self.conf.uid, self.conf.gid)
os.umask(old_umask)
def close(self):
super(UnixSocket, self).close()
os.unlink(self.cfg_addr)
def _sock_type(addr):
    """Map an address specification to the matching socket class.

    A tuple selects TCP (v4 or v6 depending on the host part); a string
    selects a Unix domain socket; anything else raises TypeError.
    """
    if isinstance(addr, tuple):
        return TCP6Socket if util.is_ipv6(addr[0]) else TCPSocket
    if isinstance(addr, string_types):
        return UnixSocket
    raise TypeError("Unable to create socket from: %r" % addr)
def create_sockets(conf, log):
    """
    Create a new socket for the given address. If the
    address is a tuple, a TCP socket is created. If it
    is a string, a Unix socket is created. Otherwise
    a TypeError is raised.
    """
    listeners = []

    # systemd-style socket activation: adopt any descriptors we were handed.
    if 'LISTEN_PID' in os.environ and int(os.environ.get('LISTEN_PID')) == os.getpid():
        for i in range(int(os.environ.get('LISTEN_FDS', 0))):
            fd = i + SD_LISTEN_FDS_START
            try:
                sock = socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM)
                sockname = sock.getsockname()
                # Classify the inherited fd by the shape of its sockname.
                if isinstance(sockname, str) and sockname.startswith('/'):
                    listeners.append(UnixSocket(sockname, conf, log, fd=fd))
                elif len(sockname) == 2 and '.' in sockname[0]:
                    listeners.append(TCPSocket("%s:%s" % sockname, conf, log, fd=fd))
                elif len(sockname) == 4 and ':' in sockname[0]:
                    listeners.append(TCP6Socket("[%s]:%s" % sockname[:2], conf, log, fd=fd))
            except socket.error:
                pass
        del os.environ['LISTEN_PID'], os.environ['LISTEN_FDS']

        if listeners:
            log.debug('Socket activation sockets: %s',
                      ",".join([str(l) for l in listeners]))
            return listeners

    # get it only once
    laddr = conf.address

    # check ssl config early to raise the error on startup
    # only the certfile is needed since it can contain the keyfile
    if conf.certfile and not os.path.exists(conf.certfile):
        raise ValueError('certfile "%s" does not exist' % conf.certfile)

    if conf.keyfile and not os.path.exists(conf.keyfile):
        raise ValueError('keyfile "%s" does not exist' % conf.keyfile)

    # sockets are already bound
    if 'GUNICORN_FD' in os.environ:
        fds = os.environ.pop('GUNICORN_FD').split(',')
        for i, fd in enumerate(fds):
            fd = int(fd)
            addr = laddr[i]
            sock_type = _sock_type(addr)
            try:
                listeners.append(sock_type(addr, conf, log, fd=fd))
            except socket.error as e:
                if e.args[0] == errno.ENOTCONN:
                    log.error("GUNICORN_FD should refer to an open socket.")
                else:
                    raise
        return listeners

    # no sockets is bound, first initialization of gunicorn in this env.
    for addr in laddr:
        sock_type = _sock_type(addr)
        sock = None
        # Retry the bind up to 5 times before giving up on this address.
        for i in range(5):
            try:
                sock = sock_type(addr, conf, log)
            except socket.error as e:
                if e.args[0] == errno.EADDRINUSE:
                    log.error("Connection in use: %s", str(addr))
                if e.args[0] == errno.EADDRNOTAVAIL:
                    log.error("Invalid address: %s", str(addr))
                    sys.exit(1)
                # BUG FIX: the original tested ``i < 5`` which is always true
                # inside ``range(5)``, so the final failed attempt also
                # logged "Retrying" and slept for nothing.
                if i < 4:
                    log.error("Retrying in 1 second.")
                    time.sleep(1)
            else:
                break

        if sock is None:
            log.error("Can't connect to %s", str(addr))
            sys.exit(1)

        listeners.append(sock)

    return listeners
| mit |
willkg/socorro-collector | collector/webapi/servers.py | 1 | 3568 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import web
import os
from collector.webapi.class_partial import class_with_partial_init
from configman import Namespace, RequiredConfig
#==============================================================================
class WebServerBase(RequiredConfig):
    """Base class for web.py based WSGI services.

    Builds the (uri, handler-class) routing table from ``services_list``
    -- each entry is either a class with an embedded ``uri`` attribute or
    a ``(uri, class)`` pair -- and wraps it in a web.py application.
    Subclasses implement ``run``.
    """
    required_config = Namespace()

    #--------------------------------------------------------------------------
    def __init__(self, config, services_list):
        self.config = config
        urls = []
        for each in services_list:
            if hasattr(each, 'uri'):
                # this service has a hard coded uri embedded within
                uri, cls = each.uri, each
                config.logger.debug(
                    'embedded uri class %s %s',
                    cls.__name__,
                    uri
                )
            else:
                # this is a uri, service pair
                uri, cls = each
                config.logger.debug(
                    'service pair uri class %s %s',
                    cls.__name__,
                    uri
                )
            # A single uri string is normalized to a one-element tuple so a
            # service can also register under several uris.
            if isinstance(uri, basestring):
                uri = (uri, )
            for a_uri in uri:
                urls.append(a_uri)
                if hasattr(cls, 'wrapped_partial'):
                    config.logger.debug(
                        "appending already wrapped %s",
                        cls.__name__
                    )
                    urls.append(cls)
                else:
                    # Bind the config into the handler class constructor.
                    config.logger.debug(
                        "wrapping %s",
                        cls.__name__
                    )
                    urls.append(class_with_partial_init(cls, config))
        self.urls = tuple(urls)
        web.webapi.internalerror = web.debugerror
        web.config.debug = False
        self._identify()
        self._wsgi_func = web.application(self.urls, globals()).wsgifunc()

    #--------------------------------------------------------------------------
    def run(self):
        # BUG FIX: the original ``raise NotImplemented`` is invalid --
        # NotImplemented is a constant, not an exception, so raising it
        # produces a confusing TypeError instead of the intended error.
        raise NotImplementedError

    #--------------------------------------------------------------------------
    def _identify(self):
        # Hook for subclasses to log which server variant is running.
        pass
#==============================================================================
class WSGIServer(WebServerBase):
    """When running under a wsgi compatible Web server, modwsgi requires a
    reference to a "wsgifunc".  In this variant of the WebServer class, the
    run function returns the result of the webpy framework's wsgifunc.
    Applications that use this class must provide a module level variable
    'application' in the module given to the Web server modwsgi
    configuration.  The value of the variable must be the _wsgi_func.
    """
    #--------------------------------------------------------------------------
    def run(self):
        # Hand the wsgi entry point back to the hosting server.
        return self._wsgi_func

    #--------------------------------------------------------------------------
    def _identify(self):
        self.config.logger.info('this is WSGIServer')

    #--------------------------------------------------------------------------
    @staticmethod
    def get_socorro_config_path(wsgi_file):
        # Resolve ../config relative to the directory holding the wsgi file.
        wsgi_path = os.path.dirname(os.path.realpath(wsgi_file))
        config_path = os.path.join(wsgi_path, '..', 'config')
        return os.path.abspath(config_path)

ApacheModWSGI = WSGIServer  # for backwards compatibility
| mpl-2.0 |
nugget/home-assistant | homeassistant/components/sensor/serial.py | 7 | 3672 | """
Support for reading data from a serial port.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.serial/
"""
import logging
import json
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME, CONF_VALUE_TEMPLATE, EVENT_HOMEASSISTANT_STOP)
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['pyserial-asyncio==0.4']

_LOGGER = logging.getLogger(__name__)

# Configuration keys and defaults for this platform.
CONF_SERIAL_PORT = 'serial_port'
CONF_BAUDRATE = 'baudrate'

DEFAULT_NAME = "Serial Sensor"
DEFAULT_BAUDRATE = 9600

# Only the serial port is required; everything else falls back to defaults.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_SERIAL_PORT): cv.string,
    vol.Optional(CONF_BAUDRATE, default=DEFAULT_BAUDRATE):
        cv.positive_int,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
})
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Set up the Serial sensor platform from a validated config entry."""
    name = config.get(CONF_NAME)
    port = config.get(CONF_SERIAL_PORT)
    baudrate = config.get(CONF_BAUDRATE)

    value_template = config.get(CONF_VALUE_TEMPLATE)
    if value_template is not None:
        value_template.hass = hass

    sensor = SerialSensor(name, port, baudrate, value_template)

    # NOTE(review): this registers the *coroutine object* returned by
    # stop_serial_read() as the stop listener rather than the method itself
    # -- presumably relying on the event bus scheduling coroutine objects
    # directly.  Confirm against hass.bus semantics before changing.
    hass.bus.async_listen_once(
        EVENT_HOMEASSISTANT_STOP, sensor.stop_serial_read())
    async_add_entities([sensor], True)
class SerialSensor(Entity):
    """Representation of a Serial sensor.

    Runs an endless background task that reads lines from the serial port,
    optionally rendering them through a template, and publishes each line
    as the entity state.
    """

    def __init__(self, name, port, baudrate, value_template):
        """Initialize the Serial sensor."""
        self._name = name
        self._state = None                # last line received (possibly templated)
        self._port = port
        self._baudrate = baudrate
        self._serial_loop_task = None     # background reader task handle
        self._template = value_template
        self._attributes = []             # replaced by a dict when JSON arrives

    async def async_added_to_hass(self):
        """Handle when an entity is about to be added to Home Assistant."""
        # Start the endless reader loop on the Home Assistant event loop.
        self._serial_loop_task = self.hass.loop.create_task(
            self.serial_read(self._port, self._baudrate))

    async def serial_read(self, device, rate, **kwargs):
        """Read the data from the port."""
        import serial_asyncio
        reader, _ = await serial_asyncio.open_serial_connection(
            url=device, baudrate=rate, **kwargs)
        while True:
            line = await reader.readline()
            line = line.decode('utf-8').strip()
            try:
                # If the line parses as a JSON object, expose it as the
                # entity's attributes in addition to the raw state.
                data = json.loads(line)
                if isinstance(data, dict):
                    self._attributes = data
            except ValueError:
                pass  # not JSON; just use the raw line as state
            if self._template is not None:
                line = self._template.async_render_with_possible_json_value(
                    line)
            _LOGGER.debug("Received: %s", line)
            self._state = line
            self.async_schedule_update_ha_state()

    async def stop_serial_read(self):
        """Close resources."""
        if self._serial_loop_task:
            self._serial_loop_task.cancel()

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def device_state_attributes(self):
        """Return the attributes of the entity (if any JSON present)."""
        return self._attributes

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state
| apache-2.0 |
ms-iot/python | cpython/Lib/idlelib/idle_test/test_parenmatch.py | 79 | 3538 | """Test idlelib.ParenMatch."""
# This must currently be a gui test because ParenMatch methods use
# several text methods not defined on idlelib.idle_test.mock_tk.Text.
import unittest
from unittest.mock import Mock
from test.support import requires
from tkinter import Tk, Text
from idlelib.ParenMatch import ParenMatch
class DummyEditwin:
    """Minimal stand-in for EditorWindow: only the attributes ParenMatch reads."""

    def __init__(self, text):
        self.text = text
        self.indentwidth = self.tabwidth = 8
        self.context_use_ps1 = True
class ParenMatchTest(unittest.TestCase):
    """GUI tests for ParenMatch highlight/restore behavior.

    Requires a real Tk text widget because ParenMatch uses text methods
    that the idlelib mock Text does not implement.
    """

    @classmethod
    def setUpClass(cls):
        requires('gui')
        cls.root = Tk()
        cls.text = Text(cls.root)
        cls.editwin = DummyEditwin(cls.text)
        cls.editwin.text_frame = Mock()

    @classmethod
    def tearDownClass(cls):
        del cls.text, cls.editwin
        cls.root.destroy()
        del cls.root

    def tearDown(self):
        # Start every test from an empty buffer.
        self.text.delete('1.0', 'end')

    def test_paren_expression(self):
        """
        Test ParenMatch with 'expression' style.
        """
        text = self.text
        pm = ParenMatch(self.editwin)
        pm.set_style('expression')

        text.insert('insert', 'def foobar(a, b')
        pm.flash_paren_event('event')
        # Highlight spans from the open paren to the insert point.
        self.assertIn('<<parenmatch-check-restore>>', text.event_info())
        self.assertTupleEqual(text.tag_prevrange('paren', 'end'),
                              ('1.10', '1.15'))
        text.insert('insert', ')')
        pm.restore_event()
        self.assertNotIn('<<parenmatch-check-restore>>', text.event_info())
        self.assertEqual(text.tag_prevrange('paren', 'end'), ())

        # paren_closed_event can only be tested as below
        pm.paren_closed_event('event')
        self.assertTupleEqual(text.tag_prevrange('paren', 'end'),
                              ('1.10', '1.16'))

    def test_paren_default(self):
        """
        Test ParenMatch with 'default' style.
        """
        text = self.text
        pm = ParenMatch(self.editwin)
        pm.set_style('default')

        text.insert('insert', 'def foobar(a, b')
        pm.flash_paren_event('event')
        # Default style highlights only the opening paren itself.
        self.assertIn('<<parenmatch-check-restore>>', text.event_info())
        self.assertTupleEqual(text.tag_prevrange('paren', 'end'),
                              ('1.10', '1.11'))
        text.insert('insert', ')')
        pm.restore_event()
        self.assertNotIn('<<parenmatch-check-restore>>', text.event_info())
        self.assertEqual(text.tag_prevrange('paren', 'end'), ())

    def test_paren_corner(self):
        """
        Test corner cases in flash_paren_event and paren_closed_event.
        These cases force conditional expression and alternate paths.
        """
        text = self.text
        pm = ParenMatch(self.editwin)

        text.insert('insert', '# this is a commen)')
        self.assertIsNone(pm.paren_closed_event('event'))

        text.insert('insert', '\ndef')
        self.assertIsNone(pm.flash_paren_event('event'))
        self.assertIsNone(pm.paren_closed_event('event'))

        text.insert('insert', ' a, *arg)')
        self.assertIsNone(pm.paren_closed_event('event'))

    def test_handle_restore_timer(self):
        # Timer fires only when the counter still matches.
        pm = ParenMatch(self.editwin)
        pm.restore_event = Mock()
        pm.handle_restore_timer(0)
        self.assertTrue(pm.restore_event.called)
        pm.restore_event.reset_mock()
        pm.handle_restore_timer(1)
        self.assertFalse(pm.restore_event.called)


if __name__ == '__main__':
    unittest.main(verbosity=2)
| bsd-3-clause |
pombredanne/docpipe | docpipe/cli.py | 1 | 2485 | import logging
import yaml
import click
from barn import open_collection
from docpipe.exc import DocpipeException
from docpipe.pipeline import Pipeline
log = logging.getLogger(__name__)
def execute_pipeline(ctx, fh, operation):
try:
config = yaml.load(fh.read())
fh.close()
except Exception, e:
raise click.ClickException("Cannot parse pipeline: %s" % e)
if 'config' not in config:
config['config'] = {}
collections = ctx.pop('collections', [])
config['config'].update(ctx)
config['config']['threads'] = ctx.pop('threads', None)
collection_configs = config['config'].pop('collections', {})
if not len(collections):
collections = collection_configs.keys()
collections = [c for c in collections if c in collection_configs]
for cname in collections:
cconfig = collection_configs.get(cname)
coll = open_collection(cname, cconfig.pop('type'), **cconfig)
try:
pipeline = Pipeline(coll, fh.name, config=config)
getattr(pipeline, operation)()
except DocpipeException, de:
raise click.ClickException(unicode(de))
@click.group()
# BUG FIX: ``nargs=-1`` is only valid for click *arguments*; on an option
# it raises "Options cannot have nargs < 0" as soon as the module is
# imported.  ``multiple=True`` is the option equivalent (yields a tuple).
@click.option('-c', '--collections', multiple=True,
              help='The configured collection name to use.')
@click.option('-t', '--threads', default=None, type=int,
              help='Number of threads to process data')
@click.option('-d', '--debug', default=False, is_flag=True,
              help='Verbose output for debugging')
@click.pass_context
def cli(ctx, collections, threads, debug):
    """ A configurable document processing tool. """
    # Stash the global options for the subcommands (via @click.pass_obj).
    ctx.obj = {
        'collections': collections,
        'debug': debug,
        'threads': threads
    }
    if debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
# The three subcommands only differ in which Pipeline method they dispatch
# to.  Their docstrings double as the CLI help text shown by click.
@cli.command()
@click.argument('pipeline', type=click.File('rb'))
@click.pass_obj
def run(ctx, pipeline):
    """ Execute the given PIPELINE. """
    execute_pipeline(ctx, pipeline, 'run')


@cli.command()
@click.argument('pipeline', type=click.File('rb'))
@click.pass_obj
def extract(ctx, pipeline):
    """ Execute the extractors in PIPELINE. """
    execute_pipeline(ctx, pipeline, 'extract')


@cli.command()
@click.argument('pipeline', type=click.File('rb'))
@click.pass_obj
def transform(ctx, pipeline):
    """ Execute the transformers in PIPELINE. """
    execute_pipeline(ctx, pipeline, 'transform')
| mit |
mattgiguere/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 121 | 6117 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
# Shared fixtures: the iris dataset, shuffled with a fixed seed so every
# test sees the same sample order, in both dense and CSR-sparse form.
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
    """Pure-Python reference Passive-Aggressive implementation.

    Used to check the library estimators against.  Supports the hinge and
    squared-hinge classification losses and the (squared)
    epsilon-insensitive regression losses.
    """

    def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
                 fit_intercept=True, n_iter=1, random_state=None):
        # NOTE(review): ``random_state`` is accepted for signature parity
        # with the library classes but never used -- fit() is deterministic.
        self.C = C
        self.epsilon = epsilon
        self.loss = loss
        self.fit_intercept = fit_intercept
        self.n_iter = n_iter

    def fit(self, X, y):
        n_samples, n_features = X.shape
        self.w = np.zeros(n_features, dtype=np.float64)
        self.b = 0.0

        for t in range(self.n_iter):
            for i in range(n_samples):
                p = self.project(X[i])
                # Hinge-style loss for classification, epsilon-insensitive
                # loss for regression.
                if self.loss in ("hinge", "squared_hinge"):
                    loss = max(1 - y[i] * p, 0)
                else:
                    loss = max(np.abs(p - y[i]) - self.epsilon, 0)

                sqnorm = np.dot(X[i], X[i])

                # Step size: capped (PA-I style) for the plain losses,
                # regularized denominator (PA-II style) for the squared ones.
                if self.loss in ("hinge", "epsilon_insensitive"):
                    step = min(self.C, loss / sqnorm)
                elif self.loss in ("squared_hinge",
                                   "squared_epsilon_insensitive"):
                    step = loss / (sqnorm + 1.0 / (2 * self.C))

                if self.loss in ("hinge", "squared_hinge"):
                    step *= y[i]
                else:
                    step *= np.sign(y[i] - p)

                self.w += step * X[i]
                if self.fit_intercept:
                    self.b += step

    def project(self, X):
        # Decision function: <w, x> + b.
        return np.dot(X, self.w) + self.b
def test_classifier_accuracy():
    # Dense and sparse input, with and without intercept, should all fit
    # shuffled iris to a reasonable accuracy.
    for data in (X, X_csr):
        for fit_intercept in (True, False):
            clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
                                              fit_intercept=fit_intercept,
                                              random_state=0)
            clf.fit(data, y)
            score = clf.score(data, y)
            assert_greater(score, 0.79)


def test_classifier_partial_fit():
    classes = np.unique(y)
    for data in (X, X_csr):
        clf = PassiveAggressiveClassifier(C=1.0,
                                          fit_intercept=True,
                                          random_state=0)
        # 30 rounds of partial_fit should reach similar quality to fit().
        for t in range(30):
            clf.partial_fit(data, y, classes)
        score = clf.score(data, y)
        assert_greater(score, 0.79)


def test_classifier_refit():
    # Classifier can be retrained on different labels and features.
    clf = PassiveAggressiveClassifier().fit(X, y)
    assert_array_equal(clf.classes_, np.unique(y))

    clf.fit(X[:, :-1], iris.target_names[y])
    assert_array_equal(clf.classes_, iris.target_names)


def test_classifier_correctness():
    # One-vs-rest binarization: class 1 against everything else.
    y_bin = y.copy()
    y_bin[y != 1] = -1

    for loss in ("hinge", "squared_hinge"):
        # Reference implementation, deterministic update order.
        clf1 = MyPassiveAggressive(C=1.0,
                                   loss=loss,
                                   fit_intercept=True,
                                   n_iter=2)
        clf1.fit(X, y_bin)

        for data in (X, X_csr):
            # shuffle=False keeps the library's update order aligned with
            # the reference so the learned weights are comparable.
            clf2 = PassiveAggressiveClassifier(C=1.0,
                                               loss=loss,
                                               fit_intercept=True,
                                               n_iter=2, shuffle=False)
            clf2.fit(data, y_bin)

            assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)


def test_classifier_undefined_methods():
    clf = PassiveAggressiveClassifier()
    # Probabilistic/transform APIs are intentionally absent on this model.
    for meth in ("predict_proba", "predict_log_proba", "transform"):
        assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_regressor_mse():
    # Regress onto the binarized labels; MSE should be modest.
    y_bin = y.copy()
    y_bin[y != 1] = -1

    for data in (X, X_csr):
        for fit_intercept in (True, False):
            reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
                                             fit_intercept=fit_intercept,
                                             random_state=0)
            reg.fit(data, y_bin)
            pred = reg.predict(data)
            assert_less(np.mean((pred - y_bin) ** 2), 1.7)


def test_regressor_partial_fit():
    y_bin = y.copy()
    y_bin[y != 1] = -1

    for data in (X, X_csr):
        reg = PassiveAggressiveRegressor(C=1.0,
                                         fit_intercept=True,
                                         random_state=0)
        # 50 rounds of partial_fit should match fit(n_iter=50) quality.
        for t in range(50):
            reg.partial_fit(data, y_bin)
        pred = reg.predict(data)
        assert_less(np.mean((pred - y_bin) ** 2), 1.7)


def test_regressor_correctness():
    y_bin = y.copy()
    y_bin[y != 1] = -1

    for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
        # Reference implementation with deterministic update order.
        reg1 = MyPassiveAggressive(C=1.0,
                                   loss=loss,
                                   fit_intercept=True,
                                   n_iter=2)
        reg1.fit(X, y_bin)

        for data in (X, X_csr):
            # shuffle=False keeps update order aligned with the reference.
            reg2 = PassiveAggressiveRegressor(C=1.0,
                                              loss=loss,
                                              fit_intercept=True,
                                              n_iter=2, shuffle=False)
            reg2.fit(data, y_bin)

            assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)


def test_regressor_undefined_methods():
    reg = PassiveAggressiveRegressor()
    # transform() is intentionally absent on the regressor.
    for meth in ("transform",):
        assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
Petraea/jsonbot | jsb/contrib/sleekxmpp/plugins/xep_0128/static.py | 4 | 1950 | """
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz, Lance J.T. Stout
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import logging
import sleekxmpp
from sleekxmpp.plugins.xep_0030 import StaticDisco
log = logging.getLogger(__name__)
class StaticExtendedDisco(object):

    """
    Extend the default StaticDisco implementation to provide
    support for extended identity information.
    """

    def __init__(self, static):
        """
        Augment the default XEP-0030 static handler object.

        Arguments:
            static -- The default static XEP-0030 handler object.
        """
        self.static = static

    def set_extended_info(self, jid, node, data):
        """
        Replace the extended identity data for a JID/node combination.

        The data parameter may provide:
            data -- Either a single data form, or a list of data forms.
        """
        # Replace = remove everything, then add the new forms.
        self.del_extended_info(jid, node, data)
        self.add_extended_info(jid, node, data)

    def add_extended_info(self, jid, node, data):
        """
        Add additional extended identity data for a JID/node combination.

        The data parameter may provide:
            data -- Either a single data form, or a list of data forms.
        """
        self.static.add_node(jid, node)

        # Normalize a single form into a one-element list.
        forms = data.get('data', [])
        if not isinstance(forms, list):
            forms = [forms]

        for form in forms:
            self.static.nodes[(jid, node)]['info'].append(form)

    def del_extended_info(self, jid, node, data):
        """
        Remove all extended identity data for a JID/node combination.
        (Docstring fixed: this deletes, it does not replace.)

        The data parameter is not used.
        """
        if (jid, node) not in self.static.nodes:
            return

        info = self.static.nodes[(jid, node)]['info']

        # NOTE(review): iterates ``info['substanza']`` while removing forms
        # from ``info.xml`` -- verify the stanza interface guarantees this
        # is safe during mutation.
        for form in info['substanza']:
            info.xml.remove(form.xml)
| mit |
HaloExchange/HaloBitcoin | qa/rpc-tests/getchaintips.py | 1 | 2125 | #!/usr/bin/env python2
# Copyright (c) 2014 The Halo Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the getchaintips API. We introduce a network split, work
# on chains of different lengths, and join the network together again.
# This gives us two tips, verify that it works.
from test_framework import BitcoinTestFramework
from util import assert_equal
class GetChainTipsTest (BitcoinTestFramework):
    """Exercise getchaintips across a network split and rejoin.

    The default 4-node network starts fully synchronised at height 200.
    """

    def run_test (self):
        BitcoinTestFramework.run_test (self)

        # One synchronised chain -> exactly one active tip at height 200.
        tips = self.nodes[0].getchaintips ()
        assert_equal (len (tips), 1)
        assert_equal (tips[0]['branchlen'], 0)
        assert_equal (tips[0]['height'], 200)
        assert_equal (tips[0]['status'], 'active')

        # Split the network and build two chains of different lengths.
        self.split_network ()

        self.nodes[0].setgenerate (True, 10);
        self.nodes[2].setgenerate (True, 20);
        self.sync_all ()

        # Each half only sees its own chain as the single active tip.
        tips = self.nodes[1].getchaintips ()
        assert_equal (len (tips), 1)
        shortTip = tips[0]
        assert_equal (shortTip['branchlen'], 0)
        assert_equal (shortTip['height'], 210)
        assert_equal (tips[0]['status'], 'active')

        tips = self.nodes[3].getchaintips ()
        assert_equal (len (tips), 1)
        longTip = tips[0]
        assert_equal (longTip['branchlen'], 0)
        assert_equal (longTip['height'], 220)
        assert_equal (tips[0]['status'], 'active')

        # Join the network halves and check that we now have two tips
        # (at least at the nodes that previously had the short chain).
        self.join_network ()

        tips = self.nodes[0].getchaintips ()
        assert_equal (len (tips), 2)
        # The longer chain wins; the short one becomes a valid fork.
        assert_equal (tips[0], longTip)

        assert_equal (tips[1]['branchlen'], 10)
        assert_equal (tips[1]['status'], 'valid-fork')
        # Normalise the fork entry so it compares equal to the old tip.
        tips[1]['branchlen'] = 0
        tips[1]['status'] = 'active'
        assert_equal (tips[1], shortTip)

if __name__ == '__main__':
    GetChainTipsTest ().main ()
| mit |
sunshinelover/chanlun | vn.lts/vnltsmd/test/mdtest.py | 23 | 4229 | # encoding: UTF-8
# 内置模块
import sys
from time import sleep
# 测试模块
from vnltsmd import *
#----------------------------------------------------------------------
def print_dict(d):
    """Print a dict one ``key:value`` pair per line."""
    for key,value in d.items():
        print key + ':' + str(value)
#----------------------------------------------------------------------
def simple_log(func):
    """Simple decorator that prints the wrapped function's name before
    calling it (used to trace which API callback fired)."""
    def wrapper(*args, **kw):
        print ""
        print str(func.__name__)
        return func(*args, **kw)
    return wrapper
########################################################################
class TestMdApi(MdApi):
    """Test implementation of the market data API: every callback just
    dumps its payload so the wire traffic can be inspected."""

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        super(TestMdApi, self).__init__()

    #----------------------------------------------------------------------
    @simple_log
    def onFrontConnected(self):
        """Front server connected."""
        pass

    #----------------------------------------------------------------------
    @simple_log
    def onFrontDisconnected(self, n):
        """Front server disconnected (n is the reason code)."""
        print n

    #----------------------------------------------------------------------
    @simple_log
    def onHeartBeatWarning(self, n):
        """Heartbeat warning."""
        print n

    #----------------------------------------------------------------------
    @simple_log
    def onRspError(self, error, n, last):
        """Error response."""
        print_dict(error)

    #----------------------------------------------------------------------
    @simple_log
    def onRspUserLogin(self, data, error, n, last):
        """Login response."""
        print_dict(data)
        print_dict(error)

    #----------------------------------------------------------------------
    @simple_log
    def onRspUserLogout(self, data, error, n, last):
        """Logout response."""
        print_dict(data)
        print_dict(error)

    #----------------------------------------------------------------------
    @simple_log
    def onRspSubMarketData(self, data, error, n, last):
        """Instrument subscription response."""
        print_dict(data)
        print_dict(error)

    #----------------------------------------------------------------------
    @simple_log
    def onRspUnSubMarketData(self, data, error, n, last):
        """Instrument unsubscription response."""
        print_dict(data)
        print_dict(error)

    #----------------------------------------------------------------------
    @simple_log
    def onRtnDepthMarketData(self, data):
        """Market data tick push."""
        print_dict(data)
#----------------------------------------------------------------------
def main():
    """Main test routine; add sleep() calls if responses appear to block."""
    reqid = 0

    # Create the API object.
    api = TestMdApi()

    # Create the MdApi object on the C++ side; the argument is the
    # directory where the .con files should be stored.
    api.createFtdcMdApi('')

    # Register the front server address.
    api.registerFront("tcp://211.144.195.163:34513")

    # Initialise the API and connect to the front server.
    api.init()
    sleep(0.5)

    # Log in.
    loginReq = {}                       # create an empty dict
    loginReq['UserID'] = ''             # fields are passed as dict entries
    loginReq['Password'] = ''           # key names match the C++ struct members
    loginReq['BrokerID'] = '2011'
    reqid = reqid + 1                   # request ids must stay unique
    i = api.reqUserLogin(loginReq, 1)
    sleep(0.5)

    ## Safe exit -- verified working.
    #i = api.exit()

    # Get the trading day -- verified working.
    day = api.getTradingDay()
    print 'Trading Day is:' + str(day)
    sleep(0.5)

    # Subscribe to an instrument -- verified working.
    subReq = {}
    subReq['InstrumentID'] = '510050'
    subReq['ExchangeID'] = 'SSE'
    i = api.subscribeMarketData(subReq)

    ## Unsubscribe from an instrument -- verified working.
    #i = api.unSubscribeMarketData(subReq)

    # Keep the process alive so callbacks can arrive.
    while 1:
        sleep(1)

if __name__ == '__main__':
    main()
| mit |
ykpoh/NLP | src/bigram.py | 1 | 3414 |
def printTextfile(fdist, fileName):
    """Write the given frequency distribution to a text file.

    Each ``key : value`` pair of *fdist* is written on its own line.
    """
    # BUG FIX: the original opened the file into a variable named ``file``
    # (shadowing the builtin) and never closed it, so buffered data could
    # be lost.  A ``with`` block closes and flushes deterministically.
    with open(fileName, "w") as out:
        # Write each bigram with its frequency into the file.
        for k, v in fdist.items():
            out.write("%s : %s\n" % (k, v))
def calSentenceBigram(userInput, tokendist, bidist):
    """Calculate the bigram probability over the whole given sentence.

    tokendist maps token -> count, bidist maps (w1, w2) -> count.
    """
    #import nltk library
    import nltk
    #lower-case the user input
    userInput = userInput.lower()
    #tokenize user input into a list using nltk.word_tokenize
    userTokens = nltk.word_tokenize(userInput)
    #initialize the probability
    userSentenceProb = 1.0
    #multiply the bigram probabilities of consecutive token pairs
    for prevWord, nextWord in zip(userTokens, userTokens[1:]):
        #frequency of prevWord in the database
        wordCount = tokendist.get(prevWord)
        #frequency of the (prevWord, nextWord) bigram in the database
        bigramCount = bidist.get((prevWord, nextWord))
        #unseen bigrams contribute a factor of 1 so the product is not
        #zeroed out.
        # NOTE(review): scoring unseen bigrams as 1 ranks them *above*
        # every seen bigram -- consider proper smoothing instead.
        if(bigramCount == None):
            userSentenceProb *= float(1)
        #seen bigrams contribute count(w1,w2)/count(w1)
        else:
            userSentenceProb *= float(bigramCount)/float(wordCount)
    return userSentenceProb
def calBestProb(userInput, tokendist, bidist):
    """Predict the most likely next word after the given sentence.

    Every bigram whose first word matches the sentence's final token is
    scored by the bigram probability of the extended sentence; the best
    ``(probability, word)`` pair is returned, or ``(0, None)`` when no
    candidate bigram exists in the database.
    """
    import nltk

    text = userInput.lower()
    tokens = nltk.word_tokenize(text)

    # Probability of the sentence typed so far.
    sentence_prob = calSentenceBigram(text, tokendist, bidist)

    last_token = tokens[-1]
    last_count = tokendist.get(last_token)

    best_prob = 0
    predicted = None
    for bigram, freq in bidist.items():
        # Only bigrams continuing the last token are candidates.
        if bigram[0] != last_token:
            continue
        candidate_prob = float(freq) / float(last_count) * sentence_prob
        if candidate_prob > best_prob:
            best_prob = candidate_prob
            predicted = bigram[1]
    return best_prob, predicted
twstrike/le_for_patching | letsencrypt/tests/test_util.py | 24 | 2267 | """Test utilities.
.. warning:: This module is not part of the public API.
"""
import os
import pkg_resources
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
import OpenSSL
from acme import jose
def vector_path(*names):
    """Path to a test vector (path components under ``testdata/``)."""
    return pkg_resources.resource_filename(
        __name__, os.path.join('testdata', *names))


def load_vector(*names):
    """Load contents of a test vector (returned as bytes)."""
    # luckily, resource_string opens file in binary mode
    return pkg_resources.resource_string(
        __name__, os.path.join('testdata', *names))
def _guess_loader(filename, loader_pem, loader_der):
_, ext = os.path.splitext(filename)
if ext.lower() == '.pem':
return loader_pem
elif ext.lower() == '.der':
return loader_der
else: # pragma: no cover
raise ValueError("Loader could not be recognized based on extension")
def load_cert(*names):
    """Load certificate (PEM or DER chosen from the file extension)."""
    loader = _guess_loader(
        names[-1], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1)
    return OpenSSL.crypto.load_certificate(loader, load_vector(*names))


def load_comparable_cert(*names):
    """Load ComparableX509 cert (hashable/comparable wrapper)."""
    return jose.ComparableX509(load_cert(*names))


def load_csr(*names):
    """Load certificate request."""
    loader = _guess_loader(
        names[-1], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1)
    return OpenSSL.crypto.load_certificate_request(loader, load_vector(*names))


def load_comparable_csr(*names):
    """Load ComparableX509 certificate request."""
    return jose.ComparableX509(load_csr(*names))


def load_rsa_private_key(*names):
    """Load RSA private key (unencrypted; ``password=None``)."""
    loader = _guess_loader(names[-1], serialization.load_pem_private_key,
                           serialization.load_der_private_key)
    return jose.ComparableRSAKey(loader(
        load_vector(*names), password=None, backend=default_backend()))


def load_pyopenssl_private_key(*names):
    """Load pyOpenSSL private key."""
    loader = _guess_loader(
        names[-1], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1)
    return OpenSSL.crypto.load_privatekey(loader, load_vector(*names))
| apache-2.0 |
alberthdev/pyradmon | docs/sphinxcontrib/napoleon/pavertasks.py | 4 | 3636 | # -*- coding: utf-8 -*-
# Copyright 2014 Rob Ruana
# Licensed under the BSD License, see LICENSE file for details.
"""Sphinx related paver tasks.
Methods
-------
apidoc
Derive reStructuredText API doc files from python source code.
This task is essentially a wrapper around the `sphinx-apidoc`_ script.
The following settings can be set on the options object:
* ``apidoc_excludes`` -- (*str or list of str*) A directory or list of
directories to exclude from doc generation. These should either be
absolute paths, or relative to `apidoc_moduledir`
* ``apidoc_moduledir`` -- (*str*) The root directory to search for Python
modules. Defaults to "."
* ``apidoc_outputdir`` -- (*str*) The output directory. Defaults to
`options.docroot/options.sourcedir`
* ``apidoc_overwrite`` -- (*bool*) True to overwrite existing files.
Defaults to True
.. _sphinx-apidoc: http://sphinx-doc.org/man/sphinx-apidoc.html
Example
-------
Creating API documentation is easy with a `pavement.py` file like this::
# pavement.py
from sphinxcontrib.napoleon.pavertasks import apidoc, html
from paver.easy import *
options(
sphinx=Bunch(
apidoc_excludes=['tests'],
apidoc_moduledir='sphinxcontrib/napoleon',
apidoc_outputdir='docs/source',
apidoc_overwrite=True,
builddir='build',
docroot='docs',
sourcedir='source',
),
)
And call::
$ paver apidoc
---> sphinxcontrib.napoleon.pavertasks.apidoc
sphinx-apidoc -f -o docs/source sphinxcontrib tests
Creating file docs/source/sphinxcontrib.rst.
Creating file docs/source/sphinxcontrib.napoleon.rst.
Creating file docs/source/modules.rst.
html
Build HTML documentation, including API documentation.
This task is a convenience method for calling `apidoc` followed by
`paver.docutils.html`. To use it, simply import it in your `pavement.py`
file::
from sphinxcontrib.napoleon.pavertasks import html
And call::
$ paver html
"""
import os
import sys
from paver.easy import BuildFailure, needs, task
try:
import sphinx
assert(sphinx)
has_sphinx = True
except ImportError:
has_sphinx = False
if sys.version_info[0] >= 3:
basestring = str
@task
def apidoc(options):
    """Derive reStructuredText API docs from Python source.

    Thin wrapper around ``sphinx.apidoc.main`` that reads the ``apidoc_*``
    settings (output dir, module dir, excludes, overwrite flag) from
    *options* and raises ``BuildFailure`` for missing directories.
    """
    if not has_sphinx:
        raise BuildFailure('Install sphinx to build html docs')
    outputdir = options.get('apidoc_outputdir', '')
    if not outputdir:
        # Fall back to <docroot>/<sourcedir>; validate docroot first.
        docroot = options.get('docroot', 'docs')
        if not os.path.exists(docroot):
            raise BuildFailure('Doc root dir (%s) does not exist' % docroot)
        outputdir = os.path.join(docroot, options.get('sourcedir', ''))
    if not os.path.exists(outputdir):
        raise BuildFailure('Doc source dir (%s) does not exist' % outputdir)
    moduledir = options.get('apidoc_moduledir', '.')
    if not os.path.exists(moduledir):
        raise BuildFailure('Module dir (%s) does not exist' % moduledir)
    excludes = options.get('apidoc_excludes', [])
    if isinstance(excludes, basestring):
        excludes = [excludes]
    # '-f' tells sphinx-apidoc to overwrite existing files.
    args = ['sphinx-apidoc']
    if options.get('apidoc_overwrite', True):
        args.append('-f')
    from sphinx.apidoc import main
    args += ['-o', outputdir, moduledir] + excludes
    print(' '.join(args))
    main(args)
@task
@needs('sphinxcontrib.napoleon.pavertasks.apidoc', 'paver.doctools.html')
def html(options):
    """Build HTML docs: `@needs` runs `apidoc` then `paver.doctools.html`."""
    pass
| apache-2.0 |
lifei/flask-admin | flask_admin/tests/test_model.py | 9 | 23799 | import wtforms
from nose.tools import eq_, ok_
from flask import Flask
from werkzeug.wsgi import DispatcherMiddleware
from werkzeug.test import Client
from wtforms import fields
from flask_admin import Admin, form
from flask_admin._compat import iteritems, itervalues
from flask_admin.model import base, filters
from flask_admin.model.template import macro
def wtforms2_and_up(func):
    """Decorator for skipping test if wtforms <2

    Marks *func* as a non-test (``__test__ = False`` is honoured by nose)
    when the installed wtforms major version is below 2; otherwise returns
    the function unchanged.
    """
    # Parse the whole major component rather than just the first character,
    # so multi-digit major versions (e.g. "10.0") compare correctly.
    if int(wtforms.__version__.split('.')[0]) < 2:
        func.__test__ = False
    return func
class Model(object):
    """Minimal in-memory record: a primary key plus three data columns."""

    def __init__(self, id=None, c1=1, c2=2, c3=3):
        self.id, self.col1, self.col2, self.col3 = id, c1, c2, c3
class Form(form.BaseForm):
    """Scaffolded form with one free-text field per Model data column."""
    col1 = fields.StringField()
    col2 = fields.StringField()
    col3 = fields.StringField()
class SimpleFilter(filters.BaseFilter):
    """Trivial filter whose only effect is to mark the query as filtered."""
    def apply(self, query):
        # Tag the query object so tests can assert the filter actually ran.
        query._applied = True
        return query
    def operation(self):
        # Human-readable operation label shown in the filter UI.
        return 'test'
class MockModelView(base.BaseModelView):
    """In-memory BaseModelView backed by a dict of Model objects.

    Records every create/update/delete and every get_list call so tests
    can assert on them afterwards.
    """
    def __init__(self, model, data=None, name=None, category=None,
                 endpoint=None, url=None, **kwargs):
        # Allow to set any attributes from parameters
        # NOTE: attributes are applied *before* super().__init__ —
        # presumably so BaseModelView's setup sees the overrides; confirm
        # against BaseModelView before reordering.
        for k, v in iteritems(kwargs):
            setattr(self, k, v)
        super(MockModelView, self).__init__(model, name, category, endpoint, url)
        self.created_models = []
        self.updated_models = []
        self.deleted_models = []
        self.search_arguments = []
        if data is None:
            self.all_models = {1: Model(1), 2: Model(2)}
        else:
            self.all_models = data
        self.last_id = len(self.all_models) + 1
    # Scaffolding
    def get_pk_value(self, model):
        return model.id
    def scaffold_list_columns(self):
        columns = ['col1', 'col2', 'col3']
        if self.column_exclude_list:
            return filter(lambda x: x not in self.column_exclude_list, columns)
        return columns
    def init_search(self):
        # Search is "supported" whenever any searchable columns are declared.
        return bool(self.column_searchable_list)
    def scaffold_filters(self, name):
        return [SimpleFilter(name)]
    def scaffold_sortable_columns(self):
        return ['col1', 'col2', 'col3']
    def scaffold_form(self):
        return Form
    # Data
    def get_list(self, page, sort_field, sort_desc, search, filters,
                 page_size=None):
        # Record the call so tests can inspect search/sort/filter arguments.
        self.search_arguments.append((page, sort_field, sort_desc, search, filters))
        return len(self.all_models), itervalues(self.all_models)
    def get_one(self, id):
        return self.all_models.get(int(id))
    def create_model(self, form):
        model = Model(self.last_id)
        self.last_id += 1
        form.populate_obj(model)
        self.created_models.append(model)
        self.all_models[model.id] = model
        return True
    def update_model(self, form, model):
        form.populate_obj(model)
        self.updated_models.append(model)
        return True
    def delete_model(self, model):
        # NOTE: only records the deletion; the model stays in all_models.
        self.deleted_models.append(model)
        return True
def setup():
    """Return a fresh Flask app (secret key set, CSRF disabled) and its Admin."""
    flask_app = Flask(__name__)
    flask_app.secret_key = '1'
    flask_app.config['CSRF_ENABLED'] = False
    admin = Admin(flask_app)
    return flask_app, admin
def test_mockview():
    """Full CRUD cycle through MockModelView (list/new/edit/delete), plus
    'save and continue' redirects behind a dispatcher URL prefix."""
    app, admin = setup()
    view = MockModelView(Model)
    admin.add_view(view)
    eq_(view.model, Model)
    eq_(view.name, 'Model')
    eq_(view.endpoint, 'model')
    # Verify scaffolding
    eq_(view._sortable_columns, ['col1', 'col2', 'col3'])
    eq_(view._create_form_class, Form)
    eq_(view._edit_form_class, Form)
    eq_(view._search_supported, False)
    eq_(view._filters, None)
    client = app.test_client()
    # Make model view requests
    rv = client.get('/admin/model/')
    eq_(rv.status_code, 200)
    # Test model creation view
    rv = client.get('/admin/model/new/')
    eq_(rv.status_code, 200)
    rv = client.post('/admin/model/new/',
                     data=dict(col1='test1', col2='test2', col3='test3'))
    eq_(rv.status_code, 302)
    eq_(len(view.created_models), 1)
    model = view.created_models.pop()
    # MockModelView seeds two models, so the first created one gets id 3.
    eq_(model.id, 3)
    eq_(model.col1, 'test1')
    eq_(model.col2, 'test2')
    eq_(model.col3, 'test3')
    # Try model edit view
    rv = client.get('/admin/model/edit/?id=3')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test1' in data)
    rv = client.post('/admin/model/edit/?id=3',
                     data=dict(col1='test!', col2='test@', col3='test#'))
    eq_(rv.status_code, 302)
    eq_(len(view.updated_models), 1)
    model = view.updated_models.pop()
    eq_(model.col1, 'test!')
    eq_(model.col2, 'test@')
    eq_(model.col3, 'test#')
    rv = client.get('/admin/model/edit/?id=4')
    eq_(rv.status_code, 302)
    # Attempt to delete model
    rv = client.post('/admin/model/delete/?id=3')
    eq_(rv.status_code, 302)
    eq_(rv.headers['location'], 'http://localhost/admin/model/')
    # Create a dispatched application to test that edit view's "save and
    # continue" functionality works when app is not located at root
    dummy_app = Flask('dummy_app')
    dispatched_app = DispatcherMiddleware(dummy_app, {'/dispatched': app})
    dispatched_client = Client(dispatched_app)
    app_iter, status, headers = dispatched_client.post(
        '/dispatched/admin/model/edit/?id=3',
        data=dict(col1='another test!', col2='test@', col3='test#', _continue_editing='True'))
    eq_(status, '302 FOUND')
    eq_(headers['Location'], 'http://localhost/dispatched/admin/model/edit/?id=3')
    model = view.updated_models.pop()
    eq_(model.col1, 'another test!')
def test_permissions():
    """Disabling can_create/can_edit/can_delete redirects (302) the views."""
    app, admin = setup()
    view = MockModelView(Model)
    admin.add_view(view)
    client = app.test_client()
    view.can_create = False
    rv = client.get('/admin/model/new/')
    eq_(rv.status_code, 302)
    view.can_edit = False
    rv = client.get('/admin/model/edit/?id=1')
    eq_(rv.status_code, 302)
    view.can_delete = False
    rv = client.post('/admin/model/delete/?id=1')
    eq_(rv.status_code, 302)
def test_templates():
    """Overridden list/create/edit templates (mock.html) are rendered."""
    app, admin = setup()
    view = MockModelView(Model)
    admin.add_view(view)
    client = app.test_client()
    view.list_template = 'mock.html'
    view.create_template = 'mock.html'
    view.edit_template = 'mock.html'
    rv = client.get('/admin/model/')
    eq_(rv.data, b'Success!')
    rv = client.get('/admin/model/new/')
    eq_(rv.data, b'Success!')
    rv = client.get('/admin/model/edit/?id=1')
    eq_(rv.data, b'Success!')
def test_list_columns():
    """column_list/column_labels drive which headers appear in list view."""
    app, admin = setup()
    view = MockModelView(Model,
                         column_list=['col1', 'col3'],
                         column_labels=dict(col1='Column1'))
    admin.add_view(view)
    eq_(len(view._list_columns), 2)
    eq_(view._list_columns, [('col1', 'Column1'), ('col3', 'Col3')])
    client = app.test_client()
    rv = client.get('/admin/model/')
    data = rv.data.decode('utf-8')
    ok_('Column1' in data)
    ok_('Col2' not in data)
def test_exclude_columns():
    """column_exclude_list removes columns from the scaffolded list."""
    app, admin = setup()
    view = MockModelView(Model, column_exclude_list=['col2'])
    admin.add_view(view)
    eq_(view._list_columns, [('col1', 'Col1'), ('col3', 'Col3')])
    client = app.test_client()
    rv = client.get('/admin/model/')
    data = rv.data.decode('utf-8')
    ok_('Col1' in data)
    ok_('Col2' not in data)
def test_sortable_columns():
    """(field, sort_key) tuples in column_sortable_list map field -> key."""
    app, admin = setup()
    view = MockModelView(Model, column_sortable_list=['col1', ('col2', 'test1')])
    admin.add_view(view)
    eq_(view._sortable_columns, dict(col1='col1', col2='test1'))
def test_column_searchable_list():
    """Declaring searchable columns flips _search_supported on."""
    app, admin = setup()
    view = MockModelView(Model, column_searchable_list=['col1', 'col2'])
    admin.add_view(view)
    eq_(view._search_supported, True)
    # TODO: Make calls with search
def test_column_filters():
    """column_filters scaffold one SimpleFilter per column, grouped by name."""
    app, admin = setup()
    view = MockModelView(Model, column_filters=['col1', 'col2'])
    admin.add_view(view)
    eq_(len(view._filters), 2)
    eq_(view._filters[0].name, 'col1')
    eq_(view._filters[1].name, 'col2')
    eq_([(f['index'], f['operation']) for f in view._filter_groups[u'col1']], [(0, 'test')])
    eq_([(f['index'], f['operation']) for f in view._filter_groups[u'col2']], [(1, 'test')])
    # TODO: Make calls with filters
def test_filter_list_callable():
    """Filter options given as a callable are resolved by get_options."""
    app, admin = setup()
    flt = SimpleFilter('test', options=lambda: [('1', 'Test 1'), ('2', 'Test 2')])
    view = MockModelView(Model, column_filters=[flt])
    admin.add_view(view)
    opts = flt.get_options(view)
    eq_(len(opts), 2)
    eq_(opts, [('1', 'Test 1'), ('2', 'Test 2')])
def test_form():
    """Placeholder: form_columns/excluded/args/widget_args untested."""
    # TODO: form_columns
    # TODO: form_excluded_columns
    # TODO: form_args
    # TODO: form_widget_args
    pass
@wtforms2_and_up
def test_csrf():
    """create/edit/delete must be rejected without a valid CSRF token and
    accepted with one (requires wtforms >= 2 for form.SecureForm)."""
    class SecureModelView(MockModelView):
        form_base_class = form.SecureForm
        def scaffold_form(self):
            return form.SecureForm
    def get_csrf_token(data):
        # Scrape the hidden csrf_token input's value out of rendered HTML.
        data = data.split('name="csrf_token" type="hidden" value="')[1]
        token = data.split('"')[0]
        return token
    app, admin = setup()
    view = SecureModelView(Model, endpoint='secure')
    admin.add_view(view)
    client = app.test_client()
    ################
    # create_view
    ################
    rv = client.get('/admin/secure/new/')
    eq_(rv.status_code, 200)
    ok_(u'name="csrf_token"' in rv.data.decode('utf-8'))
    csrf_token = get_csrf_token(rv.data.decode('utf-8'))
    # Create without CSRF token
    rv = client.post('/admin/secure/new/', data=dict(name='test1'))
    eq_(rv.status_code, 200)
    # Create with CSRF token
    rv = client.post('/admin/secure/new/', data=dict(name='test1',
                                                     csrf_token=csrf_token))
    eq_(rv.status_code, 302)
    ###############
    # edit_view
    ###############
    rv = client.get('/admin/secure/edit/?url=%2Fadmin%2Fsecure%2F&id=1')
    eq_(rv.status_code, 200)
    ok_(u'name="csrf_token"' in rv.data.decode('utf-8'))
    csrf_token = get_csrf_token(rv.data.decode('utf-8'))
    # Edit without CSRF token
    rv = client.post('/admin/secure/edit/?url=%2Fadmin%2Fsecure%2F&id=1',
                     data=dict(name='test1'))
    eq_(rv.status_code, 200)
    # Edit with CSRF token
    rv = client.post('/admin/secure/edit/?url=%2Fadmin%2Fsecure%2F&id=1',
                     data=dict(name='test1', csrf_token=csrf_token))
    eq_(rv.status_code, 302)
    ################
    # delete_view
    ################
    rv = client.get('/admin/secure/')
    eq_(rv.status_code, 200)
    ok_(u'name="csrf_token"' in rv.data.decode('utf-8'))
    csrf_token = get_csrf_token(rv.data.decode('utf-8'))
    # Delete without CSRF token, test validation errors
    rv = client.post('/admin/secure/delete/',
                     data=dict(id="1", url="/admin/secure/"), follow_redirects=True)
    eq_(rv.status_code, 200)
    ok_(u'Record was successfully deleted.' not in rv.data.decode('utf-8'))
    ok_(u'Failed to delete record.' in rv.data.decode('utf-8'))
    # Delete with CSRF token
    rv = client.post('/admin/secure/delete/',
                     data=dict(id="1", url="/admin/secure/", csrf_token=csrf_token),
                     follow_redirects=True)
    eq_(rv.status_code, 200)
    ok_(u'Record was successfully deleted.' in rv.data.decode('utf-8'))
def test_custom_form():
    """A user-supplied form class replaces both scaffolded forms."""
    app, admin = setup()
    class TestForm(form.BaseForm):
        pass
    view = MockModelView(Model, form=TestForm)
    admin.add_view(view)
    eq_(view._create_form_class, TestForm)
    eq_(view._edit_form_class, TestForm)
    ok_(not hasattr(view._create_form_class, 'col1'))
def test_modal_edit():
    """edit_modal/create_modal toggle the fa_modal_window markup in both
    bootstrap2 and bootstrap3 template modes."""
    # bootstrap 2 - test edit_modal
    app_bs2 = Flask(__name__)
    admin_bs2 = Admin(app_bs2, template_mode="bootstrap2")
    edit_modal_on = MockModelView(Model, edit_modal=True,
                                  endpoint="edit_modal_on")
    edit_modal_off = MockModelView(Model, edit_modal=False,
                                   endpoint="edit_modal_off")
    create_modal_on = MockModelView(Model, create_modal=True,
                                    endpoint="create_modal_on")
    create_modal_off = MockModelView(Model, create_modal=False,
                                     endpoint="create_modal_off")
    admin_bs2.add_view(edit_modal_on)
    admin_bs2.add_view(edit_modal_off)
    admin_bs2.add_view(create_modal_on)
    admin_bs2.add_view(create_modal_off)
    client_bs2 = app_bs2.test_client()
    # bootstrap 2 - ensure modal window is added when edit_modal is enabled
    rv = client_bs2.get('/admin/edit_modal_on/')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('fa_modal_window' in data)
    # bootstrap 2 - test edit modal disabled
    rv = client_bs2.get('/admin/edit_modal_off/')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('fa_modal_window' not in data)
    # bootstrap 2 - ensure modal window is added when create_modal is enabled
    rv = client_bs2.get('/admin/create_modal_on/')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('fa_modal_window' in data)
    # bootstrap 2 - test create modal disabled
    rv = client_bs2.get('/admin/create_modal_off/')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('fa_modal_window' not in data)
    # bootstrap 3
    app_bs3 = Flask(__name__)
    admin_bs3 = Admin(app_bs3, template_mode="bootstrap3")
    admin_bs3.add_view(edit_modal_on)
    admin_bs3.add_view(edit_modal_off)
    admin_bs3.add_view(create_modal_on)
    admin_bs3.add_view(create_modal_off)
    client_bs3 = app_bs3.test_client()
    # bootstrap 3 - ensure modal window is added when edit_modal is enabled
    rv = client_bs3.get('/admin/edit_modal_on/')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('fa_modal_window' in data)
    # bootstrap 3 - test modal disabled
    rv = client_bs3.get('/admin/edit_modal_off/')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('fa_modal_window' not in data)
    # bootstrap 3 - ensure modal window is added when edit_modal is enabled
    rv = client_bs3.get('/admin/create_modal_on/')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('fa_modal_window' in data)
    # bootstrap 3 - test modal disabled
    rv = client_bs3.get('/admin/create_modal_off/')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('fa_modal_window' not in data)
def check_class_name():
    """View name is derived from the class name ('DummyView' -> 'Dummy View')."""
    # NOTE(review): named check_* rather than test_*, so nose does not
    # collect it — presumably deliberate; confirm before renaming.
    class DummyView(MockModelView):
        pass
    view = DummyView(Model)
    eq_(view.name, 'Dummy View')
def test_export_csv():
    """CSV export: disabled redirect, include/exclude lists, utf-8, None and
    int values, column_formatters(_export), and macro formatter errors."""
    app, admin = setup()
    client = app.test_client()
    # test redirect when csv export is disabled
    view = MockModelView(Model, column_list=['col1', 'col2'], endpoint="test")
    admin.add_view(view)
    rv = client.get('/admin/test/export/csv/')
    eq_(rv.status_code, 302)
    # basic test of csv export with a few records
    view_data = {
        1: Model(1, "col1_1", "col2_1"),
        2: Model(2, "col1_2", "col2_2"),
        3: Model(3, "col1_3", "col2_3"),
    }
    view = MockModelView(Model, view_data, can_export=True,
                         column_list=['col1', 'col2'])
    admin.add_view(view)
    rv = client.get('/admin/model/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.mimetype, 'text/csv')
    eq_(rv.status_code, 200)
    ok_("Col1,Col2\r\n"
        "col1_1,col2_1\r\n"
        "col1_2,col2_2\r\n"
        "col1_3,col2_3\r\n" == data)
    # test explicit use of column_export_list
    view = MockModelView(Model, view_data, can_export=True,
                         column_list=['col1', 'col2'],
                         column_export_list=['id','col1','col2'],
                         endpoint='exportinclusion')
    admin.add_view(view)
    rv = client.get('/admin/exportinclusion/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.mimetype, 'text/csv')
    eq_(rv.status_code, 200)
    ok_("Id,Col1,Col2\r\n"
        "1,col1_1,col2_1\r\n"
        "2,col1_2,col2_2\r\n"
        "3,col1_3,col2_3\r\n" == data)
    # test explicit use of column_export_exclude_list
    view = MockModelView(Model, view_data, can_export=True,
                         column_list=['col1', 'col2'],
                         column_export_exclude_list=['col2'],
                         endpoint='exportexclusion')
    admin.add_view(view)
    rv = client.get('/admin/exportexclusion/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.mimetype, 'text/csv')
    eq_(rv.status_code, 200)
    ok_("Col1\r\n"
        "col1_1\r\n"
        "col1_2\r\n"
        "col1_3\r\n" == data)
    # test utf8 characters in csv export
    view_data[4] = Model(1, u'\u2013ut8_1\u2013', u'\u2013utf8_2\u2013')
    view = MockModelView(Model, view_data, can_export=True,
                         column_list=['col1', 'col2'], endpoint="utf8")
    admin.add_view(view)
    rv = client.get('/admin/utf8/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.status_code, 200)
    ok_(u'\u2013ut8_1\u2013,\u2013utf8_2\u2013\r\n' in data)
    # test None type, integer type, column_labels, and column_formatters
    view_data = {
        1: Model(1, "col1_1", 1),
        2: Model(2, "col1_2", 2),
        3: Model(3, None, 3),
    }
    view = MockModelView(
        Model, view_data, can_export=True, column_list=['col1', 'col2'],
        column_labels={'col1': 'Str Field', 'col2': 'Int Field'},
        column_formatters=dict(col2=lambda v, c, m, p: m.col2*2),
        endpoint="types_and_formatters"
    )
    admin.add_view(view)
    rv = client.get('/admin/types_and_formatters/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.status_code, 200)
    ok_("Str Field,Int Field\r\n"
        "col1_1,2\r\n"
        "col1_2,4\r\n"
        ",6\r\n" == data)
    # test column_formatters_export and column_formatters_export
    type_formatters = {type(None): lambda view, value: "null"}
    view = MockModelView(
        Model, view_data, can_export=True, column_list=['col1', 'col2'],
        column_formatters_export=dict(col2=lambda v, c, m, p: m.col2*3),
        column_formatters=dict(col2=lambda v, c, m, p: m.col2*2),  # overridden
        column_type_formatters_export=type_formatters,
        endpoint="export_types_and_formatters"
    )
    admin.add_view(view)
    rv = client.get('/admin/export_types_and_formatters/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.status_code, 200)
    ok_("Col1,Col2\r\n"
        "col1_1,3\r\n"
        "col1_2,6\r\n"
        "null,9\r\n" == data)
    # Macros are not implemented for csv export yet and will throw an error
    view = MockModelView(
        Model, can_export=True, column_list=['col1', 'col2'],
        column_formatters=dict(col1=macro('render_macro')),
        endpoint="macro_exception"
    )
    admin.add_view(view)
    rv = client.get('/admin/macro_exception/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.status_code, 500)
    # We should be able to specify column_formatters_export
    # and not get an exception if a column_formatter is using a macro
    def export_formatter(v, c, m, p):
        return m.col1 if m else ''
    view = MockModelView(
        Model, view_data, can_export=True, column_list=['col1', 'col2'],
        column_formatters=dict(col1=macro('render_macro')),
        column_formatters_export=dict(col1=export_formatter),
        endpoint="macro_exception_formatter_override"
    )
    admin.add_view(view)
    rv = client.get('/admin/macro_exception_formatter_override/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.status_code, 200)
    ok_("Col1,Col2\r\n"
        "col1_1,1\r\n"
        "col1_2,2\r\n"
        ",3\r\n" == data)
    # We should not get an exception if a column_formatter is
    # using a macro but it is on the column_export_exclude_list
    view = MockModelView(
        Model, view_data, can_export=True, column_list=['col1', 'col2'],
        column_formatters=dict(col1=macro('render_macro')),
        column_export_exclude_list=['col1'],
        endpoint="macro_exception_exclude_override"
    )
    admin.add_view(view)
    rv = client.get('/admin/macro_exception_exclude_override/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.status_code, 200)
    ok_("Col2\r\n"
        "1\r\n"
        "2\r\n"
        "3\r\n" == data)
    # When we use column_export_list to hide the macro field
    # we should not get an exception
    view = MockModelView(
        Model, view_data, can_export=True, column_list=['col1', 'col2'],
        column_formatters=dict(col1=macro('render_macro')),
        column_export_list=['col2'],
        endpoint="macro_exception_list_override"
    )
    admin.add_view(view)
    rv = client.get('/admin/macro_exception_list_override/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.status_code, 200)
    ok_("Col2\r\n"
        "1\r\n"
        "2\r\n"
        "3\r\n" == data)
    # If they define a macro on the column_formatters_export list
    # then raise an exception
    view = MockModelView(
        Model, view_data, can_export=True, column_list=['col1', 'col2'],
        column_formatters=dict(col1=macro('render_macro')),
        endpoint="macro_exception_macro_override"
    )
    admin.add_view(view)
    rv = client.get('/admin/macro_exception_macro_override/export/csv/')
    data = rv.data.decode('utf-8')
    eq_(rv.status_code, 500)
def test_list_row_actions():
    """Default, details-only, popup, and custom link row actions are built
    in the expected order and render without error."""
    app, admin = setup()
    client = app.test_client()
    from flask_admin.model import template
    # Test default actions
    view = MockModelView(Model, endpoint='test')
    admin.add_view(view)
    actions = view.get_list_row_actions()
    ok_(isinstance(actions[0], template.EditRowAction))
    ok_(isinstance(actions[1], template.DeleteRowAction))
    rv = client.get('/admin/test/')
    eq_(rv.status_code, 200)
    # Test default actions
    view = MockModelView(Model, endpoint='test1', can_edit=False, can_delete=False, can_view_details=True)
    admin.add_view(view)
    actions = view.get_list_row_actions()
    eq_(len(actions), 1)
    ok_(isinstance(actions[0], template.ViewRowAction))
    rv = client.get('/admin/test1/')
    eq_(rv.status_code, 200)
    # Test popups
    view = MockModelView(Model, endpoint='test2',
                         can_view_details=True,
                         details_modal=True,
                         edit_modal=True)
    admin.add_view(view)
    actions = view.get_list_row_actions()
    ok_(isinstance(actions[0], template.ViewPopupRowAction))
    ok_(isinstance(actions[1], template.EditPopupRowAction))
    ok_(isinstance(actions[2], template.DeleteRowAction))
    rv = client.get('/admin/test2/')
    eq_(rv.status_code, 200)
    # Test custom views
    view = MockModelView(Model, endpoint='test3',
                         column_extra_row_actions=[
                             template.LinkRowAction('glyphicon glyphicon-off', 'http://localhost/?id={row_id}'),
                             template.EndpointLinkRowAction('glyphicon glyphicon-test', 'test1.index_view')
                         ])
    admin.add_view(view)
    actions = view.get_list_row_actions()
    ok_(isinstance(actions[0], template.EditRowAction))
    ok_(isinstance(actions[1], template.DeleteRowAction))
    ok_(isinstance(actions[2], template.LinkRowAction))
    ok_(isinstance(actions[3], template.EndpointLinkRowAction))
    rv = client.get('/admin/test3/')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('glyphicon-off' in data)
    ok_('http://localhost/?id=' in data)
    ok_('glyphicon-test' in data)
| bsd-3-clause |
tiny4579/tinykernel-gnex | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
process_names = {}  # NOTE(review): redundant — re-initialised (with comment) below
thread_thislock = {}  # tid -> futex address the thread is currently waiting on
thread_blocktime = {}  # tid -> timestamp (ns) when the thread entered FUTEX_WAIT
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
                              nr, uaddr, op, val, utime, uaddr2, val3):
    """Record which thread starts waiting on which futex address, and when."""
    if (op & FUTEX_CMD_MASK) != FUTEX_WAIT:
        # we don't care about originators of WAKE events
        return
    process_names[tid] = comm
    thread_thislock[tid] = uaddr
    thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
                             nr, ret):
    """On futex syscall exit, fold the elapsed wait time into lock_waits."""
    # dict.has_key() was removed in Python 3; the `in` operator is the
    # exact equivalent and also works on Python 2.
    if tid in thread_blocktime:
        elapsed = nsecs(s, ns) - thread_blocktime[tid]
        add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
        del thread_blocktime[tid]
        del thread_thislock[tid]
def trace_begin():
    """Called by perf at trace start."""
    # Parenthesised single-argument print behaves identically on Python 2
    # and is valid Python 3 syntax.
    print("Press control+C to stop and show the summary")
def trace_end():
    """Called by perf at trace end: print per-(tid, lock) contention stats."""
    for (tid, lock) in lock_waits:
        # Rename unpacked stats to avoid shadowing the min()/max() builtins;
        # only count and avg are printed.
        min_ns, max_ns, avg, count = lock_waits[tid, lock]
        # Parenthesised single-argument print works on Python 2 and 3 alike.
        print("%s[%d] lock %x contended %d times, %d avg ns" %
              (process_names[tid], tid, lock, count, avg))
| gpl-2.0 |
2014c2g9/c2g9 | wsgi/static/reeborg/src/libraries/brython/Lib/unittest/test/test_setups.py | 791 | 16440 | import io
import sys
import unittest
def resultFactory(*_unused):
    """Build a fresh, plain ``unittest.TestResult``, ignoring all arguments."""
    result = unittest.TestResult()
    return result
class TestSetups(unittest.TestCase):
    def getRunner(self):
        """Text runner writing to a StringIO, producing plain TestResults."""
        return unittest.TextTestRunner(resultclass=resultFactory,
                                          stream=io.StringIO())
    def runTests(self, *cases):
        """Load *cases* into a deliberately-nested suite and run it."""
        suite = unittest.TestSuite()
        for case in cases:
            tests = unittest.defaultTestLoader.loadTestsFromTestCase(case)
            suite.addTests(tests)
        runner = self.getRunner()
        # creating a nested suite exposes some potential bugs
        realSuite = unittest.TestSuite()
        realSuite.addTest(suite)
        # adding empty suites to the end exposes potential bugs
        suite.addTest(unittest.TestSuite())
        realSuite.addTest(unittest.TestSuite())
        return runner.run(realSuite)
    def test_setup_class(self):
        """setUpClass runs exactly once for a case with two tests."""
        class Test(unittest.TestCase):
            setUpCalled = 0
            @classmethod
            def setUpClass(cls):
                Test.setUpCalled += 1
                unittest.TestCase.setUpClass()
            def test_one(self):
                pass
            def test_two(self):
                pass
        result = self.runTests(Test)
        self.assertEqual(Test.setUpCalled, 1)
        self.assertEqual(result.testsRun, 2)
        self.assertEqual(len(result.errors), 0)
    def test_teardown_class(self):
        """tearDownClass runs exactly once for a case with two tests."""
        class Test(unittest.TestCase):
            tearDownCalled = 0
            @classmethod
            def tearDownClass(cls):
                Test.tearDownCalled += 1
                unittest.TestCase.tearDownClass()
            def test_one(self):
                pass
            def test_two(self):
                pass
        result = self.runTests(Test)
        self.assertEqual(Test.tearDownCalled, 1)
        self.assertEqual(result.testsRun, 2)
        self.assertEqual(len(result.errors), 0)
    def test_teardown_class_two_classes(self):
        """Each of two cases gets its own single tearDownClass call."""
        class Test(unittest.TestCase):
            tearDownCalled = 0
            @classmethod
            def tearDownClass(cls):
                Test.tearDownCalled += 1
                unittest.TestCase.tearDownClass()
            def test_one(self):
                pass
            def test_two(self):
                pass
        class Test2(unittest.TestCase):
            tearDownCalled = 0
            @classmethod
            def tearDownClass(cls):
                Test2.tearDownCalled += 1
                unittest.TestCase.tearDownClass()
            def test_one(self):
                pass
            def test_two(self):
                pass
        result = self.runTests(Test, Test2)
        self.assertEqual(Test.tearDownCalled, 1)
        self.assertEqual(Test2.tearDownCalled, 1)
        self.assertEqual(result.testsRun, 4)
        self.assertEqual(len(result.errors), 0)
    def test_error_in_setupclass(self):
        """A raising setUpClass skips all tests and records one error."""
        class BrokenTest(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                raise TypeError('foo')
            def test_one(self):
                pass
            def test_two(self):
                pass
        result = self.runTests(BrokenTest)
        self.assertEqual(result.testsRun, 0)
        self.assertEqual(len(result.errors), 1)
        error, _ = result.errors[0]
        self.assertEqual(str(error),
                    'setUpClass (%s.BrokenTest)' % __name__)
    def test_error_in_teardown_class(self):
        """A raising tearDownClass still runs tests; one error per class."""
        class Test(unittest.TestCase):
            tornDown = 0
            @classmethod
            def tearDownClass(cls):
                Test.tornDown += 1
                raise TypeError('foo')
            def test_one(self):
                pass
            def test_two(self):
                pass
        class Test2(unittest.TestCase):
            tornDown = 0
            @classmethod
            def tearDownClass(cls):
                Test2.tornDown += 1
                raise TypeError('foo')
            def test_one(self):
                pass
            def test_two(self):
                pass
        result = self.runTests(Test, Test2)
        self.assertEqual(result.testsRun, 4)
        self.assertEqual(len(result.errors), 2)
        self.assertEqual(Test.tornDown, 1)
        self.assertEqual(Test2.tornDown, 1)
        error, _ = result.errors[0]
        self.assertEqual(str(error),
                    'tearDownClass (%s.Test)' % __name__)
    def test_class_not_torndown_when_setup_fails(self):
        """tearDownClass is skipped when setUpClass raised."""
        class Test(unittest.TestCase):
            tornDown = False
            @classmethod
            def setUpClass(cls):
                raise TypeError
            @classmethod
            def tearDownClass(cls):
                Test.tornDown = True
                raise TypeError('foo')
            def test_one(self):
                pass
        self.runTests(Test)
        self.assertFalse(Test.tornDown)
    def test_class_not_setup_or_torndown_when_skipped(self):
        """Neither class fixture runs for a class-level-skipped case."""
        class Test(unittest.TestCase):
            classSetUp = False
            tornDown = False
            @classmethod
            def setUpClass(cls):
                Test.classSetUp = True
            @classmethod
            def tearDownClass(cls):
                Test.tornDown = True
            def test_one(self):
                pass
        Test = unittest.skip("hop")(Test)
        self.runTests(Test)
        self.assertFalse(Test.classSetUp)
        self.assertFalse(Test.tornDown)
    def test_setup_teardown_order_with_pathological_suite(self):
        """Module/class fixtures fire in the right order even when tests of
        the same class/module are scattered across nested sub-suites."""
        results = []
        class Module1(object):
            @staticmethod
            def setUpModule():
                results.append('Module1.setUpModule')
            @staticmethod
            def tearDownModule():
                results.append('Module1.tearDownModule')
        class Module2(object):
            @staticmethod
            def setUpModule():
                results.append('Module2.setUpModule')
            @staticmethod
            def tearDownModule():
                results.append('Module2.tearDownModule')
        class Test1(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                results.append('setup 1')
            @classmethod
            def tearDownClass(cls):
                results.append('teardown 1')
            def testOne(self):
                results.append('Test1.testOne')
            def testTwo(self):
                results.append('Test1.testTwo')
        class Test2(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                results.append('setup 2')
            @classmethod
            def tearDownClass(cls):
                results.append('teardown 2')
            def testOne(self):
                results.append('Test2.testOne')
            def testTwo(self):
                results.append('Test2.testTwo')
        class Test3(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                results.append('setup 3')
            @classmethod
            def tearDownClass(cls):
                results.append('teardown 3')
            def testOne(self):
                results.append('Test3.testOne')
            def testTwo(self):
                results.append('Test3.testTwo')
        # Fake module registration so unittest treats the stub classes as
        # the test classes' defining modules.
        Test1.__module__ = Test2.__module__ = 'Module'
        Test3.__module__ = 'Module2'
        sys.modules['Module'] = Module1
        sys.modules['Module2'] = Module2
        first = unittest.TestSuite((Test1('testOne'),))
        second = unittest.TestSuite((Test1('testTwo'),))
        third = unittest.TestSuite((Test2('testOne'),))
        fourth = unittest.TestSuite((Test2('testTwo'),))
        fifth = unittest.TestSuite((Test3('testOne'),))
        sixth = unittest.TestSuite((Test3('testTwo'),))
        suite = unittest.TestSuite((first, second, third, fourth, fifth, sixth))
        runner = self.getRunner()
        result = runner.run(suite)
        self.assertEqual(result.testsRun, 6)
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(results,
                         ['Module1.setUpModule', 'setup 1',
                          'Test1.testOne', 'Test1.testTwo', 'teardown 1',
                          'setup 2', 'Test2.testOne', 'Test2.testTwo',
                          'teardown 2', 'Module1.tearDownModule',
                          'Module2.setUpModule', 'setup 3',
                          'Test3.testOne', 'Test3.testTwo',
                          'teardown 3', 'Module2.tearDownModule'])
    def test_setup_module(self):
        """setUpModule runs exactly once for the registered fake module."""
        class Module(object):
            moduleSetup = 0
            @staticmethod
            def setUpModule():
                Module.moduleSetup += 1
        class Test(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass
        Test.__module__ = 'Module'
        sys.modules['Module'] = Module
        result = self.runTests(Test)
        self.assertEqual(Module.moduleSetup, 1)
        self.assertEqual(result.testsRun, 2)
        self.assertEqual(len(result.errors), 0)
    def test_error_in_setup_module(self):
        """A raising setUpModule skips classes, tests, and tearDownModule."""
        class Module(object):
            moduleSetup = 0
            moduleTornDown = 0
            @staticmethod
            def setUpModule():
                Module.moduleSetup += 1
                raise TypeError('foo')
            @staticmethod
            def tearDownModule():
                Module.moduleTornDown += 1
        class Test(unittest.TestCase):
            classSetUp = False
            classTornDown = False
            @classmethod
            def setUpClass(cls):
                Test.classSetUp = True
            @classmethod
            def tearDownClass(cls):
                Test.classTornDown = True
            def test_one(self):
                pass
            def test_two(self):
                pass
        class Test2(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass
        Test.__module__ = 'Module'
        Test2.__module__ = 'Module'
        sys.modules['Module'] = Module
        result = self.runTests(Test, Test2)
        self.assertEqual(Module.moduleSetup, 1)
        self.assertEqual(Module.moduleTornDown, 0)
        self.assertEqual(result.testsRun, 0)
        self.assertFalse(Test.classSetUp)
        self.assertFalse(Test.classTornDown)
        self.assertEqual(len(result.errors), 1)
        error, _ = result.errors[0]
        self.assertEqual(str(error), 'setUpModule (Module)')
    def test_testcase_with_missing_module(self):
        """Tests still run when the case's module is absent from sys.modules."""
        class Test(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass
        Test.__module__ = 'Module'
        sys.modules.pop('Module', None)
        result = self.runTests(Test)
        self.assertEqual(result.testsRun, 2)
def test_teardown_module(self):
class Module(object):
moduleTornDown = 0
@staticmethod
def tearDownModule():
Module.moduleTornDown += 1
class Test(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test)
self.assertEqual(Module.moduleTornDown, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_error_in_teardown_module(self):
class Module(object):
moduleTornDown = 0
@staticmethod
def tearDownModule():
Module.moduleTornDown += 1
raise TypeError('foo')
class Test(unittest.TestCase):
classSetUp = False
classTornDown = False
@classmethod
def setUpClass(cls):
Test.classSetUp = True
@classmethod
def tearDownClass(cls):
Test.classTornDown = True
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
Test2.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test, Test2)
self.assertEqual(Module.moduleTornDown, 1)
self.assertEqual(result.testsRun, 4)
self.assertTrue(Test.classSetUp)
self.assertTrue(Test.classTornDown)
self.assertEqual(len(result.errors), 1)
error, _ = result.errors[0]
self.assertEqual(str(error), 'tearDownModule (Module)')
    def test_skiptest_in_setupclass(self):
        """SkipTest raised in setUpClass skips the class, with no errors."""
        class Test(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                raise unittest.SkipTest('foo')
            def test_one(self):
                pass
            def test_two(self):
                pass
        result = self.runTests(Test)
        # Nothing ran; the skip is recorded once for the whole class.
        self.assertEqual(result.testsRun, 0)
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.skipped), 1)
        skipped = result.skipped[0][0]
        self.assertEqual(str(skipped), 'setUpClass (%s.Test)' % __name__)
    def test_skiptest_in_setupmodule(self):
        """SkipTest raised in setUpModule skips the module's tests, with
        no errors recorded."""
        class Test(unittest.TestCase):
            def test_one(self):
                pass
            def test_two(self):
                pass
        class Module(object):
            @staticmethod
            def setUpModule():
                raise unittest.SkipTest('foo')
        Test.__module__ = 'Module'
        sys.modules['Module'] = Module
        result = self.runTests(Test)
        # Nothing ran; one module-level skip entry was recorded.
        self.assertEqual(result.testsRun, 0)
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.skipped), 1)
        skipped = result.skipped[0][0]
        self.assertEqual(str(skipped), 'setUpModule (Module)')
    def test_suite_debug_executes_setups_and_teardowns(self):
        """TestSuite.debug() runs module/class fixtures in the same order
        as a normal run."""
        ordering = []
        class Module(object):
            @staticmethod
            def setUpModule():
                ordering.append('setUpModule')
            @staticmethod
            def tearDownModule():
                ordering.append('tearDownModule')
        class Test(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                ordering.append('setUpClass')
            @classmethod
            def tearDownClass(cls):
                ordering.append('tearDownClass')
            def test_something(self):
                ordering.append('test_something')
        Test.__module__ = 'Module'
        sys.modules['Module'] = Module
        suite = unittest.defaultTestLoader.loadTestsFromTestCase(Test)
        suite.debug()
        expectedOrder = ['setUpModule', 'setUpClass', 'test_something', 'tearDownClass', 'tearDownModule']
        self.assertEqual(ordering, expectedOrder)
    def test_suite_debug_propagates_exceptions(self):
        """debug() must propagate an exception from each fixture phase.

        The nested fixtures close over the local ``phase``, which is
        (re)bound by the for-loop below before each debug() call -- the
        late-binding closure selects which phase raises on each pass.
        """
        class Module(object):
            @staticmethod
            def setUpModule():
                if phase == 0:
                    raise Exception('setUpModule')
            @staticmethod
            def tearDownModule():
                if phase == 1:
                    raise Exception('tearDownModule')
        class Test(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                if phase == 2:
                    raise Exception('setUpClass')
            @classmethod
            def tearDownClass(cls):
                if phase == 3:
                    raise Exception('tearDownClass')
            def test_something(self):
                if phase == 4:
                    raise Exception('test_something')
        Test.__module__ = 'Module'
        sys.modules['Module'] = Module
        _suite = unittest.defaultTestLoader.loadTestsFromTestCase(Test)
        suite = unittest.TestSuite()
        suite.addTest(_suite)
        messages = ('setUpModule', 'tearDownModule', 'setUpClass', 'tearDownClass', 'test_something')
        for phase, msg in enumerate(messages):
            with self.assertRaisesRegex(Exception, msg):
                suite.debug()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
DalikarFT/CFVOP | venv/Lib/site-packages/pip/_vendor/distlib/version.py | 335 | 23711 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2016 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""
Implementation of a flexible versioning scheme providing support for PEP-440,
setuptools-compatible and semantic versioning.
"""
import logging
import re
from .compat import string_types
# Public API of this module.
__all__ = ['NormalizedVersion', 'NormalizedMatcher',
           'LegacyVersion', 'LegacyMatcher',
           'SemanticVersion', 'SemanticMatcher',
           'UnsupportedVersionError', 'get_scheme']

# Module-level logger, used for non-fatal matching diagnostics.
logger = logging.getLogger(__name__)
class UnsupportedVersionError(ValueError):
    """Raised when a version string cannot be parsed by the scheme in use."""
class Version(object):
    """Abstract base for comparable version values.

    Subclasses implement :meth:`parse`, which turns the stripped source
    string into a non-empty tuple; tuple comparison then supplies the
    total ordering.
    """

    def __init__(self, s):
        s = s.strip()
        self._string = s
        parts = self.parse(s)
        assert isinstance(parts, tuple)
        assert len(parts) > 0
        self._parts = parts

    def parse(self, s):
        """Return the comparison key for *s*; must be a non-empty tuple."""
        raise NotImplementedError('please implement in a subclass')

    def _check_compatible(self, other):
        # Comparing instances of different Version subclasses is an error.
        if type(self) != type(other):
            raise TypeError('cannot compare %r and %r' % (self, other))

    def __eq__(self, other):
        self._check_compatible(other)
        return self._parts == other._parts

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        self._check_compatible(other)
        return self._parts < other._parts

    # The remaining orderings are all derived from __lt__ and __eq__,
    # so subclasses only ever need to customise those two.
    def __gt__(self, other):
        return not (self.__lt__(other) or self.__eq__(other))

    def __le__(self, other):
        return self.__lt__(other) or self.__eq__(other)

    def __ge__(self, other):
        return self.__gt__(other) or self.__eq__(other)

    # See http://docs.python.org/reference/datamodel#object.__hash__
    def __hash__(self):
        return hash(self._parts)

    def __repr__(self):
        return "%s('%s')" % (self.__class__.__name__, self._string)

    def __str__(self):
        return self._string

    @property
    def is_prerelease(self):
        raise NotImplementedError('Please implement in subclasses.')
class Matcher(object):
    """Base class for requirement matchers.

    Parses a string such as ``"name (>= 1.0, != 1.3)"`` into a
    distribution name plus a tuple of ``(op, constraint, is_prefix)``
    clauses, and checks candidate versions against all of them.
    """
    # Subclasses must set this to the Version subclass used to parse
    # constraint versions.
    version_class = None

    # "name (constraints)" -- the parenthesised constraint list is optional.
    dist_re = re.compile(r"^(\w[\s\w'.-]*)(\((.*)\))?")
    # A single constraint: optional operator followed by a version string.
    comp_re = re.compile(r'^(<=|>=|<|>|!=|={2,3}|~=)?\s*([^\s,]+)$')
    # Purely numeric dotted versions (used to validate '.*' prefixes).
    num_re = re.compile(r'^\d+(\.\d+)*$')

    # value is either a callable or the name of a method
    _operators = {
        '<': lambda v, c, p: v < c,
        '>': lambda v, c, p: v > c,
        '<=': lambda v, c, p: v == c or v < c,
        '>=': lambda v, c, p: v == c or v > c,
        '==': lambda v, c, p: v == c,
        '===': lambda v, c, p: v == c,
        # by default, compatible => >=.
        '~=': lambda v, c, p: v == c or v > c,
        '!=': lambda v, c, p: v != c,
    }

    def __init__(self, s):
        """Parse requirement string *s*; raises ValueError if malformed."""
        if self.version_class is None:
            raise ValueError('Please specify a version class')
        self._string = s = s.strip()
        m = self.dist_re.match(s)
        if not m:
            raise ValueError('Not valid: %r' % s)
        groups = m.groups('')
        self.name = groups[0].strip()
        self.key = self.name.lower()    # for case-insensitive comparisons
        clist = []
        if groups[2]:
            constraints = [c.strip() for c in groups[2].split(',')]
            for c in constraints:
                m = self.comp_re.match(c)
                if not m:
                    raise ValueError('Invalid %r in %r' % (c, s))
                groups = m.groups()
                # A missing operator defaults to 'compatible with'.
                op = groups[0] or '~='
                s = groups[1]
                if s.endswith('.*'):
                    if op not in ('==', '!='):
                        raise ValueError('\'.*\' not allowed for '
                                         '%r constraints' % op)
                    # Could be a partial version (e.g. for '2.*') which
                    # won't parse as a version, so keep it as a string
                    vn, prefix = s[:-2], True
                    if not self.num_re.match(vn):
                        # Just to check that vn is a valid version
                        self.version_class(vn)
                else:
                    # Should parse as a version, so we can create an
                    # instance for the comparison
                    vn, prefix = self.version_class(s), False
                clist.append((op, vn, prefix))
        self._parts = tuple(clist)

    def match(self, version):
        """
        Check if the provided version matches the constraints.
        :param version: The version to match against this instance.
        :type version: String or :class:`Version` instance.
        """
        if isinstance(version, string_types):
            version = self.version_class(version)
        for operator, constraint, prefix in self._parts:
            f = self._operators.get(operator)
            # String values name a method on this instance (see subclasses).
            if isinstance(f, string_types):
                f = getattr(self, f)
            if not f:
                msg = ('%r not implemented '
                       'for %s' % (operator, self.__class__.__name__))
                raise NotImplementedError(msg)
            # All clauses must hold for the version to match.
            if not f(version, constraint, prefix):
                return False
        return True

    @property
    def exact_version(self):
        """The pinned version when the matcher is a single '=='/'==='
        clause, else None."""
        result = None
        if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='):
            result = self._parts[0][1]
        return result

    def _check_compatible(self, other):
        # Only matchers of the same type for the same name are comparable.
        if type(self) != type(other) or self.name != other.name:
            raise TypeError('cannot compare %s and %s' % (self, other))

    def __eq__(self, other):
        self._check_compatible(other)
        return self.key == other.key and self._parts == other._parts

    def __ne__(self, other):
        return not self.__eq__(other)

    # See http://docs.python.org/reference/datamodel#object.__hash__
    def __hash__(self):
        return hash(self.key) + hash(self._parts)

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self._string)

    def __str__(self):
        return self._string
PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?'
r'(\.(post)(\d+))?(\.(dev)(\d+))?'
r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$')
def _pep_440_key(s):
s = s.strip()
m = PEP440_VERSION_RE.match(s)
if not m:
raise UnsupportedVersionError('Not a valid version: %s' % s)
groups = m.groups()
nums = tuple(int(v) for v in groups[1].split('.'))
while len(nums) > 1 and nums[-1] == 0:
nums = nums[:-1]
if not groups[0]:
epoch = 0
else:
epoch = int(groups[0])
pre = groups[4:6]
post = groups[7:9]
dev = groups[10:12]
local = groups[13]
if pre == (None, None):
pre = ()
else:
pre = pre[0], int(pre[1])
if post == (None, None):
post = ()
else:
post = post[0], int(post[1])
if dev == (None, None):
dev = ()
else:
dev = dev[0], int(dev[1])
if local is None:
local = ()
else:
parts = []
for part in local.split('.'):
# to ensure that numeric compares as > lexicographic, avoid
# comparing them directly, but encode a tuple which ensures
# correct sorting
if part.isdigit():
part = (1, int(part))
else:
part = (0, part)
parts.append(part)
local = tuple(parts)
if not pre:
# either before pre-release, or final release and after
if not post and dev:
# before pre-release
pre = ('a', -1) # to sort before a0
else:
pre = ('z',) # to sort after all pre-releases
# now look at the state of post and dev.
if not post:
post = ('_',) # sort before 'a'
if not dev:
dev = ('final',)
#print('%s -> %s' % (s, m.groups()))
return epoch, nums, pre, post, dev, local
_normalized_key = _pep_440_key
class NormalizedVersion(Version):
    """A rational version.

    Good:
        1.2         # equivalent to "1.2.0"
        1.2.0
        1.2a1
        1.2.3a2
        1.2.3b1
        1.2.3c1
        1.2.3.4
        TODO: fill this out

    Bad:
        1           # minimum two numbers
        1.2a        # release level must have a release serial
        1.2.3b
    """
    def parse(self, s):
        """Parse *s* per PEP 440, also recording the exact release clause."""
        result = _normalized_key(s)
        # _normalized_key loses trailing zeroes in the release
        # clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0
        # However, PEP 440 prefix matching needs it: for example,
        # (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0).
        m = PEP440_VERSION_RE.match(s)      # must succeed
        groups = m.groups()
        self._release_clause = tuple(int(v) for v in groups[1].split('.'))
        return result

    # Tags whose presence marks a version as a pre-release.
    PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev'])

    @property
    def is_prerelease(self):
        return any(t[0] in self.PREREL_TAGS for t in self._parts if t)
def _match_prefix(x, y):
x = str(x)
y = str(y)
if x == y:
return True
if not x.startswith(y):
return False
n = len(y)
return x[n] == '.'
class NormalizedMatcher(Matcher):
    """Matcher implementing PEP 440 constraint semantics via named methods."""
    version_class = NormalizedVersion

    # value is either a callable or the name of a method
    _operators = {
        '~=': '_match_compatible',
        '<': '_match_lt',
        '>': '_match_gt',
        '<=': '_match_le',
        '>=': '_match_ge',
        '==': '_match_eq',
        '===': '_match_arbitrary',
        '!=': '_match_ne',
    }

    def _adjust_local(self, version, constraint, prefix):
        """Strip *version*'s '+local' segment when the constraint has none,
        so local tags don't influence the comparison."""
        if prefix:
            strip_local = '+' not in constraint and version._parts[-1]
        else:
            # both constraint and version are
            # NormalizedVersion instances.
            # If constraint does not have a local component,
            # ensure the version doesn't, either.
            strip_local = not constraint._parts[-1] and version._parts[-1]
        if strip_local:
            s = version._string.split('+', 1)[0]
            version = self.version_class(s)
        return version, constraint

    def _match_lt(self, version, constraint, prefix):
        """'<': strictly lower, and not within the constraint's own
        release series (same-series versions are rejected)."""
        version, constraint = self._adjust_local(version, constraint, prefix)
        if version >= constraint:
            return False
        release_clause = constraint._release_clause
        pfx = '.'.join([str(i) for i in release_clause])
        return not _match_prefix(version, pfx)

    def _match_gt(self, version, constraint, prefix):
        """'>': strictly greater, and not within the constraint's own
        release series."""
        version, constraint = self._adjust_local(version, constraint, prefix)
        if version <= constraint:
            return False
        release_clause = constraint._release_clause
        pfx = '.'.join([str(i) for i in release_clause])
        return not _match_prefix(version, pfx)

    def _match_le(self, version, constraint, prefix):
        version, constraint = self._adjust_local(version, constraint, prefix)
        return version <= constraint

    def _match_ge(self, version, constraint, prefix):
        version, constraint = self._adjust_local(version, constraint, prefix)
        return version >= constraint

    def _match_eq(self, version, constraint, prefix):
        # For '== X.*' constraints, compare by release-series prefix.
        version, constraint = self._adjust_local(version, constraint, prefix)
        if not prefix:
            result = (version == constraint)
        else:
            result = _match_prefix(version, constraint)
        return result

    def _match_arbitrary(self, version, constraint, prefix):
        # '===': plain string equality, no version semantics at all.
        return str(version) == str(constraint)

    def _match_ne(self, version, constraint, prefix):
        version, constraint = self._adjust_local(version, constraint, prefix)
        if not prefix:
            result = (version != constraint)
        else:
            result = not _match_prefix(version, constraint)
        return result

    def _match_compatible(self, version, constraint, prefix):
        """'~=': at least *constraint*, and within the series obtained by
        dropping the constraint's final release component."""
        version, constraint = self._adjust_local(version, constraint, prefix)
        if version == constraint:
            return True
        if version < constraint:
            return False
#        if not prefix:
#            return True
        release_clause = constraint._release_clause
        if len(release_clause) > 1:
            release_clause = release_clause[:-1]
        pfx = '.'.join([str(i) for i in release_clause])
        return _match_prefix(version, pfx)
_REPLACEMENTS = (
(re.compile('[.+-]$'), ''), # remove trailing puncts
(re.compile(r'^[.](\d)'), r'0.\1'), # .N -> 0.N at start
(re.compile('^[.-]'), ''), # remove leading puncts
(re.compile(r'^\((.*)\)$'), r'\1'), # remove parentheses
(re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'), # remove leading v(ersion)
(re.compile(r'^r(ev)?\s*(\d+)'), r'\2'), # remove leading v(ersion)
(re.compile('[.]{2,}'), '.'), # multiple runs of '.'
(re.compile(r'\b(alfa|apha)\b'), 'alpha'), # misspelt alpha
(re.compile(r'\b(pre-alpha|prealpha)\b'),
'pre.alpha'), # standardise
(re.compile(r'\(beta\)$'), 'beta'), # remove parentheses
)
_SUFFIX_REPLACEMENTS = (
(re.compile('^[:~._+-]+'), ''), # remove leading puncts
(re.compile('[,*")([\]]'), ''), # remove unwanted chars
(re.compile('[~:+_ -]'), '.'), # replace illegal chars
(re.compile('[.]{2,}'), '.'), # multiple runs of '.'
(re.compile(r'\.$'), ''), # trailing '.'
)
_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)')
def _suggest_semantic_version(s):
    """
    Try to suggest a semantic form for a version for which
    _suggest_normalized_version couldn't come up with anything.
    """
    result = s.strip().lower()
    for pat, repl in _REPLACEMENTS:
        result = pat.sub(repl, result)
    if not result:
        result = '0.0.0'
    # Now look for numeric prefix, and separate it out from
    # the rest.
    #import pdb; pdb.set_trace()
    m = _NUMERIC_PREFIX.match(result)
    if not m:
        prefix = '0.0.0'
        suffix = result
    else:
        prefix = m.groups()[0].split('.')
        prefix = [int(i) for i in prefix]
        # Pad the numeric part to at least MAJOR.MINOR.PATCH...
        while len(prefix) < 3:
            prefix.append(0)
        if len(prefix) == 3:
            suffix = result[m.end():]
        else:
            # ...and fold any extra numeric components into the suffix.
            suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():]
            prefix = prefix[:3]
        prefix = '.'.join([str(i) for i in prefix])
        suffix = suffix.strip()
    if suffix:
        #import pdb; pdb.set_trace()
        # massage the suffix.
        for pat, repl in _SUFFIX_REPLACEMENTS:
            suffix = pat.sub(repl, suffix)
    if not suffix:
        result = prefix
    else:
        # 'dev' suffixes become semver pre-releases ('-'); anything else
        # is treated as build metadata ('+').
        sep = '-' if 'dev' in suffix else '+'
        result = prefix + sep + suffix
    # Only return the suggestion if it is actually valid semver.
    if not is_semver(result):
        result = None
    return result
def _suggest_normalized_version(s):
    """Suggest a normalized version close to the given version string.

    If you have a version string that isn't rational (i.e. NormalizedVersion
    doesn't like it) then you might be able to get an equivalent (or close)
    rational version from this function.

    This does a number of simple normalizations to the given string, based
    on observation of versions currently in use on PyPI. Given a dump of
    those version during PyCon 2009, 4287 of them:
    - 2312 (53.93%) match NormalizedVersion without change
      with the automatic suggestion
    - 3474 (81.04%) match when using this suggestion method

    @param s {str} An irrational version string.
    @returns A rational version string, or None, if couldn't determine one.
    """
    try:
        _normalized_key(s)
        return s   # already rational
    except UnsupportedVersionError:
        pass
    rs = s.lower()
    # part of this could use maketrans
    for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'),
                       ('beta', 'b'), ('rc', 'c'), ('-final', ''),
                       ('-pre', 'c'),
                       ('-release', ''), ('.release', ''), ('-stable', ''),
                       ('+', '.'), ('_', '.'), (' ', ''), ('.final', ''),
                       ('final', '')):
        rs = rs.replace(orig, repl)
    # NOTE: the substitution order below matters -- later passes rely on
    # the canonical forms produced by earlier ones.
    # if something ends with dev or pre, we add a 0
    rs = re.sub(r"pre$", r"pre0", rs)
    rs = re.sub(r"dev$", r"dev0", rs)
    # if we have something like "b-2" or "a.2" at the end of the
    # version, that is probably beta, alpha, etc
    # let's remove the dash or dot
    rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs)
    # 1.0-dev-r371 -> 1.0.dev371
    # 0.1-dev-r79 -> 0.1.dev79
    rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs)
    # Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1
    rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs)
    # Clean: v0.3, v1.0
    if rs.startswith('v'):
        rs = rs[1:]
    # Clean leading '0's on numbers.
    #TODO: unintended side-effect on, e.g., "2003.05.09"
    # PyPI stats: 77 (~2%) better
    rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs)
    # Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers
    # zero.
    # PyPI stats: 245 (7.56%) better
    rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs)
    # the 'dev-rNNN' tag is a dev tag
    rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs)
    # clean the - when used as a pre delimiter
    rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs)
    # a terminal "dev" or "devel" can be changed into ".dev0"
    rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs)
    # a terminal "dev" can be changed into ".dev0"
    rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs)
    # a terminal "final" or "stable" can be removed
    rs = re.sub(r"(final|stable)$", "", rs)
    # The 'r' and the '-' tags are post release tags
    # 0.4a1.r10 -> 0.4a1.post10
    # 0.9.33-17222 -> 0.9.33.post17222
    # 0.9.33-r17222 -> 0.9.33.post17222
    rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs)
    # Clean 'r' instead of 'dev' usage:
    # 0.9.33+r17222 -> 0.9.33.dev17222
    # 1.0dev123 -> 1.0.dev123
    # 1.0.git123 -> 1.0.dev123
    # 1.0.bzr123 -> 1.0.dev123
    # 0.1a0dev.123 -> 0.1a0.dev123
    # PyPI stats: ~150 (~4%) better
    rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs)
    # Clean '.pre' (normalized from '-pre' above) instead of 'c' usage:
    # 0.2.pre1 -> 0.2c1
    # 0.2-c1 -> 0.2c1
    # 1.0preview123 -> 1.0c123
    # PyPI stats: ~21 (0.62%) better
    rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs)
    # Tcl/Tk uses "px" for their post release markers
    rs = re.sub(r"p(\d+)$", r".post\1", rs)
    try:
        _normalized_key(rs)
    except UnsupportedVersionError:
        # Still not parseable after all passes -- give up.
        rs = None
    return rs
#
# Legacy version processing (distribute-compatible)
#
# Tokeniser for legacy (distribute-style) versions: alphabetic runs,
# numeric runs, and the separators '.' and '-'.
_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I)
# Canonical replacements for legacy tags; None means "drop this part".
_VERSION_REPLACE = {
    'pre': 'c',
    'preview': 'c',
    '-': 'final-',
    'rc': 'c',
    'dev': '@',
    '': None,
    '.': None,
}
def _legacy_key(s):
    """Return a setuptools/distribute-compatible sort key tuple for *s*."""
    def get_parts(s):
        # Tokenise, canonicalise tags, zero-pad numbers so string
        # comparison orders them numerically, and '*'-prefix alpha tags
        # (which therefore sort before any padded number).
        result = []
        for p in _VERSION_PART.split(s.lower()):
            p = _VERSION_REPLACE.get(p, p)
            if p:
                if '0' <= p[:1] <= '9':
                    p = p.zfill(8)
                else:
                    p = '*' + p
                result.append(p)
        # '*final' sorts after pre-release tags like '*a', '*c', '*@'.
        result.append('*final')
        return result
    result = []
    for p in get_parts(s):
        if p.startswith('*'):
            if p < '*final':
                # Drop 'final-' separators preceding a pre-release tag.
                while result and result[-1] == '*final-':
                    result.pop()
            # remove trailing zeros from each series of numeric parts
            while result and result[-1] == '00000000':
                result.pop()
        result.append(p)
    return tuple(result)
class LegacyVersion(Version):
    """Version with setuptools/distribute-compatible ordering semantics."""

    def parse(self, s):
        return _legacy_key(s)

    @property
    def is_prerelease(self):
        # Tagged parts ('*...') sorting before '*final' (e.g. '*a', '*c',
        # '*@' for dev) mark a pre-release.
        return any(isinstance(part, string_types) and
                   part.startswith('*') and part < '*final'
                   for part in self._parts)
class LegacyMatcher(Matcher):
    """Matcher for setuptools/distribute-style ('legacy') versions."""
    version_class = LegacyVersion

    _operators = dict(Matcher._operators)
    _operators['~='] = '_match_compatible'

    # Leading dotted-numeric part of a constraint, e.g. '1.4' in '1.4b1'.
    # Raw string: '\d' in a plain literal is an invalid escape sequence
    # (DeprecationWarning today, an error in future Python versions).
    numeric_re = re.compile(r'^(\d+(\.\d+)*)')

    def _match_compatible(self, version, constraint, prefix):
        """Implement '~=': at least *constraint*, within its release series."""
        if version < constraint:
            return False
        m = self.numeric_re.match(str(constraint))
        if not m:
            # No numeric prefix to define a series -- accept permissively.
            logger.warning('Cannot compute compatible match for version %s '
                           ' and constraint %s', version, constraint)
            return True
        s = m.groups()[0]
        if '.' in s:
            # Drop the final component: compatible with '1.4.5' means any
            # version in the '1.4' series at or above it.
            s = s.rsplit('.', 1)[0]
        return _match_prefix(version, s)
#
# Semantic versioning
#
# Semantic version: MAJOR.MINOR.PATCH with optional '-prerelease' and
# '+build' dotted suffixes (case-insensitive).
_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)'
                        r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?'
                        r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I)
def is_semver(s):
    # Returns the match object (truthy), not a bool; callers use both
    # the boolean interpretation and the captured groups.
    return _SEMVER_RE.match(s)
def _semantic_key(s):
    """Return a sort key ((major, minor, patch), pre, build) for semver *s*.

    Raises UnsupportedVersionError if *s* is not valid semantic versioning.
    """
    def make_tuple(s, absent):
        # Convert a '-pre'/'+build' suffix into a comparison tuple, using
        # *absent* as the sentinel when the suffix is missing.
        if s is None:
            result = (absent,)
        else:
            parts = s[1:].split('.')
            # We can't compare ints and strings on Python 3, so fudge it
            # by zero-filling numeric values so simulate a numeric comparison
            result = tuple([p.zfill(8) if p.isdigit() else p for p in parts])
        return result
    m = is_semver(s)
    if not m:
        raise UnsupportedVersionError(s)
    groups = m.groups()
    major, minor, patch = [int(i) for i in groups[:3]]
    # choose the '|' and '*' so that versions sort correctly
    pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*')
    return (major, minor, patch), pre, build
class SemanticVersion(Version):
    """Version following semantic-versioning (MAJOR.MINOR.PATCH) rules."""
    def parse(self, s):
        return _semantic_key(s)
    @property
    def is_prerelease(self):
        # '|' is the sentinel _semantic_key uses for "no prerelease part".
        return self._parts[1][0] != '|'
class SemanticMatcher(Matcher):
    """Matcher using SemanticVersion with the default operator set."""
    version_class = SemanticVersion
class VersionScheme(object):
    """Bundle of a sort-key function, a Matcher class and an optional
    suggester, together forming one named versioning scheme."""

    def __init__(self, key, matcher, suggester=None):
        self.key = key
        self.matcher = matcher
        self.suggester = suggester

    def is_valid_version(self, s):
        """Return True if *s* parses under this scheme's version class."""
        try:
            self.matcher.version_class(s)
        except UnsupportedVersionError:
            return False
        return True

    def is_valid_matcher(self, s):
        """Return True if *s* is a parseable requirement string."""
        try:
            self.matcher(s)
        except UnsupportedVersionError:
            return False
        return True

    def is_valid_constraint_list(self, s):
        """
        Used for processing some metadata fields
        """
        # Wrap the bare constraint list in a dummy requirement so the
        # matcher's full parser can validate it.
        return self.is_valid_matcher('dummy_name (%s)' % s)

    def suggest(self, s):
        """Return a suggested valid version for *s*, or None when no
        suggester is configured."""
        if self.suggester is None:
            return None
        return self.suggester(s)
# Registry of the known versioning schemes by name.
_SCHEMES = {
    'normalized': VersionScheme(_normalized_key, NormalizedMatcher,
                                _suggest_normalized_version),
    # Legacy strings are always "valid", so suggesting is the identity.
    # Note: VersionScheme.suggest() invokes self.suggester(s) with a
    # single argument, so the suggester must be a one-argument callable
    # (a two-argument 'lambda self, s' would raise TypeError when called).
    'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda s: s),
    'semantic': VersionScheme(_semantic_key, SemanticMatcher,
                              _suggest_semantic_version),
}
# 'default' is an alias for the PEP 440 ('normalized') scheme.
_SCHEMES['default'] = _SCHEMES['normalized']


def get_scheme(name):
    """Return the named VersionScheme; raises ValueError for unknown names."""
    if name not in _SCHEMES:
        raise ValueError('unknown scheme name: %r' % name)
    return _SCHEMES[name]
| gpl-3.0 |
demharters/git_scripts | dist_4gbx_stsamc.py | 2 | 1658 | #! /usr/bin/env python
from MDAnalysis import *
#from MDAnalysis.analysis.align import *
import numpy
import math
# Analysis script: per-frame inter-chain distances and inter-helix angle
# for a two-chain system.  File names and residue selections are
# hard-coded for this particular setup.
u = Universe("init.pdb","temp.pos.pdb")
v = Universe("init.pdb")  # NOTE(review): 'v' is never used below
# residues
a1 = u.selectAtoms("segid A and resid 46")
b1 = u.selectAtoms("segid B and resid 90")
a2 = u.selectAtoms("segid A and resid 69")
b2 = u.selectAtoms("segid B and resid 62")
a3 = u.selectAtoms("segid A and resid 77")
b3 = u.selectAtoms("segid B and resid 51")
# helices
a4 = u.selectAtoms("segid A and resid 46:77")
b4 = u.selectAtoms("segid B and resid 51:90")
# Output files: one line per trajectory frame.
f = open('helix_dist','w')
g = open('angle','w')
for ts in u.trajectory:
    # Centre-of-mass separations for the three residue pairs and the
    # two helices.
    distance1 = numpy.linalg.norm(a1.centerOfMass() - b1.centerOfMass())
    distance2 = numpy.linalg.norm(a2.centerOfMass() - b2.centerOfMass())
    distance3 = numpy.linalg.norm(a3.centerOfMass() - b3.centerOfMass())
    distance4 = numpy.linalg.norm(a4.centerOfMass() - b4.centerOfMass())
    # First principal axis of each helix selection.
    a4_1,a4_2,a4_3 = a4.principalAxes()
    b4_1,b4_2,b4_3 = b4.principalAxes()
#	helix12_1,helix12_2,helix12_3 = helix12.principalAxes()
#	helix21_1,helix21_2,helix21_3 = helix21.principalAxes()
#	helix22_1,helix22_2,helix22_3 = helix22.principalAxes()
    # Angle between the helices' principal axes, folded into [0, 90]
    # since axis direction is sign-ambiguous.
    angle = math.degrees(math.acos(numpy.dot(a4_1,b4_1)))
#	angle2 = math.degrees(math.acos(numpy.dot(helix21_1,helix22_1)))
    if angle > 90:
        angle = 180-angle
#	print "%6i %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (ts.frame,rmsd0,rmsd1,rmsd2,distance1,distance2,angle1,angle2)
    f.write('%7.3f %7.3f % 7.3f % 7.3f\n' % (distance1,distance2,distance3,distance4))
    g.write('%7.3f\n' % angle)
f.close()
g.close()
| apache-2.0 |
chirilo/kuma | kuma/demos/admin.py | 22 | 6195 | from django.contrib import admin
from django.contrib.admin import helpers
from django.contrib.admin.utils import model_ngettext, get_deleted_objects
from django.db import router
from django.core.exceptions import PermissionDenied
from django.template.response import TemplateResponse
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext_lazy, ugettext as _
from kuma.core.managers import NamespacedTaggableManager
from taggit.forms import TagWidget
from .models import Submission
def censor_selected(modeladmin, request, queryset):
    """
    Censor the selected submissions, with confirmation interstitial.
    Largely stolen from django.contrib.admin.actions.delete_selected
    """
    opts = modeladmin.model._meta
    app_label = opts.app_label
    # Check that the user has delete permission for the actual model
    if not modeladmin.has_delete_permission(request):
        raise PermissionDenied
    # The user has already confirmed the deletion.
    # Do the deletion and return a None to display the change list view again.
    if request.POST.get('post'):
        # Optional replacement URL supplied via the confirmation form.
        censored_url = request.POST.get('censored_url', None)
        n = queryset.count()
        if n:
            for obj in queryset:
                obj.censor(url=censored_url)
                obj_display = force_unicode(obj)
                modeladmin.message_user(request, _("Censored %(item)s") % {
                    "item": obj_display
                })
            modeladmin.message_user(
                request,
                _("Successfully censored %(count)d %(items)s.") % {
                    "count": n, "items": model_ngettext(modeladmin.opts, n)
                })
        # Return None to display the change list page again.
        return None
    if len(queryset) == 1:
        objects_name = force_unicode(opts.verbose_name)
    else:
        objects_name = force_unicode(opts.verbose_name_plural)
    context = {
        "title": _("Are you sure?"),
        "object_name": objects_name,
        "queryset": queryset,
        "opts": opts,
        "app_label": app_label,
        "action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
    }
    # Display the confirmation page
    return TemplateResponse(
        request,
        'admin/demos/submission/censor_selected_confirmation.html',
        context, current_app=modeladmin.admin_site.name)
censor_selected.short_description = ugettext_lazy("Censor selected %(verbose_name_plural)s")
def delete_selected(modeladmin, request, queryset):
    """
    The out-of-box Django delete never calls Submission.delete(), so this is a
    mostly redundant lift-and-hack to ensure that happens. This is important
    because Submission.delete() also cleans up its uploaded files.
    See also: https://docs.djangoproject.com/en/dev/ref/contrib/admin/actions/
    """
    opts = modeladmin.model._meta
    app_label = opts.app_label
    # Check that the user has delete permission for the actual model
    if not modeladmin.has_delete_permission(request):
        raise PermissionDenied
    using = router.db_for_write(modeladmin.model)
    # Populate deletable_objects, a data structure of all related objects that
    # will also be deleted.
    deletable_objects, perms_needed, protected = get_deleted_objects(
        queryset, opts, request.user, modeladmin.admin_site, using)
    # The user has already confirmed the deletion.
    # Do the deletion and return a None to display the change list view again.
    if request.POST.get('post'):
        if perms_needed:
            raise PermissionDenied
        n = queryset.count()
        if n:
            for obj in queryset:
                obj_display = force_unicode(obj)
                modeladmin.log_deletion(request, obj, obj_display)
                # The model delete() also removes uploaded files -- the
                # whole point of overriding this admin action.
                obj.delete()
                modeladmin.message_user(
                    request,
                    _("Deleted and uploaded files for %(item)s") % {
                        "item": obj_display
                    })
            modeladmin.message_user(
                request,
                _("Successfully deleted %(count)d %(items)s.") % {
                    "count": n, "items": model_ngettext(modeladmin.opts, n)
                })
        # Return None to display the change list page again.
        return None
    if len(queryset) == 1:
        objects_name = force_unicode(opts.verbose_name)
    else:
        objects_name = force_unicode(opts.verbose_name_plural)
    if perms_needed or protected:
        title = _("Cannot delete %(name)s") % {"name": objects_name}
    else:
        title = _("Are you sure?")
    context = {
        "title": title,
        "object_name": objects_name,
        "deletable_objects": [deletable_objects],
        "queryset": queryset,
        "perms_lacking": perms_needed,
        "protected": protected,
        "opts": opts,
        "app_label": app_label,
        "action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
    }
    # Display the confirmation page
    return TemplateResponse(request, modeladmin.delete_selected_confirmation_template or [
        "admin/%s/%s/delete_selected_confirmation.html" % (app_label, opts.object_name.lower()),
        "admin/%s/delete_selected_confirmation.html" % app_label,
        "admin/delete_selected_confirmation.html"
    ], context, current_app=modeladmin.admin_site.name)
delete_selected.short_description = ugettext_lazy("Delete selected %(verbose_name_plural)s")
class SubmissionAdmin(admin.ModelAdmin):
    """Admin for demo Submissions, wiring in the custom delete/censor
    actions defined above."""
    actions = (delete_selected, censor_selected)
    list_display = ('title', 'creator', 'featured', 'censored', 'hidden',
                    'taggit_tags', 'modified')
    list_editable = ('featured', 'taggit_tags')
    search_fields = ('title', 'summary', 'description', 'taggit_tags__name')
    list_filter = ('censored', 'hidden', 'created', 'modified')
    readonly_fields = ('censored',)
    formfield_overrides = {
        NamespacedTaggableManager: {
            "widget": TagWidget(attrs={"size": 70})
        }
    }
    def get_queryset(self, request):
        # Use the unfiltered admin manager so hidden/censored submissions
        # are visible in the admin change list.
        return Submission.admin_manager.all()
admin.site.register(Submission, SubmissionAdmin)
| mpl-2.0 |
Fl0rianFischer/sme_odoo | openerp/tools/parse_version.py | 70 | 3507 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
## this functions are taken from the setuptools package (version 0.6c8)
## http://peak.telecommunity.com/DevCenter/PkgResources#parsing-utilities
import re
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {'pre':'c', 'preview':'c','-':'final-','_':'final-','rc':'c','dev':'@','saas':'','~':''}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part,part)
if not part or part=='.':
continue
if part[:1] in '0123456789':
yield part.zfill(8) # pad for numeric comparison
else:
yield '*'+part
yield '*final' # ensure that alpha/beta/candidate are before final
def parse_version(s):
    """Convert a version string to a chronologically-sortable key

    This is a rough cross between distutils' StrictVersion and LooseVersion;
    if you give it versions that would work with StrictVersion, then it behaves
    the same; otherwise it acts like a slightly-smarter LooseVersion. It is
    *possible* to create pathological version coding schemes that will fool
    this parser, but they should be very rare in practice.

    The returned value will be a tuple of strings.  Numeric portions of the
    version are padded to 8 digits so they will compare numerically, but
    without relying on how numbers compare relative to strings.  Dots are
    dropped, but dashes are retained.  Trailing zeros between alpha segments
    or dashes are suppressed, so that e.g. "2.4.0" is considered the same as
    "2.4". Alphanumeric parts are lower-cased.

    The algorithm assumes that strings like "-" and any alpha string that
    alphabetically follows "final" represents a "patch level".  So, "2.4-1"
    is assumed to be a branch or patch of "2.4", and therefore "2.4.1" is
    considered newer than "2.4-1", which in turn is newer than "2.4".

    Strings like "a", "b", "c", "alpha", "beta", "candidate" and so on (that
    come before "final" alphabetically) are assumed to be pre-release versions,
    so that the version "2.4" is considered newer than "2.4a1".

    Finally, to handle miscellaneous cases, the strings "pre", "preview", and
    "rc" are treated as if they were "c", i.e. as though they were release
    candidates, and therefore are not as new as a version string that does not
    contain them.
    """
    parts = []
    # A missing/empty version defaults to '0.1'.
    for part in _parse_version_parts((s or '0.1').lower()):
        if part.startswith('*'):
            if part<'*final': # remove '-' before a prerelease tag
                while parts and parts[-1]=='*final-': parts.pop()
            # remove trailing zeros from each series of numeric parts
            while parts and parts[-1]=='00000000':
                parts.pop()
        parts.append(part)
    return tuple(parts)
if __name__ == '__main__':
    # Self-test harness.  NOTE(review): Python 2 only -- it uses `print`
    # statements and the builtin reduce().
    def cmp(a, b):
        """Assert a < b and return b, so reduce() chains the comparisons."""
        msg = '%s < %s == %s' % (a, b, a < b)
        assert a < b, msg
        return b
    def chk(lst, verbose=False):
        """Check that parse_version keys for lst are strictly increasing."""
        pvs = []
        for v in lst:
            pv = parse_version(v)
            pvs.append(pv)
            if verbose:
                print v, pv
        reduce(cmp, pvs)
    chk(('0', '4.2', '4.2.3.4', '5.0.0-alpha', '5.0.0-rc1', '5.0.0-rc1.1', '5.0.0_rc2', '5.0.0_rc3', '5.0.0'), False)
    chk(('5.0.0-0_rc3', '5.0.0-1dev', '5.0.0-1'), False)
| gpl-3.0 |
masterpowers/angular-laravel | node_modules/laravel-elixir/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/android.py | 446 | 43487 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This generates makefiles suitable for inclusion into the Android build system
# via an Android.mk file. It is based on make.py, the standard makefile
# generator.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level GypAndroid.mk. This means that all
# variables in .mk-files clobber one another, and furthermore that any
# variables set potentially clash with other Android build system variables.
# Try to avoid setting global variables where possible.
import gyp
import gyp.common
import gyp.generator.make as make # Reuse global functions from make backend.
import os
import re
import subprocess
# Mapping from gyp's generic placeholder names to their Android-specific
# expansions; consumed by gyp core when expanding target specs for this
# generator. Values beginning with '$(' are deferred to make-time expansion.
generator_default_variables = {
  'OS': 'android',
  'EXECUTABLE_PREFIX': '',
  'EXECUTABLE_SUFFIX': '',
  'STATIC_LIB_PREFIX': 'lib',
  'SHARED_LIB_PREFIX': 'lib',
  'STATIC_LIB_SUFFIX': '.a',
  'SHARED_LIB_SUFFIX': '.so',
  'INTERMEDIATE_DIR': '$(gyp_intermediate_dir)',
  'SHARED_INTERMEDIATE_DIR': '$(gyp_shared_intermediate_dir)',
  'PRODUCT_DIR': '$(gyp_shared_intermediate_dir)',
  'SHARED_LIB_DIR': '$(builddir)/lib.$(TOOLSET)',
  'LIB_DIR': '$(obj).$(TOOLSET)',
  'RULE_INPUT_ROOT': '%(INPUT_ROOT)s',  # This gets expanded by Python.
  'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s',  # This gets expanded by Python.
  'RULE_INPUT_PATH': '$(RULE_SOURCES)',
  'RULE_INPUT_EXT': '$(suffix $<)',
  'RULE_INPUT_NAME': '$(notdir $<)',
  'CONFIGURATION_NAME': '$(GYP_CONFIGURATION)',
}

# Make supports multiple toolsets
generator_supports_multiple_toolsets = True

# Generator-specific gyp specs.
generator_additional_non_configuration_keys = [
  # Boolean to declare that this target does not want its name mangled.
  'android_unmangled_name',
]
generator_additional_path_sections = []
generator_extra_sources_for_rules = []

# Appended once after all per-target .mk files have been emitted.
SHARED_FOOTER = """\
# "gyp_all_modules" is a concatenation of the "gyp_all_modules" targets from
# all the included sub-makefiles. This is just here to clarify.
gyp_all_modules:
"""
# Banner written at the top of every generated .mk file.
header = """\
# This file is generated by gyp; do not edit.
"""

# Include directories the Android build system already supplies by default;
# NormalizeIncludePaths() filters these out of LOCAL_C_INCLUDES.
android_standard_include_paths = set([
    # JNI_H_INCLUDE in build/core/binary.mk
    'dalvik/libnativehelper/include/nativehelper',
    # from SRC_HEADERS in build/core/config.mk
    'system/core/include',
    'hardware/libhardware/include',
    'hardware/libhardware_legacy/include',
    'hardware/ril/include',
    'dalvik/libnativehelper/include',
    'frameworks/native/include',
    'frameworks/native/opengl/include',
    'frameworks/base/include',
    'frameworks/base/opengl/include',
    'frameworks/base/native/include',
    'external/skia/include',
    # TARGET_C_INCLUDES in build/core/combo/TARGET_linux-arm.mk
    'bionic/libc/arch-arm/include',
    'bionic/libc/include',
    'bionic/libstdc++/include',
    'bionic/libc/kernel/common',
    'bionic/libc/kernel/arch-arm',
    'bionic/libm/include',
    'bionic/libm/include/arm',
    'bionic/libthread_db/include',
    ])

# Map gyp target types to Android module classes.
MODULE_CLASSES = {
    'static_library': 'STATIC_LIBRARIES',
    'shared_library': 'SHARED_LIBRARIES',
    'executable': 'EXECUTABLES',
}
def IsCPPExtension(ext):
  """Return True when the make backend classifies extension *ext* as C++."""
  kind = make.COMPILABLE_EXTENSIONS.get(ext)
  return kind == 'cxx'
def Sourceify(path):
  """Return *path* unchanged.

  Other gyp backends rewrite paths into source-directory form here; the
  Android backend does not support options.generator_output, so this is a
  deliberate no-op kept for interface parity with make.py.
  """
  return path
# Map from qualified target to path to output.
# For Android, the target of these maps is a tuple ('static', 'modulename'),
# ('dynamic', 'modulename'), or ('path', 'some/path') instead of a string,
# since we link by module.
# Populated as a side effect of AndroidMkWriter.Write() for each target.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class AndroidMkWriter(object):
  """AndroidMkWriter packages up the writing of one target-specific Android.mk.

  Its only real entry point is Write(), and is mostly used for namespacing.
  """

  def __init__(self, android_top_dir):
    # Root of the Android source tree; NormalizeIncludePaths() relativizes
    # absolute include paths against it.
    self.android_top_dir = android_top_dir
  def Write(self, qualified_target, relative_target, base_path, output_filename,
            spec, configs, part_of_all):
    """The main entry point: writes a .mk file for a single target.

    Arguments:
      qualified_target: target we're generating
      relative_target: qualified target name relative to the root
      base_path: path relative to source root we're building in, used to resolve
                 target-relative paths
      output_filename: output .mk file name to write
      spec, configs: gyp info
      part_of_all: flag indicating this target is part of 'all'
    Returns:
      The Android module name emitted for this target.
    Side effects:
      Writes output_filename and records this target in the module-level
      target_outputs / target_link_deps maps.
    """
    gyp.common.EnsureDirExists(output_filename)

    self.fp = open(output_filename, 'w')

    self.fp.write(header)

    self.qualified_target = qualified_target
    self.relative_target = relative_target
    self.path = base_path
    self.target = spec['target_name']
    self.type = spec['type']
    self.toolset = spec['toolset']

    deps, link_deps = self.ComputeDeps(spec)

    # Some of the generation below can add extra output, sources, or
    # link dependencies. All of the out params of the functions that
    # follow use names like extra_foo.
    extra_outputs = []
    extra_sources = []

    self.android_class = MODULE_CLASSES.get(self.type, 'GYP')
    self.android_module = self.ComputeAndroidModule(spec)
    (self.android_stem, self.android_suffix) = self.ComputeOutputParts(spec)
    self.output = self.output_binary = self.ComputeOutput(spec)

    # Standard header.
    self.WriteLn('include $(CLEAR_VARS)\n')

    # Module class and name.
    self.WriteLn('LOCAL_MODULE_CLASS := ' + self.android_class)
    self.WriteLn('LOCAL_MODULE := ' + self.android_module)
    # Only emit LOCAL_MODULE_STEM if it's different to LOCAL_MODULE.
    # The library module classes fail if the stem is set. ComputeOutputParts
    # makes sure that stem == modulename in these cases.
    if self.android_stem != self.android_module:
      self.WriteLn('LOCAL_MODULE_STEM := ' + self.android_stem)
    self.WriteLn('LOCAL_MODULE_SUFFIX := ' + self.android_suffix)
    self.WriteLn('LOCAL_MODULE_TAGS := optional')
    if self.toolset == 'host':
      self.WriteLn('LOCAL_IS_HOST_MODULE := true')

    # Grab output directories; needed for Actions and Rules.
    self.WriteLn('gyp_intermediate_dir := $(call local-intermediates-dir)')
    self.WriteLn('gyp_shared_intermediate_dir := '
                 '$(call intermediates-dir-for,GYP,shared)')
    self.WriteLn()

    # List files this target depends on so that actions/rules/copies/sources
    # can depend on the list.
    # TODO: doesn't pull in things through transitive link deps; needed?
    target_dependencies = [x[1] for x in deps if x[0] == 'path']
    self.WriteLn('# Make sure our deps are built first.')
    self.WriteList(target_dependencies, 'GYP_TARGET_DEPENDENCIES',
                   local_pathify=True)

    # Actions must come first, since they can generate more OBJs for use below.
    if 'actions' in spec:
      self.WriteActions(spec['actions'], extra_sources, extra_outputs)

    # Rules must be early like actions.
    if 'rules' in spec:
      self.WriteRules(spec['rules'], extra_sources, extra_outputs)

    if 'copies' in spec:
      self.WriteCopies(spec['copies'], extra_outputs)

    # GYP generated outputs.
    self.WriteList(extra_outputs, 'GYP_GENERATED_OUTPUTS', local_pathify=True)

    # Set LOCAL_ADDITIONAL_DEPENDENCIES so that Android's build rules depend
    # on both our dependency targets and our generated files.
    self.WriteLn('# Make sure our deps and generated files are built first.')
    self.WriteLn('LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) '
                 '$(GYP_GENERATED_OUTPUTS)')
    self.WriteLn()

    # Sources.
    if spec.get('sources', []) or extra_sources:
      self.WriteSources(spec, configs, extra_sources)

    self.WriteTarget(spec, configs, deps, link_deps, part_of_all)

    # Update global list of target outputs, used in dependency tracking.
    target_outputs[qualified_target] = ('path', self.output_binary)

    # Update global list of link dependencies.
    if self.type == 'static_library':
      target_link_deps[qualified_target] = ('static', self.android_module)
    elif self.type == 'shared_library':
      target_link_deps[qualified_target] = ('shared', self.android_module)
    self.fp.close()
    return self.android_module
  def WriteActions(self, actions, extra_sources, extra_outputs):
    """Write Makefile code for any 'actions' from the gyp input.

    extra_sources: a list that will be filled in with newly generated source
                   files, if any
    extra_outputs: a list that will be filled in with any outputs of these
                   actions (used to make other pieces dependent on these
                   actions)
    """
    for action in actions:
      name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
                                                      action['action_name']))
      self.WriteLn('### Rules for action "%s":' % action['action_name'])
      inputs = action['inputs']
      outputs = action['outputs']

      # Build up a list of outputs.
      # Collect the output dirs we'll need.
      dirs = set()
      for out in outputs:
        if not out.startswith('$'):
          print ('WARNING: Action for target "%s" writes output to local path '
                 '"%s".' % (self.target, out))
        # NOTE: 'dir' shadows the builtin; kept as-is from the original.
        dir = os.path.split(out)[0]
        if dir:
          dirs.add(dir)
      if int(action.get('process_outputs_as_sources', False)):
        extra_sources += outputs

      # Prepare the actual command.
      command = gyp.common.EncodePOSIXShellList(action['action'])
      if 'message' in action:
        quiet_cmd = 'Gyp action: %s ($@)' % action['message']
      else:
        quiet_cmd = 'Gyp action: %s ($@)' % name
      if len(dirs) > 0:
        command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command

      cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
      command = cd_action + command

      # The makefile rules are all relative to the top dir, but the gyp actions
      # are defined relative to their containing dir. This replaces the gyp_*
      # variables for the action rule with an absolute version so that the
      # output goes in the right place.
      # Only write the gyp_* rules for the "primary" output (:1);
      # it's superfluous for the "extra outputs", and this avoids accidentally
      # writing duplicate dummy rules for those outputs.
      main_output = make.QuoteSpaces(self.LocalPathify(outputs[0]))
      self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
      self.WriteLn('%s: gyp_intermediate_dir := '
                   '$(abspath $(gyp_intermediate_dir))' % main_output)
      self.WriteLn('%s: gyp_shared_intermediate_dir := '
                   '$(abspath $(gyp_shared_intermediate_dir))' % main_output)

      # Android's envsetup.sh adds a number of directories to the path including
      # the built host binary directory. This causes actions/rules invoked by
      # gyp to sometimes use these instead of system versions, e.g. bison.
      # The built host binaries may not be suitable, and can cause errors.
      # So, we remove them from the PATH using the ANDROID_BUILD_PATHS variable
      # set by envsetup.
      self.WriteLn('%s: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))'
                   % main_output)
      for input in inputs:
        assert ' ' not in input, (
            "Spaces in action input filenames not supported (%s)" % input)
      for output in outputs:
        assert ' ' not in output, (
            "Spaces in action output filenames not supported (%s)" % output)

      self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
                   (main_output, ' '.join(map(self.LocalPathify, inputs))))
      self.WriteLn('\t@echo "%s"' % quiet_cmd)
      self.WriteLn('\t$(hide)%s\n' % command)
      for output in outputs[1:]:
        # Make each output depend on the main output, with an empty command
        # to force make to notice that the mtime has changed.
        self.WriteLn('%s: %s ;' % (self.LocalPathify(output), main_output))

      extra_outputs += outputs
      self.WriteLn()

    self.WriteLn()
  def WriteRules(self, rules, extra_sources, extra_outputs):
    """Write Makefile code for any 'rules' from the gyp input.

    extra_sources: a list that will be filled in with newly generated source
                   files, if any
    extra_outputs: a list that will be filled in with any outputs of these
                   rules (used to make other pieces dependent on these rules)
    """
    if len(rules) == 0:
      return
    rule_trigger = '%s_rule_trigger' % self.android_module

    did_write_rule = False
    for rule in rules:
      if len(rule.get('rule_sources', [])) == 0:
        continue
      did_write_rule = True
      name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
                                                      rule['rule_name']))
      self.WriteLn('\n### Generated for rule "%s":' % name)
      self.WriteLn('# "%s":' % rule)
      inputs = rule.get('inputs')
      for rule_source in rule.get('rule_sources', []):
        (rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
        (rule_source_root, rule_source_ext) = \
            os.path.splitext(rule_source_basename)

        outputs = [self.ExpandInputRoot(out, rule_source_root,
                                        rule_source_dirname)
                   for out in rule['outputs']]

        dirs = set()
        for out in outputs:
          if not out.startswith('$'):
            print ('WARNING: Rule for target %s writes output to local path %s'
                   % (self.target, out))
          dir = os.path.dirname(out)
          if dir:
            dirs.add(dir)
        extra_outputs += outputs
        if int(rule.get('process_outputs_as_sources', False)):
          extra_sources.extend(outputs)

        components = []
        for component in rule['action']:
          component = self.ExpandInputRoot(component, rule_source_root,
                                           rule_source_dirname)
          if '$(RULE_SOURCES)' in component:
            component = component.replace('$(RULE_SOURCES)',
                                          rule_source)
          components.append(component)

        command = gyp.common.EncodePOSIXShellList(components)
        cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
        command = cd_action + command
        if dirs:
          command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command

        # We set up a rule to build the first output, and then set up
        # a rule for each additional output to depend on the first.
        # NOTE: relies on Python 2 map() returning a list (outputs[0] below).
        outputs = map(self.LocalPathify, outputs)
        main_output = outputs[0]
        self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
        self.WriteLn('%s: gyp_intermediate_dir := '
                     '$(abspath $(gyp_intermediate_dir))' % main_output)
        self.WriteLn('%s: gyp_shared_intermediate_dir := '
                     '$(abspath $(gyp_shared_intermediate_dir))' % main_output)

        # See explanation in WriteActions.
        self.WriteLn('%s: export PATH := '
                     '$(subst $(ANDROID_BUILD_PATHS),,$(PATH))' % main_output)

        main_output_deps = self.LocalPathify(rule_source)
        if inputs:
          main_output_deps += ' '
          main_output_deps += ' '.join([self.LocalPathify(f) for f in inputs])

        self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
                     (main_output, main_output_deps))
        self.WriteLn('\t%s\n' % command)
        for output in outputs[1:]:
          # Make each output depend on the main output, with an empty command
          # to force make to notice that the mtime has changed.
          self.WriteLn('%s: %s ;' % (output, main_output))
        self.WriteLn('.PHONY: %s' % (rule_trigger))
        self.WriteLn('%s: %s' % (rule_trigger, main_output))
        self.WriteLn('')
    if did_write_rule:
      extra_sources.append(rule_trigger)  # Force all rules to run.
      self.WriteLn('### Finished generating for all rules')
      self.WriteLn('')
  def WriteCopies(self, copies, extra_outputs):
    """Write Makefile code for any 'copies' from the gyp input.

    extra_outputs: a list that will be filled in with any outputs of this action
                   (used to make other pieces dependent on this action)
    """
    self.WriteLn('### Generated for copy rule.')

    variable = make.StringToMakefileVariable(self.relative_target + '_copies')
    outputs = []
    for copy in copies:
      for path in copy['files']:
        # The Android build system does not allow generation of files into the
        # source tree. The destination should start with a variable, which will
        # typically be $(gyp_intermediate_dir) or
        # $(gyp_shared_intermediate_dir). Note that we can't use an assertion
        # because some of the gyp tests depend on this.
        if not copy['destination'].startswith('$'):
          print ('WARNING: Copy rule for target %s writes output to '
                 'local path %s' % (self.target, copy['destination']))

        # LocalPathify() calls normpath, stripping trailing slashes.
        path = Sourceify(self.LocalPathify(path))
        filename = os.path.split(path)[1]
        output = Sourceify(self.LocalPathify(os.path.join(copy['destination'],
                                                          filename)))

        # Copy via Android's $(ACP) tool, creating the destination dir first.
        self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES) | $(ACP)' %
                     (output, path))
        self.WriteLn('\t@echo Copying: $@')
        self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
        self.WriteLn('\t$(hide) $(ACP) -rpf $< $@')
        self.WriteLn()
        outputs.append(output)
    self.WriteLn('%s = %s' % (variable,
                              ' '.join(map(make.QuoteSpaces, outputs))))
    extra_outputs.append('$(%s)' % variable)
    self.WriteLn()
  def WriteSourceFlags(self, spec, configs):
    """Write out the flags and include paths used to compile source files for
    the current target.

    Args:
      spec, configs: input from gyp.
    """
    # Python 2 dict.iteritems(); sorted for deterministic per-config output.
    for configname, config in sorted(configs.iteritems()):
      extracted_includes = []

      self.WriteLn('\n# Flags passed to both C and C++ files.')
      cflags, includes_from_cflags = self.ExtractIncludesFromCFlags(
          config.get('cflags', []) + config.get('cflags_c', []))
      extracted_includes.extend(includes_from_cflags)
      self.WriteList(cflags, 'MY_CFLAGS_%s' % configname)

      self.WriteList(config.get('defines'), 'MY_DEFS_%s' % configname,
                     prefix='-D', quoter=make.EscapeCppDefine)

      self.WriteLn('\n# Include paths placed before CFLAGS/CPPFLAGS')
      includes = list(config.get('include_dirs', []))
      includes.extend(extracted_includes)
      includes = map(Sourceify, map(self.LocalPathify, includes))
      includes = self.NormalizeIncludePaths(includes)
      self.WriteList(includes, 'LOCAL_C_INCLUDES_%s' % configname)

      self.WriteLn('\n# Flags passed to only C++ (and not C) files.')
      self.WriteList(config.get('cflags_cc'), 'LOCAL_CPPFLAGS_%s' % configname)

    # Select the per-configuration variables at make time.
    self.WriteLn('\nLOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) '
                 '$(MY_DEFS_$(GYP_CONFIGURATION))')
    # Undefine ANDROID for host modules
    # TODO: the source code should not use macro ANDROID to tell if it's host
    # or target module.
    if self.toolset == 'host':
      self.WriteLn('# Undefine ANDROID for host modules')
      self.WriteLn('LOCAL_CFLAGS += -UANDROID')
    self.WriteLn('LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) '
                 '$(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))')
    self.WriteLn('LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))')
  def WriteSources(self, spec, configs, extra_sources):
    """Write Makefile code for any 'sources' from the gyp input.
    These are source files necessary to build the current target.
    We need to handle shared_intermediate directory source files as
    a special case by copying them to the intermediate directory and
    treating them as a genereated sources. Otherwise the Android build
    rules won't pick them up.

    Args:
      spec, configs: input from gyp.
      extra_sources: Sources generated from Actions or Rules.
    """
    # Python 2: filter() returns lists here (concatenated / appended below).
    sources = filter(make.Compilable, spec.get('sources', []))
    generated_not_sources = [x for x in extra_sources if not make.Compilable(x)]
    extra_sources = filter(make.Compilable, extra_sources)

    # Determine and output the C++ extension used by these sources.
    # We simply find the first C++ file and use that extension.
    all_sources = sources + extra_sources
    local_cpp_extension = '.cpp'
    for source in all_sources:
      (root, ext) = os.path.splitext(source)
      if IsCPPExtension(ext):
        local_cpp_extension = ext
        break
    if local_cpp_extension != '.cpp':
      self.WriteLn('LOCAL_CPP_EXTENSION := %s' % local_cpp_extension)

    # We need to move any non-generated sources that are coming from the
    # shared intermediate directory out of LOCAL_SRC_FILES and put them
    # into LOCAL_GENERATED_SOURCES. We also need to move over any C++ files
    # that don't match our local_cpp_extension, since Android will only
    # generate Makefile rules for a single LOCAL_CPP_EXTENSION.
    local_files = []
    for source in sources:
      (root, ext) = os.path.splitext(source)
      if '$(gyp_shared_intermediate_dir)' in source:
        extra_sources.append(source)
      elif '$(gyp_intermediate_dir)' in source:
        extra_sources.append(source)
      elif IsCPPExtension(ext) and ext != local_cpp_extension:
        extra_sources.append(source)
      else:
        local_files.append(os.path.normpath(os.path.join(self.path, source)))

    # For any generated source, if it is coming from the shared intermediate
    # directory then we add a Make rule to copy them to the local intermediate
    # directory first. This is because the Android LOCAL_GENERATED_SOURCES
    # must be in the local module intermediate directory for the compile rules
    # to work properly. If the file has the wrong C++ extension, then we add
    # a rule to copy that to intermediates and use the new version.
    final_generated_sources = []
    # If a source file gets copied, we still need to add the orginal source
    # directory as header search path, for GCC searches headers in the
    # directory that contains the source file by default.
    origin_src_dirs = []
    for source in extra_sources:
      local_file = source
      if not '$(gyp_intermediate_dir)/' in local_file:
        basename = os.path.basename(local_file)
        local_file = '$(gyp_intermediate_dir)/' + basename
      (root, ext) = os.path.splitext(local_file)
      if IsCPPExtension(ext) and ext != local_cpp_extension:
        local_file = root + local_cpp_extension
      if local_file != source:
        self.WriteLn('%s: %s' % (local_file, self.LocalPathify(source)))
        self.WriteLn('\tmkdir -p $(@D); cp $< $@')
        origin_src_dirs.append(os.path.dirname(source))
      final_generated_sources.append(local_file)

    # We add back in all of the non-compilable stuff to make sure that the
    # make rules have dependencies on them.
    final_generated_sources.extend(generated_not_sources)
    self.WriteList(final_generated_sources, 'LOCAL_GENERATED_SOURCES')

    origin_src_dirs = gyp.common.uniquer(origin_src_dirs)
    origin_src_dirs = map(Sourceify, map(self.LocalPathify, origin_src_dirs))
    self.WriteList(origin_src_dirs, 'GYP_COPIED_SOURCE_ORIGIN_DIRS')

    self.WriteList(local_files, 'LOCAL_SRC_FILES')

    # Write out the flags used to compile the source; this must be done last
    # so that GYP_COPIED_SOURCE_ORIGIN_DIRS can be used as an include path.
    self.WriteSourceFlags(spec, configs)
  def ComputeAndroidModule(self, spec):
    """Return the Android module name used for a gyp spec.

    We use the complete qualified target name to avoid collisions between
    duplicate targets in different directories. We also add a suffix to
    distinguish gyp-generated module names.
    """
    if int(spec.get('android_unmangled_name', 0)):
      # Target opted out of mangling; shared libraries must already be lib*.
      assert self.type != 'shared_library' or self.target.startswith('lib')
      return self.target

    if self.type == 'shared_library':
      # For reasons of convention, the Android build system requires that all
      # shared library modules are named 'libfoo' when generating -l flags.
      prefix = 'lib_'
    else:
      prefix = ''

    if spec['toolset'] == 'host':
      suffix = '_host_gyp'
    else:
      suffix = '_gyp'

    if self.path:
      name = '%s%s_%s%s' % (prefix, self.path, self.target, suffix)
    else:
      name = '%s%s%s' % (prefix, self.target, suffix)

    return make.StringToMakefileVariable(name)
  def ComputeOutputParts(self, spec):
    """Return the 'output basename' of a gyp spec, split into filename + ext.

    Android libraries must be named the same thing as their module name,
    otherwise the linker can't find them, so product_name and so on must be
    ignored if we are building a library, and the "lib" prepending is
    not done for Android.

    Returns:
      A (target_stem, target_ext) tuple of strings.
    """
    assert self.type != 'loadable_module'  # TODO: not supported?

    target = spec['target_name']
    target_prefix = ''
    target_ext = ''
    if self.type == 'static_library':
      target = self.ComputeAndroidModule(spec)
      target_ext = '.a'
    elif self.type == 'shared_library':
      target = self.ComputeAndroidModule(spec)
      target_ext = '.so'
    elif self.type == 'none':
      target_ext = '.stamp'
    elif self.type != 'executable':
      print ("ERROR: What output file should be generated?",
             "type", self.type, "target", target)

    # product_* overrides apply only to non-library targets (see docstring).
    if self.type != 'static_library' and self.type != 'shared_library':
      target_prefix = spec.get('product_prefix', target_prefix)
      target = spec.get('product_name', target)
      product_ext = spec.get('product_extension')
      if product_ext:
        target_ext = '.' + product_ext

    target_stem = target_prefix + target
    return (target_stem, target_ext)
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
return ''.join(self.ComputeOutputParts(spec))
  def ComputeOutput(self, spec):
    """Return the 'output' (full output path) of a gyp spec.

    E.g., the loadable module 'foobar' in directory 'baz' will produce
    '$(obj)/baz/libfoobar.so'
    """
    if self.type == 'executable' and self.toolset == 'host':
      # We install host executables into shared_intermediate_dir so they can be
      # run by gyp rules that refer to PRODUCT_DIR.
      path = '$(gyp_shared_intermediate_dir)'
    elif self.type == 'shared_library':
      if self.toolset == 'host':
        path = '$(HOST_OUT_INTERMEDIATE_LIBRARIES)'
      else:
        path = '$(TARGET_OUT_INTERMEDIATE_LIBRARIES)'
    else:
      # Other targets just get built into their intermediate dir.
      if self.toolset == 'host':
        path = '$(call intermediates-dir-for,%s,%s,true)' % (self.android_class,
                                                             self.android_module)
      else:
        path = '$(call intermediates-dir-for,%s,%s)' % (self.android_class,
                                                        self.android_module)

    assert spec.get('product_dir') is None  # TODO: not supported?
    return os.path.join(path, self.ComputeOutputBasename(spec))
def NormalizeIncludePaths(self, include_paths):
""" Normalize include_paths.
Convert absolute paths to relative to the Android top directory;
filter out include paths that are already brought in by the Android build
system.
Args:
include_paths: A list of unprocessed include paths.
Returns:
A list of normalized include paths.
"""
normalized = []
for path in include_paths:
if path[0] == '/':
path = gyp.common.RelativePath(path, self.android_top_dir)
# Filter out the Android standard search path.
if path not in android_standard_include_paths:
normalized.append(path)
return normalized
def ExtractIncludesFromCFlags(self, cflags):
"""Extract includes "-I..." out from cflags
Args:
cflags: A list of compiler flags, which may be mixed with "-I.."
Returns:
A tuple of lists: (clean_clfags, include_paths). "-I.." is trimmed.
"""
clean_cflags = []
include_paths = []
for flag in cflags:
if flag.startswith('-I'):
include_paths.append(flag[2:])
else:
clean_cflags.append(flag)
return (clean_cflags, include_paths)
def ComputeAndroidLibraryModuleNames(self, libraries):
"""Compute the Android module names from libraries, ie spec.get('libraries')
Args:
libraries: the value of spec.get('libraries')
Returns:
A tuple (static_lib_modules, dynamic_lib_modules)
"""
static_lib_modules = []
dynamic_lib_modules = []
for libs in libraries:
# Libs can have multiple words.
for lib in libs.split():
# Filter the system libraries, which are added by default by the Android
# build system.
if (lib == '-lc' or lib == '-lstdc++' or lib == '-lm' or
lib.endswith('libgcc.a')):
continue
match = re.search(r'([^/]+)\.a$', lib)
if match:
static_lib_modules.append(match.group(1))
continue
match = re.search(r'([^/]+)\.so$', lib)
if match:
dynamic_lib_modules.append(match.group(1))
continue
# "-lstlport" -> libstlport
if lib.startswith('-l'):
if lib.endswith('_static'):
static_lib_modules.append('lib' + lib[2:])
else:
dynamic_lib_modules.append('lib' + lib[2:])
return (static_lib_modules, dynamic_lib_modules)
  def ComputeDeps(self, spec):
    """Compute the dependencies of a gyp spec.

    Returns a tuple (deps, link_deps), where each is a list of
    filenames that will need to be put in front of make for either
    building (deps) or linking (link_deps).
    """
    deps = []
    link_deps = []
    if 'dependencies' in spec:
      # Lookup in the module-level maps populated by earlier Write() calls;
      # generation order guarantees dependencies were processed first.
      deps.extend([target_outputs[dep] for dep in spec['dependencies']
                   if target_outputs[dep]])
      for dep in spec['dependencies']:
        if dep in target_link_deps:
          link_deps.append(target_link_deps[dep])
      deps.extend(link_deps)
    return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
  def WriteTargetFlags(self, spec, configs, link_deps):
    """Write Makefile code to specify the link flags and library dependencies.

    spec, configs: input from gyp.
    link_deps: link dependency list; see ComputeDeps()
    """
    # Python 2 dict.iteritems(); sorted for deterministic per-config output.
    for configname, config in sorted(configs.iteritems()):
      ldflags = list(config.get('ldflags', []))
      self.WriteLn('')
      self.WriteList(ldflags, 'LOCAL_LDFLAGS_%s' % configname)
    self.WriteLn('\nLOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))')

    # Libraries (i.e. -lfoo)
    libraries = gyp.common.uniquer(spec.get('libraries', []))
    static_libs, dynamic_libs = self.ComputeAndroidLibraryModuleNames(
        libraries)

    # Link dependencies (i.e. libfoo.a, libfoo.so)
    static_link_deps = [x[1] for x in link_deps if x[0] == 'static']
    shared_link_deps = [x[1] for x in link_deps if x[0] == 'shared']

    self.WriteLn('')
    self.WriteList(static_libs + static_link_deps,
                   'LOCAL_STATIC_LIBRARIES')
    self.WriteLn('# Enable grouping to fix circular references')
    self.WriteLn('LOCAL_GROUP_STATIC_LIBRARIES := true')
    self.WriteLn('')
    self.WriteList(dynamic_libs + shared_link_deps,
                   'LOCAL_SHARED_LIBRARIES')
  def WriteTarget(self, spec, configs, deps, link_deps, part_of_all):
    """Write Makefile code to produce the final target of the gyp spec.

    spec, configs: input from gyp.
    deps, link_deps: dependency lists; see ComputeDeps()
    part_of_all: flag indicating this target is part of 'all'
    """
    self.WriteLn('### Rules for final target.')

    if self.type != 'none':
      self.WriteTargetFlags(spec, configs, link_deps)

    # Add to the set of targets which represent the gyp 'all' target. We use the
    # name 'gyp_all_modules' as the Android build system doesn't allow the use
    # of the Make target 'all' and because 'all_modules' is the equivalent of
    # the Make target 'all' on Android.
    if part_of_all:
      self.WriteLn('# Add target alias to "gyp_all_modules" target.')
      self.WriteLn('.PHONY: gyp_all_modules')
      self.WriteLn('gyp_all_modules: %s' % self.android_module)
      self.WriteLn('')

    # Add an alias from the gyp target name to the Android module name. This
    # simplifies manual builds of the target, and is required by the test
    # framework.
    if self.target != self.android_module:
      self.WriteLn('# Alias gyp target name.')
      self.WriteLn('.PHONY: %s' % self.target)
      self.WriteLn('%s: %s' % (self.target, self.android_module))
      self.WriteLn('')

    # Add the command to trigger build of the target type depending
    # on the toolset. Ex: BUILD_STATIC_LIBRARY vs. BUILD_HOST_STATIC_LIBRARY
    # NOTE: This has to come last!
    modifier = ''
    if self.toolset == 'host':
      modifier = 'HOST_'
    if self.type == 'static_library':
      self.WriteLn('include $(BUILD_%sSTATIC_LIBRARY)' % modifier)
    elif self.type == 'shared_library':
      self.WriteLn('LOCAL_PRELINK_MODULE := false')
      self.WriteLn('include $(BUILD_%sSHARED_LIBRARY)' % modifier)
    elif self.type == 'executable':
      if self.toolset == 'host':
        self.WriteLn('LOCAL_MODULE_PATH := $(gyp_shared_intermediate_dir)')
      else:
        # Don't install target executables for now, as it results in them being
        # included in ROM. This can be revisited if there's a reason to install
        # them later.
        self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
      self.WriteLn('include $(BUILD_%sEXECUTABLE)' % modifier)
    else:
      # 'none' targets: emit a stamp module built via base_rules.mk so other
      # modules can still depend on this target's generated outputs.
      self.WriteLn('LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp')
      self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
      self.WriteLn()
      self.WriteLn('include $(BUILD_SYSTEM)/base_rules.mk')
      self.WriteLn()
      self.WriteLn('$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)')
      self.WriteLn('\t$(hide) echo "Gyp timestamp: $@"')
      self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
      self.WriteLn('\t$(hide) touch $@')
  def WriteList(self, value_list, variable=None, prefix='',
                quoter=make.QuoteIfNecessary, local_pathify=False):
    """Write a variable definition that is a list of values.

    E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
         foo = blaha blahb
    but in a pretty-printed style.
    """
    values = ''
    if value_list:
      value_list = [quoter(prefix + l) for l in value_list]
      if local_pathify:
        value_list = [self.LocalPathify(l) for l in value_list]
      values = ' \\\n\t' + ' \\\n\t'.join(value_list)
    # NOTE(review): every caller passes 'variable'; with the default None this
    # would emit a literal "None :=" line — confirm before relying on default.
    self.fp.write('%s :=%s\n\n' % (variable, values))
def WriteLn(self, text=''):
self.fp.write(text + '\n')
  def LocalPathify(self, path):
    """Convert a subdirectory-relative path into a normalized path which starts
    with the make variable $(LOCAL_PATH) (i.e. the top of the project tree).
    Absolute paths, or paths that contain variables, are just normalized."""
    if '$(' in path or os.path.isabs(path):
      # path is not a file in the project tree in this case, but calling
      # normpath is still important for trimming trailing slashes.
      return os.path.normpath(path)
    local_path = os.path.join('$(LOCAL_PATH)', self.path, path)
    local_path = os.path.normpath(local_path)
    # Check that normalizing the path didn't ../ itself out of $(LOCAL_PATH)
    # - i.e. that the resulting path is still inside the project tree. The
    # path may legitimately have ended up containing just $(LOCAL_PATH), though,
    # so we don't look for a slash.
    assert local_path.startswith('$(LOCAL_PATH)'), (
        'Path %s attempts to escape from gyp path %s !)' % (path, self.path))
    return local_path
def ExpandInputRoot(self, template, expansion, dirname):
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return path
def PerformBuild(data, configurations, params):
  """Run the Android build ('make gyp_all_modules') over the generated files.

  Args:
    data, configurations: gyp inputs, unused here (the android backend only
        supports the default configuration).
    params: generator params; params['options'].toplevel_dir locates the
        generated GypAndroid.mk.
  Raises:
    KeyError if ANDROID_BUILD_TOP is not in the environment (requires a
    lunch'd shell), and subprocess.CalledProcessError if make fails.
  """
  # The android backend only supports the default configuration.
  options = params['options']
  makefile = os.path.abspath(os.path.join(options.toplevel_dir,
                                          'GypAndroid.mk'))
  env = dict(os.environ)
  # ONE_SHOT_MAKEFILE tells the Android build to include only our makefile.
  env['ONE_SHOT_MAKEFILE'] = makefile
  arguments = ['make', '-C', os.environ['ANDROID_BUILD_TOP'], 'gyp_all_modules']
  # Parenthesized form prints identically under Python 2, keeps the module
  # parseable by Python 3, and matches the print(...) style used elsewhere
  # in this file (see WriteActions/WriteRules/WriteCopies).
  print('Building: %s' % arguments)
  subprocess.check_call(arguments, env=env)
def GenerateOutput(target_list, target_dicts, data, params):
  """Write the root GypAndroid.mk plus one .mk fragment per target.

  Returns None; output is the set of makefiles written to disk.
  """
  options = params['options']
  generator_flags = params.get('generator_flags', {})
  # NOTE(review): builddir_name, srcdir and toolsets are computed but not
  # used below in this function.
  builddir_name = generator_flags.get('output_dir', 'out')
  limit_to_target_all = generator_flags.get('limit_to_target_all', False)
  android_top_dir = os.environ.get('ANDROID_BUILD_TOP')
  assert android_top_dir, '$ANDROID_BUILD_TOP not set; you need to run lunch.'

  def CalculateMakefilePath(build_file, base_name):
    """Determine where to write a Makefile for a given gyp file."""
    # Paths in gyp files are relative to the .gyp file, but we want
    # paths relative to the source root for the master makefile. Grab
    # the path of the .gyp file as the base to relativize against.
    # E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.depth)
    # We write the file in the base_path directory.
    output_file = os.path.join(options.depth, base_path, base_name)
    assert not options.generator_output, (
        'The Android backend does not support options.generator_output.')
    # Recompute base_path against toplevel_dir for the caller's use.
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.toplevel_dir)
    return base_path, output_file

  # TODO: search for the first non-'Default' target. This can go
  # away when we add verification that all targets have the
  # necessary configurations.
  default_configuration = None
  toolsets = set([target_dicts[target]['toolset'] for target in target_list])
  for target in target_list:
    spec = target_dicts[target]
    if spec['default_configuration'] != 'Default':
      default_configuration = spec['default_configuration']
      break
  if not default_configuration:
    default_configuration = 'Default'

  srcdir = '.'
  makefile_name = 'GypAndroid' + options.suffix + '.mk'
  makefile_path = os.path.join(options.toplevel_dir, makefile_name)
  assert not options.generator_output, (
      'The Android backend does not support options.generator_output.')
  gyp.common.EnsureDirExists(makefile_path)
  root_makefile = open(makefile_path, 'w')
  root_makefile.write(header)

  # We set LOCAL_PATH just once, here, to the top of the project tree. This
  # allows all the other paths we use to be relative to the Android.mk file,
  # as the Android build system expects.
  root_makefile.write('\nLOCAL_PATH := $(call my-dir)\n')

  # Find the list of targets that derive from the gyp file(s) being built.
  needed_targets = set()
  for build_file in params['build_files']:
    for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
      needed_targets.add(target)

  build_files = set()
  include_list = set()
  android_modules = {}
  for qualified_target in target_list:
    build_file, target, toolset = gyp.common.ParseQualifiedTarget(
        qualified_target)
    relative_build_file = gyp.common.RelativePath(build_file,
                                                  options.toplevel_dir)
    build_files.add(relative_build_file)
    included_files = data[build_file]['included_files']
    for included_file in included_files:
      # The included_files entries are relative to the dir of the build file
      # that included them, so we have to undo that and then make them relative
      # to the root dir.
      relative_include_file = gyp.common.RelativePath(
          gyp.common.UnrelativePath(included_file, build_file),
          options.toplevel_dir)
      abs_include_file = os.path.abspath(relative_include_file)
      # If the include file is from the ~/.gyp dir, we should use absolute path
      # so that relocating the src dir doesn't break the path.
      if (params['home_dot_gyp'] and
          abs_include_file.startswith(params['home_dot_gyp'])):
        build_files.add(abs_include_file)
      else:
        build_files.add(relative_include_file)
    base_path, output_file = CalculateMakefilePath(build_file,
        target + '.' + toolset + options.suffix + '.mk')
    spec = target_dicts[qualified_target]
    configs = spec['configurations']
    # A target is part of 'all' when a requested gyp file needs it and it
    # does not suppress wildcard builds.
    part_of_all = (qualified_target in needed_targets and
                   not int(spec.get('suppress_wildcard', False)))
    if limit_to_target_all and not part_of_all:
      continue
    relative_target = gyp.common.QualifiedTarget(relative_build_file, target,
                                                 toolset)
    writer = AndroidMkWriter(android_top_dir)
    android_module = writer.Write(qualified_target, relative_target, base_path,
                                  output_file, spec, configs,
                                  part_of_all=part_of_all)
    # Duplicate Android module names would silently overwrite each other's
    # build rules, so abort generation with a diagnostic instead.
    if android_module in android_modules:
      print ('ERROR: Android module names must be unique. The following '
             'targets both generate Android module name %s.\n %s\n %s' %
             (android_module, android_modules[android_module],
              qualified_target))
      return
    android_modules[android_module] = qualified_target
    # Our root_makefile lives at the source root. Compute the relative path
    # from there to the output_file for including.
    mkfile_rel_path = gyp.common.RelativePath(output_file,
                                              os.path.dirname(makefile_path))
    include_list.add(mkfile_rel_path)
  root_makefile.write('GYP_CONFIGURATION ?= %s\n' % default_configuration)

  # Write out the sorted list of includes.
  root_makefile.write('\n')
  for include_file in sorted(include_list):
    root_makefile.write('include $(LOCAL_PATH)/' + include_file + '\n')
  root_makefile.write('\n')
  root_makefile.write(SHARED_FOOTER)
  root_makefile.close()
| mit |
iptvgratis/TUPLAY | resources/tools/server_rtmp.py | 4 | 20015 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# Analizador de RTMPs by Juarrox (juarrox@gmail.com)
# Version 0.3.4 (28.04.2015)
#------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
#------------------------------------------------------------
# Gracias a la librería plugintools de Jesús (www.mimediacenter.info
#------------------------------------------------------------
import os
import sys
import urllib
import urllib2
import re
import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin
import plugintools
from __main__ import *
addonName = xbmcaddon.Addon().getAddonInfo("name")
addonVersion = xbmcaddon.Addon().getAddonInfo("version")
addonId = xbmcaddon.Addon().getAddonInfo("id")
addonPath = xbmcaddon.Addon().getAddonInfo("path")
# Regex de canales
#from resources.regex.shidurlive import *
from resources.regex.vaughnlive import *
from resources.regex.ninestream import *
from resources.regex.vercosas import *
from resources.regex.castalba import *
from resources.regex.castdos import *
from resources.regex.directwatch import *
from resources.regex.freetvcast import *
from resources.regex.freebroadcast import *
from resources.regex.sawlive import *
from resources.regex.broadcastlive import *
from resources.regex.businessapp import *
from resources.regex.rdmcast import *
from resources.regex.dinozap import *
from resources.regex.streamingfreetv import *
from resources.regex.byetv import *
from resources.regex.ezcast import *
from resources.regex.ucaster import *
from resources.regex.iguide import *
# Ordered URL-substring -> (server name, timeout handling) dispatch table.
# Order matters and mirrors the original if/elif chain: e.g. 'rdmcast' and
# 'broadcastlive' both contain 'dcast', so they must be tested before it.
# Later duplicate branches of the original chain ('janjua', 'shidurlive',
# 'playfooty') were unreachable dead code and are not repeated here.
# Timeout handling:
#   None       -> leave the URL untouched
#   'plain'    -> append ' timeout=15' when no timeout is present
#   'conn_ok'  -> same, but keep a trailing 'conn=S:OK' as the last token
_RTMP_SERVER_RULES = [
    ('iguide.to', 'iguide', None),
    ('freetvcast.pw', 'freetvcast', None),
    ('9stream', '9stream', 'conn_ok'),
    ('businessapp1', 'businessapp1', None),
    ('miplayer.net', 'miplayernet', None),
    ('janjua', 'janjua', None),
    ('rdmcast', 'rdmcast', None),
    ('freebroadcast', 'freebroadcast', 'plain'),
    ('goodgame.ru', 'goodgame.ru', 'conn_ok'),
    ('hdcast', 'hdcast', 'conn_ok'),
    ('sharecast', 'sharecast', 'conn_ok'),
    ('cast247', 'cast247', None),
    ('castalba', 'castalba', None),
    ('direct2watch', 'direct2watch', None),
    ('vaughnlive', 'vaughnlive', None),
    ('sawlive', 'sawlive', None),
    ('streamingfreetv', 'streamingfreetv', None),
    ('totalplay', 'totalplay', 'conn_ok'),
    ('shidurlive', 'shidurlive', None),
    ('everyon', 'everyon', 'conn_ok'),
    ('iviplanet', 'iviplanet', 'conn_ok'),
    ('cxnlive', 'cxnlive', 'conn_ok'),
    ('ucaster', 'ucaster', 'conn_ok'),
    ('mediapro', 'mediapro', 'plain'),
    ('veemi', 'veemi', 'conn_ok'),
    ('yukons.net', 'yukons.net', 'conn_ok'),
    ('mips', 'mips', 'conn_ok'),
    ('zecast', 'zecast', 'conn_ok'),
    ('vertvdirecto', 'vertvdirecto', None),
    ('filotv', 'filotv', 'conn_ok'),
    ('dinozap', 'dinozap', 'conn_ok'),
    ('ezcast', 'ezcast', 'conn_ok'),
    ('flashstreaming', 'flashstreaming', 'plain'),
    ('multistream', 'multistream', 'conn_ok'),
    ('playfooty', 'playfooty', 'conn_ok'),
    ('flashtv', 'flashtv', 'conn_ok'),
    ('04stream', '04stream', 'conn_ok'),
    ('vercosas', 'vercosasgratis', None),
    ('broadcastlive', 'broadcastlive', None),
    ('dcast', 'dcast', 'conn_ok'),
    ('pvtserverz', 'pvtserverz', 'conn_ok'),
    ('byetv', 'byetv', 'conn_ok'),
]


def _add_rtmp_timeout(url, keep_conn_ok):
    """Return url with ' timeout=15' appended unless one is already present.

    Some servers require the expression 'conn=S:OK' to stay at the very end
    of the URL; when keep_conn_ok is true an existing trailing 'conn=S:OK'
    is re-appended after the timeout option.
    """
    if url.find("timeout") >= 0:
        return url
    if keep_conn_ok and url.endswith("conn=S:OK"):
        return url.replace("conn=S:OK", "").strip() + ' timeout=15 conn=S:OK'
    return url + ' timeout=15'


def server_rtmp(params):
    """Identify the streaming host of params['url'] and normalize the URL.

    Mutates params in place: params['url'] may gain an RTMP timeout option
    (see _add_rtmp_timeout) and params['server'] is set to the detected
    host name, or 'rtmp' when the URL matches no known host. The first
    matching substring in _RTMP_SERVER_RULES wins, exactly like the
    original if/elif chain this replaces.
    """
    plugintools.log('[%s %s].server_rtmp %s' % (addonName, addonVersion, repr(params)))
    url = params.get("url")
    for fragment, host, timeout_mode in _RTMP_SERVER_RULES:
        if url.find(fragment) >= 0:
            server = host
            if timeout_mode is not None:
                url = _add_rtmp_timeout(url, timeout_mode == 'conn_ok')
            break
    else:
        # Unknown host: play as generic RTMP with the safe timeout suffix.
        server = 'rtmp'
        url = _add_rtmp_timeout(url, True)
    params["url"] = url
    params["server"] = server
def launch_rtmp(params):
    """Dispatch playback of params['url'] to the matching host resolver.

    The host is recognised first from a trailing "[server]" tag in the
    title, then from substrings of the URL itself; unrecognised URLs are
    handed to the player as plain resolved RTMP.
    """
    plugintools.log('[%s %s].launch_rtmp %s' % (addonName, addonVersion, repr(params)))
    url = params.get("url")
    title = params.get("title")
    # Strip Kodi colour markup before inspecting the trailing server tag.
    title = title.replace("[/COLOR]", "")
    title = title.strip()
    plugintools.log("Vamos a buscar en el título: "+title)
    # First pass: explicit "[server]" tag at the end of the title.
    if title.endswith("[9stream]") == True:
        server = '9stream'
        ninestreams(params)
    elif title.endswith("[iguide]") == True:
        server = 'iguide'
        iguide0(params)
    elif title.endswith("[streamingfreetv]") == True:
        server = 'streamingfreetv'
        streamingfreetv0(params)
    elif title.endswith("[vercosasgratis]") == True:
        server = 'vercosasgratis'
        vercosas(params)
    elif title.endswith("[freebroadcast]") == True:
        server = 'freebroadcast'
        freebroadcast(params)
    elif title.endswith("[ucaster]") == True:
        server = 'ucaster'
        ucaster0(params)
    elif title.endswith("[direct2watch]") == True:
        server = 'direct2watch'
        directwatch(params)
    elif title.endswith("[shidurlive]") == True:
        server = 'shidurlive'
        plugintools.play_resolved_url(url)
    elif title.endswith("[vaughnlive]") == True:
        server = 'vaughnlive'
        resolve_vaughnlive(params)
    elif title.endswith("[cast247]") == True:
        server = 'cast247'
        castdos(params)
    elif title.endswith("[ezcast]") == True:
        server = 'ezcast'
        ezcast0(params)
    elif title.endswith("[businessapp1]") == True:
        server = 'businessapp'
        businessapp0(params)
    elif title.endswith("[miplayer.net]") == True:
        server = 'miplayernet'
        miplayernet0(params)
    elif title.endswith("[janjua]") == True:
        server = 'janjua'
        janjua0(params)
    elif title.endswith("[rdmcast]") == True:
        server = 'rdmcast'
        rdmcast0(params)
    elif title.endswith("[byetv]") == True:
        server = 'byetv'
        byetv0(params)
    # Second pass: no title tag matched, so guess the host from the URL.
    elif url.find("hdcast") >= 0:
        server = 'hdcast'
        plugintools.play_resolved_url(url)
    elif url.find("janjua") >= 0:
        server = 'janjua'
        janjua0(params)
    elif url.find("mips") >= 0:
        server = 'mips'
        plugintools.play_resolved_url(url)
    elif url.find("zecast") >= 0:
        server = 'zecast'
        plugintools.play_resolved_url(url)
    elif url.find("filotv") >= 0:
        server = 'filotv'
        print "filotv"
        plugintools.play_resolved_url(url)
    elif url.find("flashstreaming") >= 0:
        server = 'flashstreaming'
        plugintools.play_resolved_url(url)
    elif url.find("multistream") >= 0:
        server = 'multistream'
        print "multistream"
        plugintools.play_resolved_url(url)
    elif url.find("playfooty") >= 0:
        server = 'playfooty'
        plugintools.play_resolved_url(url)
    elif url.find("flashtv") >= 0:
        server = 'flashtv'
        print "flashtv"
        plugintools.play_resolved_url(url)
    elif url.find("freetvcast") >= 0:
        server = 'freetvcast'
        print "freetvcast"
        freetvcast(params)
    elif url.find("04stream") >= 0:
        server = '04stream'
        plugintools.play_resolved_url(url)
    elif url.find("sharecast") >= 0:
        server = 'sharecast'
        plugintools.play_resolved_url(url)
    elif url.find("sawlive") >= 0:
        server = 'sawlive'
        sawlive(params)
    elif url.find("goodcast") >= 0:
        server = 'goodcast'
        plugintools.play_resolved_url(url)
    elif url.find("broadcastlive") >= 0:
        server = 'broadcastlive'
        broadcastlive1(params)
    elif url.find("dinozap") >= 0:
        server = 'dinozap'
        dinozap0(params)
    elif url.find("dcast.tv") >= 0:
        server = 'dcast.tv'
        plugintools.play_resolved_url(url)
    elif url.find("castalba") >= 0:
        server = 'castalba'
        castalba(params)
    elif url.find("tutelehd.com") >= 0:
        server = 'tutelehd.com'
        plugintools.play_resolved_url(url)
    elif url.find("flexstream") >= 0:
        server = 'flexstream'
        plugintools.play_resolved_url(url)
    elif url.find("xxcast") >= 0:
        server = 'xxcast'
        plugintools.play_resolved_url(url)
    elif url.find("vipi.tv") >= 0:
        server = 'vipi.tv'
        plugintools.play_resolved_url(url)
    elif url.find("watchjsc") >= 0:
        server = 'watchjsc'
        plugintools.play_resolved_url(url)
    elif url.find("zenex.tv") >= 0:
        server = 'zenex.tv'
        plugintools.play_resolved_url(url)
    elif url.find("castto") >= 0:
        server = 'castto'
        plugintools.play_resolved_url(url)
    elif url.find("tvzune") >= 0:
        server = 'tvzune'
        plugintools.play_resolved_url(url)
    elif url.find("flashcast") >= 0:
        server = 'flashcast'
        plugintools.play_resolved_url(url)
    elif url.find("ilive.to") >= 0:
        server = 'ilive.to'
        plugintools.play_resolved_url(url)
    # NOTE(review): unreachable -- the earlier url.find("janjua") branch
    # already catches any URL that would match here.
    elif url.find("janjua") >= 0:
        server = 'janjua'
        janjua0(params)
    else:
        # Fallback: hand the raw URL to the player as-is.
        print "No ha encontrado launcher"
        server = 'rtmp'
        plugintools.play_resolved_url(url)
| gpl-3.0 |
hand-iemura/lightpng | boost_1_53_0/tools/build/v2/test/custom_generator.py | 42 | 1608 | #!/usr/bin/python
# Copyright 2003, 2004, 2005 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Attempt to declare a generator for creating OBJ from RC files. That generator
# should be considered together with standard CPP->OBJ generators and
# successfully create the target. Since we do not have a RC compiler everywhere,
# we fake the action. The resulting OBJ will be unusable, but it must be
# created.
import BoostBuild
# Drive the scenario: build a throwaway project that declares a custom
# RCC -> OBJ generator and check that Boost.Build selects it.
t = BoostBuild.Tester()
# Minimal project root that just loads our custom 'rcc' toolset module.
t.write("jamroot.jam", """
import rcc ;
""")
# Jam implementation of the fake resource compiler: registers the RCC type
# and a generator producing OBJ from RCC via a text-printing action.
t.write("rcc.jam", """
import type ;
import generators ;
import print ;
# Use 'RCC' to avoid conflicts with definitions in the standard rc.jam and
# msvc.jam
type.register RCC : rcc ;
rule resource-compile ( targets * : sources * : properties * )
{
print.output $(targets[1]) ;
print.text "rc-object" ;
}
generators.register-standard rcc.resource-compile : RCC : OBJ ;
""")
# Equivalent Python port of the module, used by the Python build engine.
t.write("rcc.py", """
import b2.build.type as type
import b2.build.generators as generators
from b2.manager import get_manager
# Use 'RCC' to avoid conflicts with definitions in the standard rc.jam and
# msvc.jam
type.register('RCC', ['rcc'])
generators.register_standard("rcc.resource-compile", ["RCC"], ["OBJ"])
get_manager().engine().register_action(
"rcc.resource-compile",
'@($(STDOUT):E=rc-object) > "$(<)"')
""")
# A target that forces the RCC -> OBJ conversion, plus an empty source file.
t.write("jamfile.jam", """
obj r : r.rcc ;
""")
t.write("r.rcc", """
""")
t.run_build_system()
# The fake action writes the literal marker 'rc-object' into the object.
t.expect_content("bin/$toolset/debug/r.obj", "rc-object")
t.cleanup()
| mit |
diofeher/django-nfa | tests/regressiontests/defaultfilters/tests.py | 2 | 12592 | # -*- coding: utf-8 -*-
r"""
>>> floatformat(7.7)
u'7.7'
>>> floatformat(7.0)
u'7'
>>> floatformat(0.7)
u'0.7'
>>> floatformat(0.07)
u'0.1'
>>> floatformat(0.007)
u'0.0'
>>> floatformat(0.0)
u'0'
>>> floatformat(7.7,3)
u'7.700'
>>> floatformat(6.000000,3)
u'6.000'
>>> floatformat(6.200000, 3)
u'6.200'
>>> floatformat(6.200000, -3)
u'6.200'
>>> floatformat(13.1031,-3)
u'13.103'
>>> floatformat(11.1197, -2)
u'11.12'
>>> floatformat(11.0000, -2)
u'11'
>>> floatformat(11.000001, -2)
u'11.00'
>>> floatformat(8.2798, 3)
u'8.280'
>>> floatformat(u'foo')
u''
>>> floatformat(13.1031, u'bar')
u'13.1031'
>>> floatformat(u'foo', u'bar')
u''
>>> floatformat(None)
u''
>>> addslashes(u'"double quotes" and \'single quotes\'')
u'\\"double quotes\\" and \\\'single quotes\\\''
>>> addslashes(ur'\ : backslashes, too')
u'\\\\ : backslashes, too'
>>> capfirst(u'hello world')
u'Hello world'
>>> escapejs(u'"double quotes" and \'single quotes\'')
u'\\"double quotes\\" and \\\'single quotes\\\''
>>> escapejs(ur'\ : backslashes, too')
u'\\\\ : backslashes, too'
>>> escapejs(u'and lots of whitespace: \r\n\t\v\f\b')
u'and lots of whitespace: \\r\\n\\t\\v\\f\\b'
>>> escapejs(ur'<script>and this</script>')
u'<script>and this<\\/script>'
>>> fix_ampersands(u'Jack & Jill & Jeroboam')
u'Jack & Jill & Jeroboam'
>>> linenumbers(u'line 1\nline 2')
u'1. line 1\n2. line 2'
>>> linenumbers(u'\n'.join([u'x'] * 10))
u'01. x\n02. x\n03. x\n04. x\n05. x\n06. x\n07. x\n08. x\n09. x\n10. x'
>>> lower('TEST')
u'test'
>>> lower(u'\xcb') # uppercase E umlaut
u'\xeb'
>>> make_list('abc')
[u'a', u'b', u'c']
>>> make_list(1234)
[u'1', u'2', u'3', u'4']
>>> slugify(' Jack & Jill like numbers 1,2,3 and 4 and silly characters ?%.$!/')
u'jack-jill-like-numbers-123-and-4-and-silly-characters'
>>> slugify(u"Un \xe9l\xe9phant \xe0 l'or\xe9e du bois")
u'un-elephant-a-loree-du-bois'
>>> stringformat(1, u'03d')
u'001'
>>> stringformat(1, u'z')
u''
>>> title('a nice title, isn\'t it?')
u"A Nice Title, Isn't It?"
>>> title(u'discoth\xe8que')
u'Discoth\xe8que'
>>> truncatewords(u'A sentence with a few words in it', 1)
u'A ...'
>>> truncatewords(u'A sentence with a few words in it', 5)
u'A sentence with a few ...'
>>> truncatewords(u'A sentence with a few words in it', 100)
u'A sentence with a few words in it'
>>> truncatewords(u'A sentence with a few words in it', 'not a number')
u'A sentence with a few words in it'
>>> truncatewords_html(u'<p>one <a href="#">two - three <br>four</a> five</p>', 0)
u''
>>> truncatewords_html(u'<p>one <a href="#">two - three <br>four</a> five</p>', 2)
u'<p>one <a href="#">two ...</a></p>'
>>> truncatewords_html(u'<p>one <a href="#">two - three <br>four</a> five</p>', 4)
u'<p>one <a href="#">two - three <br>four ...</a></p>'
>>> truncatewords_html(u'<p>one <a href="#">two - three <br>four</a> five</p>', 5)
u'<p>one <a href="#">two - three <br>four</a> five</p>'
>>> truncatewords_html(u'<p>one <a href="#">two - three <br>four</a> five</p>', 100)
u'<p>one <a href="#">two - three <br>four</a> five</p>'
>>> truncatewords_html(u'\xc5ngstr\xf6m was here', 1)
u'\xc5ngstr\xf6m ...'
>>> upper(u'Mixed case input')
u'MIXED CASE INPUT'
>>> upper(u'\xeb') # lowercase e umlaut
u'\xcb'
>>> urlencode(u'fran\xe7ois & jill')
u'fran%C3%A7ois%20%26%20jill'
>>> urlencode(1)
u'1'
>>> iriencode(u'S\xf8r-Tr\xf8ndelag')
u'S%C3%B8r-Tr%C3%B8ndelag'
>>> iriencode(urlencode(u'fran\xe7ois & jill'))
u'fran%C3%A7ois%20%26%20jill'
>>> urlizetrunc(u'http://short.com/', 20)
u'<a href="http://short.com/" rel="nofollow">http://short.com/</a>'
>>> urlizetrunc(u'http://www.google.co.uk/search?hl=en&q=some+long+url&btnG=Search&meta=', 20)
u'<a href="http://www.google.co.uk/search?hl=en&q=some+long+url&btnG=Search&meta=" rel="nofollow">http://www.google...</a>'
>>> urlizetrunc('http://www.google.co.uk/search?hl=en&q=some+long+url&btnG=Search&meta=', 20)
u'<a href="http://www.google.co.uk/search?hl=en&q=some+long+url&btnG=Search&meta=" rel="nofollow">http://www.google...</a>'
# Check truncating of URIs which are the exact length
>>> uri = 'http://31characteruri.com/test/'
>>> len(uri)
31
>>> urlizetrunc(uri, 31)
u'<a href="http://31characteruri.com/test/" rel="nofollow">http://31characteruri.com/test/</a>'
>>> urlizetrunc(uri, 30)
u'<a href="http://31characteruri.com/test/" rel="nofollow">http://31characteruri.com/t...</a>'
>>> urlizetrunc(uri, 2)
u'<a href="http://31characteruri.com/test/" rel="nofollow">...</a>'
# Check normal urlize
>>> urlize('http://google.com')
u'<a href="http://google.com" rel="nofollow">http://google.com</a>'
>>> urlize('http://google.com/')
u'<a href="http://google.com/" rel="nofollow">http://google.com/</a>'
>>> urlize('www.google.com')
u'<a href="http://www.google.com" rel="nofollow">www.google.com</a>'
>>> urlize('djangoproject.org')
u'<a href="http://djangoproject.org" rel="nofollow">djangoproject.org</a>'
>>> urlize('info@djangoproject.org')
u'<a href="mailto:info@djangoproject.org">info@djangoproject.org</a>'
# Check urlize with https addresses
>>> urlize('https://google.com')
u'<a href="https://google.com" rel="nofollow">https://google.com</a>'
>>> wordcount('')
0
>>> wordcount(u'oneword')
1
>>> wordcount(u'lots of words')
3
>>> wordwrap(u'this is a long paragraph of text that really needs to be wrapped I\'m afraid', 14)
u"this is a long\nparagraph of\ntext that\nreally needs\nto be wrapped\nI'm afraid"
>>> wordwrap(u'this is a short paragraph of text.\n But this line should be indented',14)
u'this is a\nshort\nparagraph of\ntext.\n But this\nline should be\nindented'
>>> wordwrap(u'this is a short paragraph of text.\n But this line should be indented',15)
u'this is a short\nparagraph of\ntext.\n But this line\nshould be\nindented'
>>> ljust(u'test', 10)
u'test '
>>> ljust(u'test', 3)
u'test'
>>> rjust(u'test', 10)
u' test'
>>> rjust(u'test', 3)
u'test'
>>> center(u'test', 6)
u' test '
>>> cut(u'a string to be mangled', 'a')
u' string to be mngled'
>>> cut(u'a string to be mangled', 'ng')
u'a stri to be maled'
>>> cut(u'a string to be mangled', 'strings')
u'a string to be mangled'
>>> force_escape(u'<some html & special characters > here')
u'<some html & special characters > here'
>>> force_escape(u'<some html & special characters > here ĐÅ€£')
u'<some html & special characters > here \xc4\x90\xc3\x85\xe2\x82\xac\xc2\xa3'
>>> linebreaks(u'line 1')
u'<p>line 1</p>'
>>> linebreaks(u'line 1\nline 2')
u'<p>line 1<br />line 2</p>'
>>> removetags(u'some <b>html</b> with <script>alert("You smell")</script> disallowed <img /> tags', 'script img')
u'some <b>html</b> with alert("You smell") disallowed tags'
>>> striptags(u'some <b>html</b> with <script>alert("You smell")</script> disallowed <img /> tags')
u'some html with alert("You smell") disallowed tags'
>>> sorted_dicts = dictsort([{'age': 23, 'name': 'Barbara-Ann'},
... {'age': 63, 'name': 'Ra Ra Rasputin'},
... {'name': 'Jonny B Goode', 'age': 18}], 'age')
>>> [sorted(dict.items()) for dict in sorted_dicts]
[[('age', 18), ('name', 'Jonny B Goode')], [('age', 23), ('name', 'Barbara-Ann')], [('age', 63), ('name', 'Ra Ra Rasputin')]]
>>> sorted_dicts = dictsortreversed([{'age': 23, 'name': 'Barbara-Ann'},
... {'age': 63, 'name': 'Ra Ra Rasputin'},
... {'name': 'Jonny B Goode', 'age': 18}], 'age')
>>> [sorted(dict.items()) for dict in sorted_dicts]
[[('age', 63), ('name', 'Ra Ra Rasputin')], [('age', 23), ('name', 'Barbara-Ann')], [('age', 18), ('name', 'Jonny B Goode')]]
>>> first([0,1,2])
0
>>> first(u'')
u''
>>> first(u'test')
u't'
>>> join([0,1,2], u'glue')
u'0glue1glue2'
>>> length(u'1234')
4
>>> length([1,2,3,4])
4
>>> length_is([], 0)
True
>>> length_is([], 1)
False
>>> length_is('a', 1)
True
>>> length_is(u'a', 10)
False
>>> slice_(u'abcdefg', u'0')
u''
>>> slice_(u'abcdefg', u'1')
u'a'
>>> slice_(u'abcdefg', u'-1')
u'abcdef'
>>> slice_(u'abcdefg', u'1:2')
u'b'
>>> slice_(u'abcdefg', u'1:3')
u'bc'
>>> slice_(u'abcdefg', u'0::2')
u'aceg'
>>> unordered_list([u'item 1', u'item 2'])
u'\t<li>item 1</li>\n\t<li>item 2</li>'
>>> unordered_list([u'item 1', [u'item 1.1']])
u'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t</ul>\n\t</li>'
>>> unordered_list([u'item 1', [u'item 1.1', u'item1.2'], u'item 2'])
u'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t\t<li>item1.2</li>\n\t</ul>\n\t</li>\n\t<li>item 2</li>'
>>> unordered_list([u'item 1', [u'item 1.1', [u'item 1.1.1', [u'item 1.1.1.1']]]])
u'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1\n\t\t<ul>\n\t\t\t<li>item 1.1.1\n\t\t\t<ul>\n\t\t\t\t<li>item 1.1.1.1</li>\n\t\t\t</ul>\n\t\t\t</li>\n\t\t</ul>\n\t\t</li>\n\t</ul>\n\t</li>'
>>> unordered_list(['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']])
u'\t<li>States\n\t<ul>\n\t\t<li>Kansas\n\t\t<ul>\n\t\t\t<li>Lawrence</li>\n\t\t\t<li>Topeka</li>\n\t\t</ul>\n\t\t</li>\n\t\t<li>Illinois</li>\n\t</ul>\n\t</li>'
# Old format for unordered lists should still work
>>> unordered_list([u'item 1', []])
u'\t<li>item 1</li>'
>>> unordered_list([u'item 1', [[u'item 1.1', []]]])
u'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t</ul>\n\t</li>'
>>> unordered_list([u'item 1', [[u'item 1.1', []], [u'item 1.2', []]]])
u'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t\t<li>item 1.2</li>\n\t</ul>\n\t</li>'
>>> unordered_list(['States', [['Kansas', [['Lawrence', []], ['Topeka', []]]], ['Illinois', []]]])
u'\t<li>States\n\t<ul>\n\t\t<li>Kansas\n\t\t<ul>\n\t\t\t<li>Lawrence</li>\n\t\t\t<li>Topeka</li>\n\t\t</ul>\n\t\t</li>\n\t\t<li>Illinois</li>\n\t</ul>\n\t</li>'
>>> add(u'1', u'2')
3
>>> get_digit(123, 1)
3
>>> get_digit(123, 2)
2
>>> get_digit(123, 3)
1
>>> get_digit(123, 4)
0
>>> get_digit(123, 0)
123
>>> get_digit(u'xyz', 0)
u'xyz'
# real testing of date() is in dateformat.py
>>> date(datetime.datetime(2005, 12, 29), u"d F Y")
u'29 December 2005'
>>> date(datetime.datetime(2005, 12, 29), ur'jS o\f F')
u'29th of December'
# real testing of time() is done in dateformat.py
>>> time(datetime.time(13), u"h")
u'01'
>>> time(datetime.time(0), u"h")
u'12'
# real testing is done in timesince.py, where we can provide our own 'now'
>>> timesince(datetime.datetime.now() - datetime.timedelta(1))
u'1 day'
>>> default(u"val", u"default")
u'val'
>>> default(None, u"default")
u'default'
>>> default(u'', u"default")
u'default'
>>> default_if_none(u"val", u"default")
u'val'
>>> default_if_none(None, u"default")
u'default'
>>> default_if_none(u'', u"default")
u''
>>> divisibleby(4, 2)
True
>>> divisibleby(4, 3)
False
>>> yesno(True)
u'yes'
>>> yesno(False)
u'no'
>>> yesno(None)
u'maybe'
>>> yesno(True, u'certainly,get out of town,perhaps')
u'certainly'
>>> yesno(False, u'certainly,get out of town,perhaps')
u'get out of town'
>>> yesno(None, u'certainly,get out of town,perhaps')
u'perhaps'
>>> yesno(None, u'certainly,get out of town')
u'get out of town'
>>> filesizeformat(1023)
u'1023 bytes'
>>> filesizeformat(1024)
u'1.0 KB'
>>> filesizeformat(10*1024)
u'10.0 KB'
>>> filesizeformat(1024*1024-1)
u'1024.0 KB'
>>> filesizeformat(1024*1024)
u'1.0 MB'
>>> filesizeformat(1024*1024*50)
u'50.0 MB'
>>> filesizeformat(1024*1024*1024-1)
u'1024.0 MB'
>>> filesizeformat(1024*1024*1024)
u'1.0 GB'
>>> pluralize(1)
u''
>>> pluralize(0)
u's'
>>> pluralize(2)
u's'
>>> pluralize([1])
u''
>>> pluralize([])
u's'
>>> pluralize([1,2,3])
u's'
>>> pluralize(1,u'es')
u''
>>> pluralize(0,u'es')
u'es'
>>> pluralize(2,u'es')
u'es'
>>> pluralize(1,u'y,ies')
u'y'
>>> pluralize(0,u'y,ies')
u'ies'
>>> pluralize(2,u'y,ies')
u'ies'
>>> pluralize(0,u'y,ies,error')
u''
>>> phone2numeric(u'0800 flowers')
u'0800 3569377'
# Filters shouldn't break if passed non-strings
>>> addslashes(123)
u'123'
>>> linenumbers(123)
u'1. 123'
>>> lower(123)
u'123'
>>> make_list(123)
[u'1', u'2', u'3']
>>> slugify(123)
u'123'
>>> title(123)
u'123'
>>> truncatewords(123, 2)
u'123'
>>> upper(123)
u'123'
>>> urlencode(123)
u'123'
>>> urlize(123)
u'123'
>>> urlizetrunc(123, 1)
u'123'
>>> wordcount(123)
1
>>> wordwrap(123, 2)
u'123'
>>> ljust('123', 4)
u'123 '
>>> rjust('123', 4)
u' 123'
>>> center('123', 5)
u' 123 '
>>> center('123', 6)
u' 123 '
>>> cut(123, '2')
u'13'
>>> escape(123)
u'123'
>>> linebreaks(123)
u'<p>123</p>'
>>> linebreaksbr(123)
u'123'
>>> removetags(123, 'a')
u'123'
>>> striptags(123)
u'123'
"""
# Star-import so the doctests in the module docstring above can call the
# default template filters unqualified (lower, yesno, pluralize, ...).
from django.template.defaultfilters import *
# presumably used by date/time filter doctests above -- not referenced
# below; TODO confirm against the full docstring
import datetime
if __name__ == '__main__':
    # Execute the module docstring's doctests when run as a script.
    import doctest
    doctest.testmod()
| bsd-3-clause |
mnahm5/django-estore | Lib/site-packages/werkzeug/contrib/lint.py | 131 | 12490 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.lint
~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.5
This module provides a middleware that performs sanity checks of the WSGI
application. It checks that :pep:`333` is properly implemented and warns
on some common HTTP errors such as non-empty responses for 304 status
codes.
This module provides a middleware, the :class:`LintMiddleware`. Wrap your
application with it and it will warn about common problems with WSGI and
HTTP while your application is running.
It's strongly recommended to use it during development.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from urlparse import urlparse
from warnings import warn
from werkzeug.datastructures import Headers
from werkzeug.http import is_entity_header
from werkzeug.wsgi import FileWrapper
from werkzeug._compat import string_types
class WSGIWarning(Warning):
    """Warning category emitted for violations of the WSGI protocol."""
    pass
class HTTPWarning(Warning):
    """Warning category emitted for HTTP-level problems (e.g. bodies on
    304 responses, unquoted etags)."""
    pass
def check_string(context, obj, stacklevel=3):
    """Warn if *obj* is not a native string.

    :param context: human-readable description of where the value came
                    from; interpolated into the warning message.
    :param obj: the value whose type is checked (exact ``str`` required;
                subclasses are rejected on purpose, as WSGI servers rely
                on native-string behaviour).
    :param stacklevel: forwarded to :func:`warnings.warn` so the warning
                       points at the offending caller.
    """
    if type(obj) is not str:
        # Bug fix: the stacklevel parameter was previously accepted but
        # never passed on to warn(), so callers could not control where
        # the warning was reported.
        warn(WSGIWarning('%s requires bytestrings, got %s' %
                         (context, obj.__class__.__name__)),
             stacklevel=stacklevel)
class InputStream(object):
    """Lint wrapper around ``wsgi.input``.

    Warns about read patterns that are unsafe under the WSGI spec and
    about the application closing the server-owned input stream.
    """
    def __init__(self, stream):
        # the real wsgi.input stream being wrapped
        self._stream = stream
    def read(self, *args):
        # Dispatch on argument count: a bare read() may block forever on
        # conforming servers (no EOF guarantee); read() takes at most one
        # size argument.
        if len(args) == 0:
            warn(WSGIWarning('wsgi does not guarantee an EOF marker on the '
                             'input stream, thus making calls to '
                             'wsgi.input.read() unsafe. Conforming servers '
                             'may never return from this call.'),
                 stacklevel=2)
        elif len(args) != 1:
            warn(WSGIWarning('too many parameters passed to wsgi.input.read()'),
                 stacklevel=2)
        # Always delegate, even after warning -- linting must not change
        # application behaviour.
        return self._stream.read(*args)
    def readline(self, *args):
        if len(args) == 0:
            warn(WSGIWarning('Calls to wsgi.input.readline() without arguments'
                             ' are unsafe. Use wsgi.input.read() instead.'),
                 stacklevel=2)
        elif len(args) == 1:
            # A size hint works on major servers but is not part of the
            # WSGI spec, so it is flagged rather than rejected.
            warn(WSGIWarning('wsgi.input.readline() was called with a size hint. '
                             'WSGI does not support this, although it\'s available '
                             'on all major servers.'),
                 stacklevel=2)
        else:
            raise TypeError('too many arguments passed to wsgi.input.readline()')
        return self._stream.readline(*args)
    def __iter__(self):
        try:
            return iter(self._stream)
        except TypeError:
            # Non-iterable input stream: warn and yield nothing instead
            # of propagating the TypeError.
            warn(WSGIWarning('wsgi.input is not iterable.'), stacklevel=2)
            return iter(())
    def close(self):
        # The input stream belongs to the server; applications must not
        # close it.  Still delegate so behaviour is unchanged.
        warn(WSGIWarning('application closed the input stream!'),
             stacklevel=2)
        self._stream.close()
class ErrorStream(object):
    """Lint wrapper around ``wsgi.errors``.

    Checks that only native strings are written and warns if the
    application closes the server-owned error stream.
    """
    def __init__(self, stream):
        # the real wsgi.errors stream being wrapped
        self._stream = stream
    def write(self, s):
        check_string('wsgi.error.write()', s)
        self._stream.write(s)
    def flush(self):
        self._stream.flush()
    def writelines(self, seq):
        # Bug fix: the original wrote ``seq`` (the whole sequence) once
        # per item instead of writing each individual line.
        for line in seq:
            self.write(line)
    def close(self):
        # The error stream belongs to the server; applications must not
        # close it.  Still delegate so behaviour is unchanged.
        warn(WSGIWarning('application closed the error stream!'),
             stacklevel=2)
        self._stream.close()
class GuardedWrite(object):
    """Wrap the ``write`` callable returned by ``start_response``.

    Verifies every chunk is a native string and records its length so
    the middleware can later compare bytes sent against the declared
    Content-Length.
    """
    def __init__(self, write, chunks):
        # write: the callable returned by the server's start_response
        # chunks: shared list of chunk lengths, summed on close
        self._write = write
        self._chunks = chunks
    def __call__(self, s):
        check_string('write()', s)
        # Bug fix: ``self._write`` is itself the write callable handed
        # back by start_response (PEP 333); the original called
        # ``self._write.write(s)``, which raises AttributeError on
        # conforming servers.
        self._write(s)
        self._chunks.append(len(s))
class GuardedIterator(object):
    """Wrap the application iterator and warn about protocol violations.

    Tracks whether ``close`` was called, checks that iteration only
    happens after the response was started, and on close cross-checks
    status code, entity headers and the number of bytes actually sent.
    """
    def __init__(self, iterator, headers_set, chunks):
        self._iterator = iterator
        # bind the underlying iterator's next method once (Python 2)
        self._next = iter(iterator).next
        self.closed = False
        # headers_set: shared list filled by checking_start_response with
        # (status_code, Headers); chunks: shared list of chunk lengths.
        self.headers_set = headers_set
        self.chunks = chunks
    def __iter__(self):
        return self
    def next(self):
        if self.closed:
            warn(WSGIWarning('iterated over closed app_iter'),
                 stacklevel=2)
        rv = self._next()
        if not self.headers_set:
            warn(WSGIWarning('Application returned before it '
                             'started the response'), stacklevel=2)
        check_string('application iterator items', rv)
        self.chunks.append(len(rv))
        return rv
    def close(self):
        self.closed = True
        if hasattr(self._iterator, 'close'):
            self._iterator.close()
        if self.headers_set:
            status_code, headers = self.headers_set
            bytes_sent = sum(self.chunks)
            content_length = headers.get('content-length', type=int)
            if status_code == 304:
                # 304 may only carry non-entity headers (plus the two
                # exceptions below) and must have an empty body.
                for key, value in headers:
                    key = key.lower()
                    if key not in ('expires', 'content-location') and \
                       is_entity_header(key):
                        warn(HTTPWarning('entity header %r found in 304 '
                                         'response' % key))
                if bytes_sent:
                    warn(HTTPWarning('304 responses must not have a body'))
            elif 100 <= status_code < 200 or status_code == 204:
                if content_length != 0:
                    # Bug fix: the ``%`` operator was applied to the
                    # HTTPWarning instance instead of the message string,
                    # raising TypeError whenever this branch was hit.
                    warn(HTTPWarning('%r responses must have an empty '
                                     'content length' % status_code))
                if bytes_sent:
                    warn(HTTPWarning('%r responses must not have a body' %
                                     status_code))
            elif content_length is not None and content_length != bytes_sent:
                warn(WSGIWarning('Content-Length and the number of bytes '
                                 'sent to the client do not match.'))
    def __del__(self):
        if not self.closed:
            try:
                warn(WSGIWarning('Iterator was garbage collected before '
                                 'it was closed.'))
            except Exception:
                # warning machinery may already be torn down during
                # interpreter shutdown -- never raise from __del__
                pass
class LintMiddleware(object):
    """This middleware wraps an application and warns on common errors.
    Among other thing it currently checks for the following problems:
    - invalid status codes
    - non-bytestrings sent to the WSGI server
    - strings returned from the WSGI application
    - non-empty conditional responses
    - unquoted etags
    - relative URLs in the Location header
    - unsafe calls to wsgi.input
    - unclosed iterators
    Detected errors are emitted using the standard Python :mod:`warnings`
    system and usually end up on :data:`stderr`.
    ::
        from werkzeug.contrib.lint import LintMiddleware
        app = LintMiddleware(app)
    :param app: the application to wrap
    """
    def __init__(self, app):
        self.app = app
    def check_environ(self, environ):
        """Sanity-check the WSGI environ: required keys, version tuple,
        and leading slashes on SCRIPT_NAME / PATH_INFO."""
        if type(environ) is not dict:
            warn(WSGIWarning('WSGI environment is not a standard python dict.'),
                 stacklevel=4)
        for key in ('REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
                    'wsgi.version', 'wsgi.input', 'wsgi.errors',
                    'wsgi.multithread', 'wsgi.multiprocess',
                    'wsgi.run_once'):
            if key not in environ:
                warn(WSGIWarning('required environment key %r not found'
                     % key), stacklevel=3)
        if environ['wsgi.version'] != (1, 0):
            warn(WSGIWarning('environ is not a WSGI 1.0 environ'),
                 stacklevel=3)
        script_name = environ.get('SCRIPT_NAME', '')
        if script_name and script_name[:1] != '/':
            warn(WSGIWarning('SCRIPT_NAME does not start with a slash: %r'
                             % script_name), stacklevel=3)
        path_info = environ.get('PATH_INFO', '')
        if path_info[:1] != '/':
            warn(WSGIWarning('PATH_INFO does not start with a slash: %r'
                             % path_info), stacklevel=3)
    def check_start_response(self, status, headers, exc_info):
        """Validate a ``start_response`` call.

        :return: ``(status_code, headers)`` with the code parsed to an
                 int and the headers wrapped in :class:`Headers`.
        """
        check_string('status', status)
        status_code = status.split(None, 1)[0]
        if len(status_code) != 3 or not status_code.isdigit():
            warn(WSGIWarning('Status code must be three digits'), stacklevel=3)
        if len(status) < 4 or status[3] != ' ':
            # Bug fix: the original never interpolated ``status`` into the
            # message, so the warning contained a literal '%r'.
            warn(WSGIWarning('Invalid value for status %r. Valid '
                             'status strings are three digits, a space '
                             'and a status explanation' % status),
                 stacklevel=3)
        status_code = int(status_code)
        if status_code < 100:
            warn(WSGIWarning('status code < 100 detected'), stacklevel=3)
        if type(headers) is not list:
            warn(WSGIWarning('header list is not a list'), stacklevel=3)
        for item in headers:
            if type(item) is not tuple or len(item) != 2:
                warn(WSGIWarning('Headers must tuple 2-item tuples'),
                     stacklevel=3)
            name, value = item
            if type(name) is not str or type(value) is not str:
                warn(WSGIWarning('header items must be strings'),
                     stacklevel=3)
            if name.lower() == 'status':
                warn(WSGIWarning('The status header is not supported due to '
                                 'conflicts with the CGI spec.'),
                     stacklevel=3)
        if exc_info is not None and not isinstance(exc_info, tuple):
            warn(WSGIWarning('invalid value for exc_info'), stacklevel=3)
        headers = Headers(headers)
        self.check_headers(headers)
        return status_code, headers
    def check_headers(self, headers):
        """Warn about malformed ETag values and relative Location URLs."""
        etag = headers.get('etag')
        if etag is not None:
            if etag.startswith(('W/', 'w/')):
                if etag.startswith('w/'):
                    warn(HTTPWarning('weak etag indicator should be upcase.'),
                         stacklevel=4)
                etag = etag[2:]
            if not (etag[:1] == etag[-1:] == '"'):
                warn(HTTPWarning('unquoted etag emitted.'), stacklevel=4)
        location = headers.get('location')
        if location is not None:
            if not urlparse(location).netloc:
                warn(HTTPWarning('absolute URLs required for location header'),
                     stacklevel=4)
    def check_iterator(self, app_iter):
        """Warn if the application returned a bare string as response."""
        if isinstance(app_iter, string_types):
            warn(WSGIWarning('application returned string. Response will '
                             'send character for character to the client '
                             'which will kill the performance. Return a '
                             'list or iterable instead.'), stacklevel=3)
    def __call__(self, *args, **kwargs):
        if len(args) != 2:
            warn(WSGIWarning('Two arguments to WSGI app required'), stacklevel=2)
        if kwargs:
            warn(WSGIWarning('No keyword arguments to WSGI app allowed'),
                 stacklevel=2)
        environ, start_response = args
        self.check_environ(environ)
        environ['wsgi.input'] = InputStream(environ['wsgi.input'])
        environ['wsgi.errors'] = ErrorStream(environ['wsgi.errors'])
        # hook our own file wrapper in so that applications will always
        # iterate to the end and we can check the content length
        environ['wsgi.file_wrapper'] = FileWrapper
        headers_set = []
        chunks = []
        def checking_start_response(*args, **kwargs):
            if len(args) not in (2, 3):
                # Bug fix: a misplaced closing paren passed ``stacklevel=2``
                # to WSGIWarning() instead of to warn().
                warn(WSGIWarning('Invalid number of arguments: %s, expected '
                                 '2 or 3' % len(args)), stacklevel=2)
            if kwargs:
                warn(WSGIWarning('no keyword arguments allowed.'))
            status, headers = args[:2]
            if len(args) == 3:
                exc_info = args[2]
            else:
                exc_info = None
            headers_set[:] = self.check_start_response(status, headers,
                                                       exc_info)
            return GuardedWrite(start_response(status, headers, exc_info),
                                chunks)
        app_iter = self.app(environ, checking_start_response)
        self.check_iterator(app_iter)
        return GuardedIterator(app_iter, headers_set, chunks)
| mit |
boompieman/iim_project | project_python2/lib/python2.7/site-packages/IPython/lib/tests/test_deepreload.py | 9 | 2144 | # -*- coding: utf-8 -*-
"""Test suite for the deepreload module."""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import nose.tools as nt
from IPython.testing import decorators as dec
from IPython.utils.py3compat import builtin_mod_name, PY3
from IPython.utils.syspathcontext import prepended_to_syspath
from IPython.utils.tempdir import TemporaryDirectory
from IPython.lib.deepreload import reload as dreload
#-----------------------------------------------------------------------------
# Test functions begin
#-----------------------------------------------------------------------------
@dec.skipif_not_numpy
def test_deepreload_numpy():
    "Test that NumPy can be deep reloaded."
    import numpy
    # TODO: Find a way to exclude all standard library modules from reloading.
    exclude = [
        # Standard exclusions:
        'sys', 'os.path', builtin_mod_name, '__main__',
        # Test-related exclusions:
        'unittest', 'UserDict', '_collections_abc', 'tokenize',
        'collections', 'collections.abc',
        'importlib', 'importlib.machinery', '_imp',
        'importlib._bootstrap', 'importlib._bootstrap_external',
        '_frozen_importlib', '_frozen_importlib_external',
    ]
    # Success criterion is simply that this does not raise: dreload walks
    # numpy's import graph, reloading everything not in ``exclude``.
    dreload(numpy, exclude=exclude)
def test_deepreload():
    "Test that dreload does deep reloads and skips excluded modules."
    # Build a tiny two-module package on the fly: B imports A, so a deep
    # reload of B normally reloads A too.
    with TemporaryDirectory() as tmpdir:
        with prepended_to_syspath(tmpdir):
            with open(os.path.join(tmpdir, 'A.py'), 'w') as f:
                f.write("class Object(object):\n    pass\n")
            with open(os.path.join(tmpdir, 'B.py'), 'w') as f:
                f.write("import A\n")
            import A
            import B
            # Test that A is not reloaded.
            # (excluded module keeps its class identity, so the
            # pre-reload instance still matches)
            obj = A.Object()
            dreload(B, exclude=['A'])
            nt.assert_true(isinstance(obj, A.Object))
            # Test that A is reloaded.
            # (deep reload replaces A.Object with a new class object)
            obj = A.Object()
            dreload(B)
            nt.assert_false(isinstance(obj, A.Object))
| gpl-3.0 |
Gustry/QGIS | python/plugins/db_manager/dlg_field_properties.py | 30 | 3597 | # -*- coding: utf-8 -*-
"""
***************************************************************************
dlg_field_properties.py
---------------------
Date : April 2012
Copyright : (C) 2012 by Giuseppe Sucameli
Email : brush dot tyler at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Giuseppe Sucameli'
__date__ = 'April 2012'
__copyright__ = '(C) 2012, Giuseppe Sucameli'
from qgis.PyQt.QtWidgets import QDialog, QMessageBox
from .db_plugins.plugin import TableField
from .ui.ui_DlgFieldProperties import Ui_DbManagerDlgFieldProperties as Ui_Dialog
class DlgFieldProperties(QDialog, Ui_Dialog):
    """Dialog for creating or editing a table field (name, type, length,
    nullability, default value and, where supported, a comment)."""
    def __init__(self, parent=None, fld=None, table=None, db=None):
        QDialog.__init__(self, parent)
        # Resolve table/db from the field when possible, falling back to
        # the explicitly supplied arguments.
        self.fld = fld
        self.table = self.fld.table() if self.fld and self.fld.table() else table
        self.db = self.table.database() if self.table and self.table.database() else db
        self.setupUi(self)
        # Populate the type combo with the backend's supported field types.
        for item in self.db.connector.fieldTypes():
            self.cboType.addItem(item)
        # Hide the comment widgets for backends without comment support.
        supportCom = self.db.supportsComment()
        if not supportCom:
            self.label_6.setVisible(False)
            self.editCom.setVisible(False)
        self.setField(fld)
        self.buttonBox.accepted.connect(self.onOK)
    def setField(self, fld):
        """Fill the dialog widgets from an existing field; no-op for a
        new (None) field."""
        if fld is None:
            return
        self.editName.setText(fld.name)
        self.cboType.setEditText(fld.dataType)
        if fld.modifier:
            self.editLength.setText(str(fld.modifier))
        # checkbox expresses "allow NULL", the model stores "NOT NULL"
        self.chkNull.setChecked(not fld.notNull)
        if fld.hasDefault:
            self.editDefault.setText(fld.default)
        tab = self.table.name
        field = fld.name
        res = self.db.connector.getComment(tab, field)
        self.editCom.setText(res)  # Set comment value
    def getField(self, newCopy=False):
        """Build a TableField from the widgets.

        With newCopy=True (or when editing a new field) a fresh object is
        returned instead of mutating self.fld.
        """
        fld = TableField(self.table) if not self.fld or newCopy else self.fld
        fld.name = self.editName.text()
        fld.dataType = self.cboType.currentText()
        fld.notNull = not self.chkNull.isChecked()
        fld.default = self.editDefault.text()
        fld.hasDefault = fld.default != ""
        fld.comment = self.editCom.text()
        # length field also used for geometry definition, so we should
        # not cast its value to int
        if self.editLength.text() != "":
            fld.modifier = self.editLength.text()
        else:
            fld.modifier = None
        return fld
    def onOK(self):
        """ first check whether everything's fine """
        fld = self.getField(True)  # don't change the original copy
        if fld.name == "":
            QMessageBox.critical(self, self.tr("DB Manager"), self.tr("Field name must not be empty."))
            return
        if fld.dataType == "":
            QMessageBox.critical(self, self.tr("DB Manager"), self.tr("Field type must not be empty."))
            return
        self.accept()
| gpl-2.0 |
flexiant/xen | tools/python/xen/xend/server/DevController.py | 42 | 24407 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
# Copyright (C) 2005 XenSource Ltd
#============================================================================
from threading import Event
import types
from xen.xend import sxp, XendOptions
from xen.xend.XendError import VmError
from xen.xend.XendLogging import log
import xen.xend.XendConfig
from xen.xend.server.DevConstants import *
from xen.xend.xenstore.xstransact import xstransact, complete
from xen.xend.xenstore.xswatch import xswatch
import xen.xend.server.DevConstants
import os, re
xoptions = XendOptions.instance()
class DevController:
"""Abstract base class for a device controller. Device controllers create
appropriate entries in the store to trigger the creation, reconfiguration,
and destruction of devices in guest domains. Each subclass of
DevController is responsible for a particular device-class, and
understands the details of configuration specific to that device-class.
DevController itself provides the functionality common to all device
creation tasks, as well as providing an interface to XendDomainInfo for
triggering those events themselves.
"""
# Set when registered.
deviceClass = None
## public:
def __init__(self, vm):
self.vm = vm
self.hotplug = True
def createDevice(self, config):
"""Trigger the creation of a device with the given configuration.
@return The ID for the newly created device.
"""
(devid, back, front) = self.getDeviceDetails(config)
if devid is None:
return 0
self.setupDevice(config)
(backpath, frontpath) = self.addStoreEntries(config, devid, back,
front)
import xen.xend.XendDomain
xd = xen.xend.XendDomain.instance()
backdom_name = config.get('backend')
if backdom_name is None:
backdom = xen.xend.XendDomain.DOM0_ID
else:
bd = xd.domain_lookup_nr(backdom_name)
backdom = bd.getDomid()
count = 0
while True:
t = xstransact()
try:
if devid in self.deviceIDs(t):
if 'dev' in back:
dev_str = '%s (%d, %s)' % (back['dev'], devid,
self.deviceClass)
else:
dev_str = '%s (%s)' % (devid, self.deviceClass)
raise VmError("Device %s is already connected." % dev_str)
if count == 0:
log.debug('DevController: writing %s to %s.',
str(front), frontpath)
log.debug('DevController: writing %s to %s.',
str(xen.xend.XendConfig.scrub_password(back)), backpath)
elif count % 50 == 0:
log.debug(
'DevController: still waiting to write device entries.')
devpath = self.devicePath(devid)
t.remove(frontpath)
t.remove(backpath)
t.remove(devpath)
t.mkdir(backpath)
t.set_permissions(backpath,
{'dom': backdom },
{'dom' : self.vm.getDomid(),
'read' : True })
t.mkdir(frontpath)
t.set_permissions(frontpath,
{'dom': self.vm.getDomid()},
{'dom': backdom, 'read': True})
t.write2(frontpath, front)
t.write2(backpath, back)
t.mkdir(devpath)
t.write2(devpath, {
'backend' : backpath,
'backend-id' : "%i" % backdom,
'frontend' : frontpath,
'frontend-id' : "%i" % self.vm.getDomid()
})
if t.commit():
return devid
count += 1
except:
t.abort()
raise
def waitForDevices(self):
log.debug("Waiting for devices %s.", self.deviceClass)
return map(self.waitForDevice, self.deviceIDs())
def waitForDevice(self, devid):
log.debug("Waiting for %s.", devid)
if not self.hotplug:
return
(status, err) = self.waitForBackend(devid)
if status == Timeout:
self.destroyDevice(devid, False)
raise VmError("Device %s (%s) could not be connected. "
"Hotplug scripts not working." %
(devid, self.deviceClass))
elif status == Error:
self.destroyDevice(devid, False)
if err is None:
raise VmError("Device %s (%s) could not be connected. "
"Backend device not found." %
(devid, self.deviceClass))
else:
raise VmError("Device %s (%s) could not be connected. "
"%s" % (devid, self.deviceClass, err))
elif status == Missing:
# Don't try to destroy the device; it's already gone away.
raise VmError("Device %s (%s) could not be connected. "
"Device not found." % (devid, self.deviceClass))
elif status == Busy:
self.destroyDevice(devid, False)
if err is None:
err = "Busy."
raise VmError("Device %s (%s) could not be connected.\n%s" %
(devid, self.deviceClass, err))
def waitForDevice_destroy(self, devid, backpath):
log.debug("Waiting for %s - destroyDevice.", devid)
if not self.hotplug:
return
status = self.waitForBackend_destroy(backpath)
if status == Timeout:
raise VmError("Device %s (%s) could not be disconnected. " %
(devid, self.deviceClass))
def waitForDevice_reconfigure(self, devid):
log.debug("Waiting for %s - reconfigureDevice.", devid)
(status, err) = self.waitForBackend_reconfigure(devid)
if status == Timeout:
raise VmError("Device %s (%s) could not be reconfigured. " %
(devid, self.deviceClass))
def reconfigureDevice(self, devid, config):
"""Reconfigure the specified device.
The implementation here just raises VmError. This may be overridden
by those subclasses that can reconfigure their devices.
"""
raise VmError('%s devices may not be reconfigured' % self.deviceClass)
def destroyDevice(self, devid, force):
"""Destroy the specified device.
@param devid The device ID, or something device-specific from which
the device ID can be determined (such as a guest-side device name).
The implementation here simply deletes the appropriate paths from the
store. This may be overridden by subclasses who need to perform other
tasks on destruction. The implementation here accepts integer device
IDs or paths containg integer deviceIDs, e.g. vfb/0. Subclasses may
accept other values and convert them to integers before passing them
here.
"""
dev = self.convertToDeviceNumber(devid)
# Modify online status /before/ updating state (latter is watched by
# drivers, so this ordering avoids a race).
self.writeBackend(dev, 'online', "0")
self.writeBackend(dev, 'state', str(xenbusState['Closing']))
if force:
frontpath = self.frontendPath(dev)
backpath = self.readVm(dev, "backend")
if backpath:
xstransact.Remove(backpath)
xstransact.Remove(frontpath)
# xstransact.Remove(self.devicePath()) ?? Below is the same ?
self.vm._removeVm("device/%s/%d" % (self.deviceClass, dev))
def configurations(self, transaction = None):
return map(lambda x: self.configuration(x, transaction), self.deviceIDs(transaction))
def configuration(self, devid, transaction = None):
"""@return an s-expression giving the current configuration of the
specified device. This would be suitable for giving to {@link
#createDevice} in order to recreate that device."""
configDict = self.getDeviceConfiguration(devid, transaction)
sxpr = [self.deviceClass]
for key, val in configDict.items():
if isinstance(val, (types.ListType, types.TupleType)):
for v in val:
if v != None:
sxpr.append([key, v])
else:
if val != None:
sxpr.append([key, val])
return sxpr
def sxprs(self):
"""@return an s-expression describing all the devices of this
controller's device-class.
"""
return xstransact.ListRecursive(self.frontendRoot())
def sxpr(self, devid):
"""@return an s-expression describing the specified device.
"""
return [self.deviceClass, ['dom', self.vm.getDomid(),
'id', devid]]
def getDeviceConfiguration(self, devid, transaction = None):
"""Returns the configuration of a device.
@note: Similar to L{configuration} except it returns a dict.
@return: dict
"""
if transaction is None:
backdomid = xstransact.Read(self.devicePath(devid), "backend-id")
else:
backdomid = transaction.read(self.devicePath(devid) + "/backend-id")
if backdomid is None:
raise VmError("Device %s not connected" % devid)
return {'backend': int(backdomid)}
def getAllDeviceConfigurations(self):
all_configs = {}
for devid in self.deviceIDs():
config_dict = self.getDeviceConfiguration(devid)
all_configs[devid] = config_dict
return all_configs
def convertToDeviceNumber(self, devid):
try:
return int(devid)
except ValueError:
# Does devid contain devicetype/deviceid?
# Propogate exception if unable to find an integer devid
return int(type(devid) is str and devid.split('/')[-1] or None)
## protected:
def getDeviceDetails(self, config):
"""Compute the details for creation of a device corresponding to the
given configuration. These details consist of a tuple of (devID,
backDetails, frontDetails), where devID is the ID for the new device,
and backDetails and frontDetails are the device configuration
specifics for the backend and frontend respectively.
backDetails and frontDetails should be dictionaries, the keys and
values of which will be used as paths in the store. There is no need
for these dictionaries to include the references from frontend to
backend, nor vice versa, as these will be handled by DevController.
Abstract; must be implemented by every subclass.
@return (devID, backDetails, frontDetails), as specified above.
"""
raise NotImplementedError()
def setupDevice(self, config):
""" Setup device from config.
"""
return
def migrate(self, deviceConfig, network, dst, step, domName):
""" Migration of a device. The 'network' parameter indicates
whether the device is network-migrated (True). 'dst' then gives
the hostname of the machine to migrate to.
This function is called for 4 steps:
If step == 0: Check whether the device is ready to be migrated
or can at all be migrated; return a '-1' if
the device is NOT ready, a '0' otherwise. If it is
not ready ( = not possible to migrate this device),
migration will not take place.
step == 1: Called immediately after step 0; migration
of the kernel has started;
step == 2: Called after the suspend has been issued
to the domain and the domain is not scheduled anymore.
Synchronize with what was started in step 1, if necessary.
Now the device should initiate its transfer to the
given target. Since there might be more than just
one device initiating a migration, this step should
put the process performing the transfer into the
background and return immediately to achieve as much
concurrency as possible.
step == 3: Synchronize with the migration of the device that
was initiated in step 2.
Make sure that the migration has finished and only
then return from the call.
"""
tool = xoptions.get_external_migration_tool()
if tool:
log.info("Calling external migration tool for step %d" % step)
fd = os.popen("%s -type %s -step %d -host %s -domname %s" %
(tool, self.deviceClass, step, dst, domName))
for line in fd:
log.info(line.rstrip())
rc = fd.close()
if rc:
raise VmError('Migration tool returned %d' % (rc >> 8))
return 0
def recover_migrate(self, deviceConfig, network, dst, step, domName):
""" Recover from device migration. The given step was the
last one that was successfully executed.
"""
tool = xoptions.get_external_migration_tool()
if tool:
log.info("Calling external migration tool")
fd = os.popen("%s -type %s -step %d -host %s -domname %s -recover" %
(tool, self.deviceClass, step, dst, domName))
for line in fd:
log.info(line.rstrip())
rc = fd.close()
if rc:
raise VmError('Migration tool returned %d' % (rc >> 8))
return 0
def getDomid(self):
"""Stub to {@link XendDomainInfo.getDomid}, for use by our
subclasses.
"""
return self.vm.getDomid()
def allocateDeviceID(self):
"""Allocate a device ID, allocating them consecutively on a
per-domain, per-device-class basis, and using the store to record the
next available ID.
This method is available to our subclasses, though it is not
compulsory to use it; subclasses may prefer to allocate IDs based upon
the device configuration instead.
"""
path = self.frontendMiscPath()
return complete(path, self._allocateDeviceID)
def _allocateDeviceID(self, t):
result = t.read("nextDeviceID")
if result:
result = int(result)
else:
result = 0
t.write("nextDeviceID", str(result + 1))
return result
def removeBackend(self, devid, *args):
frontpath = self.frontendPath(devid)
backpath = xstransact.Read(frontpath, "backend")
if backpath:
return xstransact.Remove(backpath, *args)
else:
raise VmError("Device %s not connected" % devid)
def readVm(self, devid, *args):
devpath = self.devicePath(devid)
if devpath:
return xstransact.Read(devpath, *args)
else:
raise VmError("Device config %s not found" % devid)
def readBackend(self, devid, *args):
backpath = self.readVm(devid, "backend")
if backpath:
return xstransact.Read(backpath, *args)
else:
raise VmError("Device %s not connected" % devid)
def readBackendTxn(self, transaction, devid, *args):
backpath = self.readVm(devid, "backend")
if backpath:
paths = map(lambda x: backpath + "/" + x, args)
return transaction.read(*paths)
else:
raise VmError("Device %s not connected" % devid)
def readFrontend(self, devid, *args):
return xstransact.Read(self.frontendPath(devid), *args)
def readFrontendTxn(self, transaction, devid, *args):
paths = map(lambda x: self.frontendPath(devid) + "/" + x, args)
return transaction.read(*paths)
def deviceIDs(self, transaction = None):
"""@return The IDs of each of the devices currently configured for
this instance's deviceClass.
"""
fe = self.deviceRoot()
if transaction:
return map(lambda x: int(x.split('/')[-1]), transaction.list(fe))
else:
return map(int, xstransact.List(fe))
def writeBackend(self, devid, *args):
backpath = self.readVm(devid, "backend")
if backpath:
xstransact.Write(backpath, *args)
else:
raise VmError("Device %s not connected" % devid)
## private:
def addStoreEntries(self, config, devid, backDetails, frontDetails):
"""Add to backDetails and frontDetails the entries to be written in
the store to trigger creation of a device. The backend domain ID is
taken from the given config, paths for frontend and backend are
computed, and these are added to the backDetails and frontDetails
dictionaries for writing to the store, including references from
frontend to backend and vice versa.
@return A pair of (backpath, frontpath). backDetails and frontDetails
will have been updated appropriately, also.
@param config The configuration of the device, as given to
{@link #createDevice}.
@param devid As returned by {@link #getDeviceDetails}.
@param backDetails As returned by {@link #getDeviceDetails}.
@param frontDetails As returned by {@link #getDeviceDetails}.
"""
import xen.xend.XendDomain
xd = xen.xend.XendDomain.instance()
backdom_name = config.get('backend')
if backdom_name:
backdom = xd.domain_lookup_nr(backdom_name)
else:
backdom = xd.privilegedDomain()
if not backdom:
raise VmError("Cannot configure device for unknown backend %s" %
backdom_name)
frontpath = self.frontendPath(devid)
backpath = self.backendPath(backdom, devid)
frontDetails.update({
'backend' : backpath,
'backend-id' : "%i" % backdom.getDomid(),
'state' : str(xenbusState['Initialising'])
})
if self.vm.native_protocol:
frontDetails.update({'protocol' : self.vm.native_protocol})
backDetails.update({
'domain' : self.vm.getName(),
'frontend' : frontpath,
'frontend-id' : "%i" % self.vm.getDomid(),
'state' : str(xenbusState['Initialising']),
'online' : "1"
})
return (backpath, frontpath)
def waitForBackend(self, devid):
frontpath = self.frontendPath(devid)
# lookup a phantom
phantomPath = xstransact.Read(frontpath, 'phantom_vbd')
if phantomPath is not None:
log.debug("Waiting for %s's phantom %s.", devid, phantomPath)
statusPath = phantomPath + '/' + HOTPLUG_STATUS_NODE
ev = Event()
result = { 'status': Timeout }
xswatch(statusPath, hotplugStatusCallback, ev, result)
ev.wait(DEVICE_CREATE_TIMEOUT)
err = xstransact.Read(statusPath, HOTPLUG_ERROR_NODE)
if result['status'] != Connected:
return (result['status'], err)
backpath = self.readVm(devid, "backend")
if backpath:
statusPath = backpath + '/' + HOTPLUG_STATUS_NODE
ev = Event()
result = { 'status': Timeout }
xswatch(statusPath, hotplugStatusCallback, ev, result)
ev.wait(DEVICE_CREATE_TIMEOUT)
err = xstransact.Read(backpath, HOTPLUG_ERROR_NODE)
return (result['status'], err)
else:
return (Missing, None)
def waitForBackend_destroy(self, backpath):
statusPath = backpath + '/' + HOTPLUG_STATUS_NODE
ev = Event()
result = { 'status': Timeout }
xswatch(statusPath, deviceDestroyCallback, ev, result)
ev.wait(DEVICE_DESTROY_TIMEOUT)
return result['status']
def waitForBackend_reconfigure(self, devid):
frontpath = self.frontendPath(devid)
backpath = xstransact.Read(frontpath, "backend")
if backpath:
statusPath = backpath + '/' + "state"
ev = Event()
result = { 'status': Timeout }
xswatch(statusPath, xenbusStatusCallback, ev, result)
ev.wait(DEVICE_CREATE_TIMEOUT)
return (result['status'], None)
else:
return (Missing, None)
def backendPath(self, backdom, devid):
"""Construct backend path given the backend domain and device id.
@param backdom [XendDomainInfo] The backend domain info."""
return "%s/backend/%s/%s/%d" % (backdom.getDomainPath(),
self.deviceClass,
self.vm.getDomid(), devid)
def frontendPath(self, devid):
return "%s/%d" % (self.frontendRoot(), devid)
def frontendRoot(self):
return "%s/device/%s" % (self.vm.getDomainPath(), self.deviceClass)
def frontendMiscPath(self):
    """Return the device-misc path for this device class in the VM's
    domain tree."""
    domain_path = self.vm.getDomainPath()
    return "%s/device-misc/%s" % (domain_path, self.deviceClass)
def deviceRoot(self):
    """Return the per-VM device root (/vm/<uuid>/device/<class>).

    Kept under the VM path because backendRoot assumes the backend
    domain is 0.
    """
    vm_path = self.vm.vmpath
    return "%s/device/%s" % (vm_path, self.deviceClass)
def devicePath(self, devid):
    """Return the /vm device entry of the given VM, used to store the
    backend/frontend locations for one device."""
    vm_path = self.vm.vmpath
    return "%s/device/%s/%s" % (vm_path, self.deviceClass, devid)
def hotplugStatusCallback(statusPath, ev, result):
    """xswatch callback: translate the hotplug status node into a device
    state, record it in result['status'] and wake the waiter.

    Returns 1 to keep the watch alive while the node is still absent,
    0 once a status has been recorded.
    """
    log.debug("hotplugStatusCallback %s.", statusPath)
    status = xstransact.Read(statusPath)
    if status is None:
        return 1  # node not written yet; keep watching
    if status == HOTPLUG_STATUS_ERROR:
        result['status'] = Error
    elif status == HOTPLUG_STATUS_BUSY:
        result['status'] = Busy
    else:
        result['status'] = Connected
    log.debug("hotplugStatusCallback %d.", result['status'])
    ev.set()
    return 0
def deviceDestroyCallback(statusPath, ev, result):
    """xswatch callback used while tearing a device down: records
    Disconnected once the hotplug status node disappears.

    Returns 1 to keep watching while the node still exists, 0 when the
    removal has been observed.
    """
    log.debug("deviceDestroyCallback %s.", statusPath)
    status = xstransact.Read(statusPath)
    if status is not None:
        return 1  # node still present; keep the watch alive
    result['status'] = Disconnected
    log.debug("deviceDestroyCallback %d.", result['status'])
    ev.set()
    return 0
def xenbusStatusCallback(statusPath, ev, result):
    """xswatch callback: record Connected once the xenbus 'state' node
    reaches the Connected value; keep watching (return 1) otherwise."""
    log.debug("xenbusStatusCallback %s.", statusPath)
    status = xstransact.Read(statusPath)
    if status != str(xenbusState['Connected']):
        return 1  # not connected yet; keep the watch alive
    result['status'] = Connected
    log.debug("xenbusStatusCallback %d.", result['status'])
    ev.set()
    return 0
| gpl-2.0 |
frishberg/django | django/contrib/postgres/fields/hstore.py | 5 | 3326 | import json
from django.contrib.postgres import forms, lookups
from django.contrib.postgres.fields.array import ArrayField
from django.core import exceptions
from django.db.models import Field, TextField, Transform
from django.utils import six
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
__all__ = ['HStoreField']
class HStoreField(Field):
    """Model field for PostgreSQL's ``hstore`` type: a mapping of string
    keys to string values (values may also be NULL)."""

    empty_strings_allowed = False
    description = _('Map of strings to strings')
    default_error_messages = {
        'not_a_string': _('The value of "%(key)s" is not a string.'),
    }

    def db_type(self, connection):
        # Requires the hstore extension to be installed in the database.
        return 'hstore'

    def get_transform(self, name):
        """Resolve ``field__<name>``: registered transforms win; any other
        name becomes a key lookup into the hstore value."""
        transform = super(HStoreField, self).get_transform(name)
        if transform:
            return transform
        return KeyTransformFactory(name)

    def validate(self, value, model_instance):
        """Reject mappings whose values are not strings (None passes —
        it maps to an hstore NULL; see get_prep_value)."""
        super(HStoreField, self).validate(value, model_instance)
        for key, val in value.items():
            if not isinstance(val, six.string_types):
                raise exceptions.ValidationError(
                    self.error_messages['not_a_string'],
                    code='not_a_string',
                    params={'key': key},
                )

    def to_python(self, value):
        # Serialized form (e.g. from value_to_string) arrives as JSON text.
        if isinstance(value, six.string_types):
            value = json.loads(value)
        return value

    def value_to_string(self, obj):
        """Serialize the field's value as JSON text."""
        return json.dumps(self.value_from_object(obj))

    def formfield(self, **kwargs):
        """Use the postgres HStoreField form field by default."""
        defaults = {
            'form_class': forms.HStoreField,
        }
        defaults.update(kwargs)
        return super(HStoreField, self).formfield(**defaults)

    def get_prep_value(self, value):
        """Coerce keys and values to text before handing the data to the
        database adapter; None values are preserved (hstore NULL)."""
        value = super(HStoreField, self).get_prep_value(value)
        if isinstance(value, dict):
            prep_value = {}
            for key, val in value.items():
                key = force_text(key)
                if val is not None:
                    val = force_text(val)
                prep_value[key] = val
            value = prep_value
        if isinstance(value, list):
            # Lists occur in key-based lookups (e.g. has_keys / has_any_keys).
            value = [force_text(item) for item in value]
        return value
# Attach the hstore-specific lookups (contains / contained_by / has_key /
# has_keys / has_any_keys) to every HStoreField.
HStoreField.register_lookup(lookups.DataContains)
HStoreField.register_lookup(lookups.ContainedBy)
HStoreField.register_lookup(lookups.HasKey)
HStoreField.register_lookup(lookups.HasKeys)
HStoreField.register_lookup(lookups.HasAnyKeys)
class KeyTransform(Transform):
    """Transform implementing ``field__<key>`` lookups: extracts the value
    stored under ``key_name`` via the hstore ``->`` operator."""

    output_field = TextField()

    def __init__(self, key_name, *args, **kwargs):
        super(KeyTransform, self).__init__(*args, **kwargs)
        self.key_name = key_name

    def as_sql(self, compiler, connection):
        lhs, params = compiler.compile(self.lhs)
        # Bug fix: the key used to be interpolated directly into the SQL
        # string ("... -> '<key>'"), which produced broken — and
        # injectable — SQL for keys containing a single quote.  Pass it
        # as a bound query parameter instead.
        return '(%s -> %%s)' % lhs, list(params) + [self.key_name]
class KeyTransformFactory(object):
    """Callable returned by HStoreField.get_transform() for names that are
    not registered transforms; builds a KeyTransform bound to that key."""

    def __init__(self, key_name):
        # Key to extract when the factory is invoked by the ORM.
        self.key_name = key_name

    def __call__(self, *args, **kwargs):
        return KeyTransform(self.key_name, *args, **kwargs)
@HStoreField.register_lookup
class KeysTransform(Transform):
    """``field__keys`` lookup: the hstore's keys as a text array
    (PostgreSQL ``akeys`` function)."""
    lookup_name = 'keys'
    function = 'akeys'
    output_field = ArrayField(TextField())
@HStoreField.register_lookup
class ValuesTransform(Transform):
    """``field__values`` lookup: the hstore's values as a text array
    (PostgreSQL ``avals`` function)."""
    lookup_name = 'values'
    function = 'avals'
    output_field = ArrayField(TextField())
| bsd-3-clause |
Tom-Trumper/selenium | py/selenium/webdriver/common/service.py | 20 | 5855 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import errno
import os
import platform
import subprocess
from subprocess import PIPE
import time
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common import utils
# subprocess.DEVNULL exists only on Python 3.3+; on older interpreters use a
# sentinel value and let Service.__init__ open os.devnull manually.
try:
    from subprocess import DEVNULL
    _HAS_NATIVE_DEVNULL = True
except ImportError:
    DEVNULL = -3
    _HAS_NATIVE_DEVNULL = False
class Service(object):
    """Manage the lifetime of a local driver helper process (chromedriver,
    geckodriver, ...) that WebDriver talks to over HTTP on ``port``.

    Subclasses must override :meth:`command_line_args`.
    """

    def __init__(self, executable, port=0, log_file=DEVNULL, env=None, start_error_message=""):
        """
        :Args:
         - executable : path of the driver executable to launch
         - port : port for the service to listen on; 0 selects a free one
         - log_file : file object receiving the process's stdout/stderr
         - env : environment mapping for the subprocess (default os.environ)
         - start_error_message : extra hint appended to startup errors
        """
        self.path = executable
        # Bug fix: initialise eagerly so stop() / __del__ are safe even when
        # start() was never called (previously raised AttributeError).
        self.process = None
        self.port = port
        if self.port == 0:
            self.port = utils.free_port()
        # Emulate subprocess.DEVNULL on interpreters that lack it.
        if not _HAS_NATIVE_DEVNULL and log_file == DEVNULL:
            log_file = open(os.devnull, 'wb')
        self.start_error_message = start_error_message
        self.log_file = log_file
        self.env = env or os.environ

    @property
    def service_url(self):
        """
        Gets the url of the Service
        """
        return "http://%s" % utils.join_host_port('localhost', self.port)

    def command_line_args(self):
        """Return the argument list (excluding the executable itself) used
        to launch the service.  Must be provided by subclasses."""
        # Bug fix: this used to be `raise NotImplemented(...)`, which raised
        # a confusing TypeError because NotImplemented is a value, not an
        # exception class.
        raise NotImplementedError("This method needs to be implemented in a sub class")

    def start(self):
        """
        Starts the Service.

        :Exceptions:
         - WebDriverException : Raised either when it can't start the service
           or when it can't connect to the service
        """
        try:
            cmd = [self.path]
            cmd.extend(self.command_line_args())
            self.process = subprocess.Popen(cmd, env=self.env,
                                            close_fds=platform.system() != 'Windows',
                                            stdout=self.log_file,
                                            stderr=self.log_file,
                                            stdin=PIPE)
        except TypeError:
            raise
        except OSError as err:
            # Translate the common launch failures into actionable messages.
            if err.errno == errno.ENOENT:
                raise WebDriverException(
                    "'%s' executable needs to be in PATH. %s" % (
                        os.path.basename(self.path), self.start_error_message)
                )
            elif err.errno == errno.EACCES:
                raise WebDriverException(
                    "'%s' executable may have wrong permissions. %s" % (
                        os.path.basename(self.path), self.start_error_message)
                )
            else:
                raise
        except Exception as e:
            raise WebDriverException(
                "The executable %s needs to be available in the path. %s\n%s" %
                (os.path.basename(self.path), self.start_error_message, str(e)))
        # Poll once per second (30 s max) until the service accepts TCP
        # connections; bail out early if the process dies first.
        count = 0
        while True:
            self.assert_process_still_running()
            if self.is_connectable():
                break
            count += 1
            time.sleep(1)
            if count == 30:
                raise WebDriverException("Can not connect to the Service %s" % self.path)

    def assert_process_still_running(self):
        """Raise WebDriverException if the child process has exited."""
        return_code = self.process.poll()
        if return_code is not None:
            raise WebDriverException(
                'Service %s unexpectedly exited. Status code was: %s'
                % (self.path, return_code)
            )

    def is_connectable(self):
        """Return True once the service's port accepts connections."""
        return utils.is_connectable(self.port)

    def send_remote_shutdown_command(self):
        """Ask the service to shut itself down via its /shutdown endpoint,
        then wait (up to ~30 s) for its port to close."""
        try:
            from urllib import request as url_request
            URLError = url_request.URLError
        except ImportError:
            # Python 2 fallback.
            import urllib2 as url_request
            import urllib2
            URLError = urllib2.URLError
        try:
            url_request.urlopen("%s/shutdown" % self.service_url)
        except URLError:
            return
        for x in range(30):
            if not self.is_connectable():
                break
            else:
                time.sleep(1)

    def stop(self):
        """
        Stops the service.
        """
        if self.log_file != PIPE and not (self.log_file == DEVNULL and _HAS_NATIVE_DEVNULL):
            try:
                self.log_file.close()
            except Exception:
                pass
        if self.process is None:
            return
        try:
            self.send_remote_shutdown_command()
        except TypeError:
            pass
        try:
            if self.process:
                # Close the pipes before signalling to avoid leaking fds.
                for stream in [self.process.stdin,
                               self.process.stdout,
                               self.process.stderr]:
                    try:
                        stream.close()
                    except AttributeError:
                        pass
                self.process.terminate()
                self.process.wait()
                self.process.kill()
                self.process = None
        except OSError:
            pass

    def __del__(self):
        # `subprocess.Popen` doesn't send signal on `__del__`;
        # so we attempt to close the launched process when `__del__`
        # is triggered.
        try:
            self.stop()
        except Exception:
            pass
| apache-2.0 |
RussianOtter/networking | AestheticAssembly.py | 1 | 11241 | import random, string, time, console, sys
logo = """ \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88 \xe2\x96\x88\xe2\x96\x88\xe2\x96\x93\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88 \xe2\x96\x93\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88 \xe2\x96\x84\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x84 \xe2\x96\x84\xe2\x96\x84\xe2\x96\x84\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x93 \xe2\x96\x88\xe2\x96\x88\xe2\x96\x80\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88 \xe2\x96\x93\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88 \n\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88 \xe2\x96\x92 \xe2\x96\x93\xe2\x96\x88\xe2\x96\x88\xe2\x96\x91 \xe2\x96\x88\xe2\x96\x88\xe2\x96\x92\xe2\x96\x93\xe2\x96\x88 \xe2\x96\x80 \xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x80 \xe2\x96\x80\xe2\x96\x88 \xe2\x96\x93 \xe2\x96\x88\xe2\x96\x88\xe2\x96\x92 \xe2\x96\x93\xe2\x96\x92\xe2\x96\x93\xe2\x96\x88\xe2\x96\x88 \xe2\x96\x92 \xe2\x96\x88\xe2\x96\x88\xe2\x96\x92\xe2\x96\x93\xe2\x96\x88 \xe2\x96\x80 \n\xe2\x96\x91 \xe2\x96\x93\xe2\x96\x88\xe2\x96\x88\xe2\x96\x84 \xe2\x96\x93\xe2\x96\x88\xe2\x96\x88\xe2\x96\x91 \xe2\x96\x88\xe2\x96\x88\xe2\x96\x93\xe2\x96\x92\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88 \xe2\x96\x92\xe2\x96\x93\xe2\x96\x88 \xe2\x96\x84 \xe2\x96\x92 \xe2\x96\x93\xe2\x96\x88\xe2\x96\x88\xe2\x96\x91 \xe2\x96\x92\xe2\x96\x91\xe2\x96\x93\xe2\x96\x88\xe2\x96\x88 \xe2\x96\x91\xe2\x96\x84\xe2\x96\x88 \xe2\x96\x92\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88 \n \xe2\x96\x92 \xe2\x96\x88\xe2\x96\x88\xe2\x96\x92\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x84\xe2\x96\x88\xe2\x96\x93\xe2\x96\x92 \xe2\x96\x92\xe2\x96\x92\xe2\x96\x93\xe2\x96\x88 \xe2\x96\x84 \xe2\x96\x92\xe2\x96\x93\xe2\x96\x93\xe2\x96\x84 \xe2\x96\x84\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92\xe2\x96\x91 \xe2\x96\x93\xe2\x96\x88\xe2\x96\x88\xe2\x96\x93 \xe2\x96\x91 \xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x80\xe2\x96\x80\xe2\x96\x88\xe2\x96\x84 \xe2\x96\x92\xe2\x96\x93\xe2\x96\x88 \xe2\x96\x84 
\n\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92 \xe2\x96\x91 \xe2\x96\x91\xe2\x96\x91\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92\xe2\x96\x92 \xe2\x96\x93\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x80 \xe2\x96\x91 \xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92 \xe2\x96\x91 \xe2\x96\x91\xe2\x96\x88\xe2\x96\x88\xe2\x96\x93 \xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92\xe2\x96\x91\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92\n\xe2\x96\x92 \xe2\x96\x92\xe2\x96\x93\xe2\x96\x92 \xe2\x96\x92 \xe2\x96\x91\xe2\x96\x92\xe2\x96\x93\xe2\x96\x92\xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91\xe2\x96\x91\xe2\x96\x91 \xe2\x96\x92\xe2\x96\x91 \xe2\x96\x91\xe2\x96\x91 \xe2\x96\x91\xe2\x96\x92 \xe2\x96\x92 \xe2\x96\x91 \xe2\x96\x92 \xe2\x96\x91\xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x92\xe2\x96\x93 \xe2\x96\x91\xe2\x96\x92\xe2\x96\x93\xe2\x96\x91\xe2\x96\x91\xe2\x96\x91 \xe2\x96\x92\xe2\x96\x91 \xe2\x96\x91\n\xe2\x96\x91 \xe2\x96\x91\xe2\x96\x92 \xe2\x96\x91 \xe2\x96\x91\xe2\x96\x91\xe2\x96\x92 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x92 \xe2\x96\x91 \xe2\x96\x91\xe2\x96\x92 \xe2\x96\x91 \xe2\x96\x92\xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91\n\xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91\xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91\xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \n \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91\xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91\n \xe2\x96\x91"""
melt = """
\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x84 \xe2\x96\x84\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x93\xe2\x96\x93\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88 \xe2\x96\x88\xe2\x96\x88\xe2\x96\x93 \xe2\x96\x84\xe2\x96\x84\xe2\x96\x84\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x93\xe2\x96\x93\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x84 \xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88 \xe2\x96\x88 \xe2\x96\x88\xe2\x96\x91\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x84 \xe2\x96\x88 \n\xe2\x96\x93\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92\xe2\x96\x80\xe2\x96\x88\xe2\x96\x80 \xe2\x96\x88\xe2\x96\x88\xe2\x96\x92\xe2\x96\x93\xe2\x96\x88 \xe2\x96\x80 \xe2\x96\x93\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92 \xe2\x96\x93 \xe2\x96\x88\xe2\x96\x88\xe2\x96\x92 \xe2\x96\x93\xe2\x96\x92\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x80 \xe2\x96\x88\xe2\x96\x88\xe2\x96\x8c\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92 \xe2\x96\x88\xe2\x96\x88\xe2\x96\x92\xe2\x96\x93\xe2\x96\x88\xe2\x96\x91 \xe2\x96\x88 \xe2\x96\x91\xe2\x96\x88\xe2\x96\x91\xe2\x96\x88\xe2\x96\x88 \xe2\x96\x80\xe2\x96\x88 \xe2\x96\x88 \n\xe2\x96\x93\xe2\x96\x88\xe2\x96\x88 \xe2\x96\x93\xe2\x96\x88\xe2\x96\x88\xe2\x96\x91\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88 \xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x91 \xe2\x96\x92 \xe2\x96\x93\xe2\x96\x88\xe2\x96\x88\xe2\x96\x91 \xe2\x96\x92\xe2\x96\x91\xe2\x96\x91\xe2\x96\x88\xe2\x96\x88 \xe2\x96\x88\xe2\x96\x8c\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x91 \xe2\x96\x88\xe2\x96\x88\xe2\x96\x92\xe2\x96\x92\xe2\x96\x88\xe2\x96\x91 \xe2\x96\x88 \xe2\x96\x91\xe2\x96\x88\xe2\x96\x93\xe2\x96\x88\xe2\x96\x88 \xe2\x96\x80\xe2\x96\x88 \xe2\x96\x88\xe2\x96\x88\xe2\x96\x92\n\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88 \xe2\x96\x92\xe2\x96\x88\xe2\x96\x88 \xe2\x96\x92\xe2\x96\x93\xe2\x96\x88 \xe2\x96\x84 \xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x91 \xe2\x96\x91 
\xe2\x96\x93\xe2\x96\x88\xe2\x96\x88\xe2\x96\x93 \xe2\x96\x91 \xe2\x96\x91\xe2\x96\x93\xe2\x96\x88\xe2\x96\x84 \xe2\x96\x8c\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88 \xe2\x96\x88\xe2\x96\x88\xe2\x96\x91\xe2\x96\x91\xe2\x96\x88\xe2\x96\x91 \xe2\x96\x88 \xe2\x96\x91\xe2\x96\x88\xe2\x96\x93\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92 \xe2\x96\x90\xe2\x96\x8c\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92\n\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92 \xe2\x96\x91\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92\xe2\x96\x91\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92\xe2\x96\x91\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92 \xe2\x96\x91 \xe2\x96\x91\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x93 \xe2\x96\x91 \xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x93\xe2\x96\x92\xe2\x96\x91\xe2\x96\x91\xe2\x96\x91\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x93\xe2\x96\x92\xe2\x96\x88\xe2\x96\x88\xe2\x96\x91 \xe2\x96\x93\xe2\x96\x88\xe2\x96\x88\xe2\x96\x91\n\xe2\x96\x91 \xe2\x96\x92\xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91\xe2\x96\x91\xe2\x96\x91 \xe2\x96\x92\xe2\x96\x91 \xe2\x96\x91\xe2\x96\x91 \xe2\x96\x92\xe2\x96\x91\xe2\x96\x93 \xe2\x96\x91\xe2\x96\x92 \xe2\x96\x91\xe2\x96\x91 \xe2\x96\x92\xe2\x96\x92\xe2\x96\x93 \xe2\x96\x92 \xe2\x96\x91 \xe2\x96\x92\xe2\x96\x91\xe2\x96\x92\xe2\x96\x91\xe2\x96\x92\xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x93\xe2\x96\x91\xe2\x96\x92 \xe2\x96\x92 \xe2\x96\x91 \xe2\x96\x92\xe2\x96\x91 \xe2\x96\x92 \xe2\x96\x92 \n\xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91\xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x92 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x92 \xe2\x96\x92 \xe2\x96\x91 \xe2\x96\x92 \xe2\x96\x92\xe2\x96\x91 \xe2\x96\x92 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91\xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x92\xe2\x96\x91\n\xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 
\xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x92 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \n \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \xe2\x96\x91 \n \xe2\x96\x91
"""
def rand_hex():
    """Return two pseudo-random uppercase hexadecimal characters."""
    pool = string.hexdigits.upper()
    return "".join(random.choice(pool) for _ in range(2))
def rand_point():
    """Build a fake memory address: '0x' followed by 9 hex digits,
    occasionally (1 in 3) doubling the most recent digit."""
    addr = "0x"
    while len(addr) < 11:
        addr += rand_hex()
        if random.randint(0, 2) == 0:
            addr += addr[-1:]  # repeat the last digit for variety
    return addr[:11]
def rand_line(depth=1):
    """Return `depth` fake pointer tokens joined by single spaces
    (a trailing space is included).

    Bug fix: the original bound an extra rand_point() result to an unused
    local (`l`), wasting one RNG draw per call; the dead assignment has
    been removed.
    """
    line = ""
    while line.count(" ") < depth:
        line += rand_point()
        line += " "
    return line
def rand_out(rate=0.05,t=20):
    """Print t*2 bursts of t pseudo-random hexdump-style lines, sleeping
    `rate` seconds between lines.  `r` picks how many trailing hex byte
    pairs each line gets (12, 8, 4 or none), followed by 6 noise chars.

    NOTE(review): the source dump lost its indentation; the nesting below
    was reconstructed from the control flow — confirm against upstream.
    """
    for _ in range(t*2):
        # One random "format" per burst.
        r = random.randint(0,3)
        for i in range(t):
            b = rand_line(r)
            if r == 0:
                for _ in range(12):
                    b += rand_hex() + " "
            if r == 1:
                for _ in range(8):
                    b += rand_hex() + " "
            if r == 2:
                for _ in range(4):
                    b += rand_hex() + " "
            # `if True:` kept from the original; it always appends 6 noise
            # characters (dot or a random printable).
            if True:
                for _ in range(6):
                    if random.randint(0,1) == 0:
                        b += "."
                    else:
                        b += random.choice(string.printable[:92])
            if len(b) > 5:
                print b
            time.sleep(rate)
def typeit(msg):
    """Write msg to stdout one character at a time, 50 ms apart, to mimic
    typing.  The stream is not flushed explicitly."""
    delay = 0.05
    for ch in msg:
        sys.stdout.write(ch)
        time.sleep(delay)
def seg1():
    """Return a fixed-width (20 chars + ' ;') comma-separated run of
    random alphanumeric fragments, used as a fake operand column."""
    seg = ""
    while len(seg) < random.randint(12,22):
        seg += "".join(random.sample(string.digits+string.ascii_lowercase*2,random.randint(2,8)))+","+"*"*random.randint(0,3)+" "
    # The '*' runs only pad the loop's length test; strip them afterwards.
    seg = seg.replace("*","")
    # Pad to at least 21 chars, then trim to exactly 20 plus terminator.
    while len(seg) < 21:
        seg += " "
    return seg[:20]+" ;"
class pointer():
    """Sequential fake memory-address generator: each call to next()
    formats the counter as '0x' plus (at least) 8 hex digits, then
    advances it by one."""

    def __init__(self, start=0):
        # Current counter value.
        self.i = start

    def next(self):
        """Return the current counter as a hex address and advance it."""
        # Quirk kept from the original: the hex digits are first zero
        # filled to the *decimal* width of the counter, then padded out
        # to 8 characters.
        digits = hex(self.i)[2:].zfill(len(str(self.i)))
        addr = "0x" + "0" * (8 - len(digits)) + digits
        self.i += 1
        return addr
def seg2():
    """Return a fake disassembly column: a 15-char label/assignment field
    followed by a 32-char run of hex byte pairs.

    NOTE(review): the source dump lost its indentation; the nesting below
    was reconstructed from the length logic — confirm against upstream.
    """
    seg = ""
    # Candidate left-hand fields: one 'name = ' form and three '.NAME' forms.
    opt = [
        "".join(random.sample(string.ascii_lowercase*3,random.randint(4,6)))+" = ",
        "."+"".join(random.sample(string.ascii_uppercase*3,random.randint(4,6)))+" "*4,
        "."+"".join(random.sample(string.ascii_uppercase*3,random.randint(4,6)))+" "*4,
        "."+"".join(random.sample(string.ascii_uppercase*3,random.randint(4,6)))+" "*4,
    ]
    while len(seg) < random.randint(18,30):
        seg += rand_hex()+" "
    seg += rand_hex()
    # Pad/trim the hex run to exactly 32 characters.
    while len(seg) < 32:
        seg += " "
    seg = seg[:32]
    # Pad the chosen left-hand field to 15 characters.
    sg = random.choice(opt)
    while len(sg) < 15:
        sg += " "
    return sg+seg
def spectre():
    """Play the full visual: red logo, scrolling fake disassembly with
    occasional red highlights, 'melt' banner, then an animated
    memory-address counter.  Pythonista-only: uses the `console` module
    for fonts and colors.

    NOTE(review): the source dump lost its indentation; the nesting below
    was reconstructed from the control flow — confirm against upstream.
    """
    time.sleep(1)
    console.set_font("Menlo",10)
    console.set_color(0.9,0,0)
    print "\n"
    print logo
    print
    time.sleep(2.5)
    console.set_color()
    console.set_font("Menlo",7)
    pr = pointer(47839215)
    # lc tracks whether the previous line was printed highlighted in red.
    lc = False
    for _ in range(500*2):
        if lc:
            console.set_color()
            lc = False
        # Roughly 1-in-36 lines get highlighted.
        if not random.randint(0,35):
            console.set_color(1,0,0)
            lc = True
        o1 = pr.next()+" "+seg1()+" "+seg2()
        o2 = pr.next()+" "*25+seg2()
        o3 = pr.next()
        print random.choice([o1,o1,o1,o2,o2,o3])
        if lc:
            time.sleep(0.02)
        time.sleep(0.01)
    console.set_font("Menlo",8.62)
    console.set_color(0.9,0,0)
    print "\n"
    for _ in melt.split("\n"):
        print _
        time.sleep(0.01)
    time.sleep(2.5)
    console.set_color()
    console.set_font("Menlo",14)
    typeit("Leaking Memory "+"."*18+" [ OK ]")
    pr = pointer(47839215)
    print
    for _ in range(5000):
        sys.stdout.write("\rMemory Address "+"."*12+" [ "+pr.next()+" ]")
        time.sleep(0.01)
spectre()
| gpl-3.0 |
augmify/omim | tools/python/stylesheet/drules_to_mapcss.py | 53 | 19179 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import os
import drules_struct_pb2
from webcolors.webcolors import hex_to_color_name
from operator import *
from copy import *
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
DATADIR = "../../../data"
files = {
"mapcss_mapping": os.path.join(DATADIR, 'mapcss-mapping.csv'),
"drules_proto": os.path.join(DATADIR, 'drules_proto.bin'),
}
MINIMAL_WIDTH = .1
LAST_ZOOM = 17
FLAVOUR = set(["JOSM"]) # "JOSM"
FLAVOUR = []
def number(f):
    """Coerce f (number or numeric string) to an int when it represents a
    whole number, otherwise to a float rounded/trimmed to two decimals."""
    as_float = float(f)
    # Compare against float(str(f)) so string inputs like "3.0" are
    # recognised as integral, exactly as the original did.
    if float(int(as_float)) == float(str(f)):
        return int(as_float)
    trimmed = ('%.2f' % as_float).rstrip('0')
    return float(trimmed)
def color_to_properties(color, prefix=""):
    """Convert a packed drules color (alpha in the top byte, RGB in the
    low 24 bits) into a dict of MapCSS properties: '<prefix->color' and,
    when not fully opaque, '<prefix->opacity'."""
    if prefix:
        prefix += "-"
    colorvar = prefix + "color"
    opacityvar = prefix + "opacity"
    dd = {}
    # Bug fix: hex() drops leading zeros, so colors with leading zero bytes
    # produced malformed strings such as "#ff" instead of "#0000ff".
    # "%06x" zero-pads to the six digits a hex color requires.
    dd[colorvar] = hex_to_color_name("#%06x" % (int(color) % 0x1000000))  # leave only last 24 bits
    opacity = (255 - (int(color) >> 24)) / 255.
    if opacity != 1:
        dd[opacityvar] = number(opacity)
    return dd
print """
canvas {
background-color: #f1eee8;
default-lines: false;
default-points: false;
}
way::* {
linejoin: round;
linecap: round;
fill-opacity: 0;
casing-linecap: none;
text-position: line;
}
*::* {
text-halo-color: white;
text-anchor-horizontal: center;
text-anchor-vertical: center;
}
node::* {
text-anchor-vertical: top;
}
area::* {
text-position: center;
text-anchor-vertical: center;
}
area[landuse],
area[natural],
area[leisure],
area[place] {fill-position: background}
"""
if True:
classificator = {}
classificator_mapping = {}
class_order = []
for row in csv.reader(open(files['mapcss_mapping']), delimiter=';'):
try:
pairs = [i.strip(']').split("=") for i in row[1].split(',')[0].split('[')]
except:
print row
classificator_mapping[row[0].replace("|", "-")] = row[1]
kv = {}
for i in pairs:
if len(i) == 1:
if i[0]:
if i[0][0] == "!":
kv[i[0][1:].strip('?')] = "no"
else:
kv[i[0].strip('?')] = "yes"
else:
kv[i[0].strip('?')] = i[1]
classificator[row[0].replace("|", "-")] = kv
if row[2] != "x":
class_order.append(row[0].replace("|", "-"))
class_order.sort()
drules = drules_struct_pb2.ContainerProto()
drules.ParseFromString(open(files["drules_proto"]).read())
names = set()
linejoins = {drules_struct_pb2.BEVELJOIN: "bevel", drules_struct_pb2.ROUNDJOIN: "round", drules_struct_pb2.NOJOIN: "none"}
linecaps = {drules_struct_pb2.SQUARECAP: "square", drules_struct_pb2.ROUNDCAP: "round", drules_struct_pb2.BUTTCAP: "none"}
deduped_sheet = {}
for elem in drules.cont:
visible_on_zooms = []
if elem.name not in class_order and elem.element:
print >> sys.stderr, elem.name, "rendered but not in classificator"
continue
names.add(elem.name)
if not elem.element:
print >> sys.stderr, elem.name, "is not rendered"
continue
for el in elem.element:
etype = set()
zoom = el.scale
if zoom <= 0:
continue
# if zoom <= 1:
# zoom = -1
visible_on_zooms.append(zoom)
selector = classificator_mapping[elem.name]
kvrules = [{}]
tdashes = False
for tline in el.lines:
if tline.pathsym.name or tline.dashdot.dd:
tdashes = True
if len(el.lines) == 2 and not tdashes: # and not "bridge" in elem.name:
"""
line and casing, no dashes
"""
etype.add("line")
if el.lines[0].priority < el.lines[1].priority:
tline = el.lines[1]
tcasing = el.lines[0]
elif el.lines[0].priority > el.lines[1].priority:
tline = el.lines[0]
tcasing = el.lines[1]
else:
print >> sys.stderr, elem.name, "has two lines on same z"
twidth = tline.width
if twidth < MINIMAL_WIDTH:
print >> sys.stderr, elem.name, "has invisible lines on zoom", zoom
else:
# tlinedashes =
kvrules[0]["width"] = number(twidth)
kvrules[0].update(color_to_properties(tline.color))
kvrules[0]["z-index"] = number(tline.priority)
tlinedashes = ",".join([str(number(t)) for t in tline.dashdot.dd])
if tlinedashes:
kvrules[0]["dashes"] = tlinedashes
if tline.HasField("cap"):
kvrules[0]["linecap"] = linecaps.get(tline.cap, 'round')
if tline.HasField("join"):
kvrules[0]["linejoin"] = linejoins.get(tline.join, 'round')
tcasingwidth = (tcasing.width - tline.width) / 2
if ("width" not in kvrules[0]) and tcasingwidth < MINIMAL_WIDTH:
print >> sys.stderr, elem.name, "has invisible casing on zoom", zoom
else:
tcasingdashes = ",".join([str(number(t)) for t in tcasing.dashdot.dd])
if tlinedashes != tcasingdashes:
kvrules[0]["casing-dashes"] = tcasingdashes
kvrules[0]["casing-width"] = number(tcasingwidth)
kvrules[0].update(color_to_properties(tcasing.color, "casing"))
if tcasing.HasField("cap"):
kvrules[0]["casing-linecap"] = linecaps.get(tcasing.cap, 'round')
if tcasing.HasField("join"):
kvrules[0]["casing-linejoin"] = linejoins.get(tcasing.join, 'round')
elif len(el.lines) > 0:
"""
do we have lines at all?
"""
etype.add("line")
for tline in el.lines:
tkv = {}
twidth = tline.width
tlinedashes = ",".join([str(number(t)) for t in tline.dashdot.dd])
if twidth < MINIMAL_WIDTH:
if not tline.pathsym.name:
print >> sys.stderr, elem.name, "has invisible lines on zoom", zoom
else:
tkv["width"] = number(twidth)
tkv.update(color_to_properties(tline.color))
tkv["z-index"] = number(tline.priority)
if tline.HasField("cap"):
tkv["linecap"] = linecaps.get(tline.cap, 'round')
if tline.HasField("join"):
tkv["linejoin"] = linejoins.get(tline.join, 'round')
if tline.dashdot.dd:
tkv["dashes"] = tlinedashes
for trule in kvrules:
if "width" not in trule and tkv["z-index"] == trule.get("z-index", tkv["z-index"]):
trule.update(tkv)
break
else:
kvrules.append(tkv)
if tline.pathsym.name:
kvrules[0]["pattern-image"] = tline.pathsym.name + ".svg"
kvrules[0]["pattern-spacing"] = number(tline.pathsym.step - 16)
kvrules[0]["pattern-offset"] = number(tline.pathsym.offset)
kvrules[0]["z-index"] = number(tline.priority)
if el.area.color:
etype.add("area")
tkv = {}
tkv["z-index"] = el.area.priority
tkv.update(color_to_properties(el.area.color, "fill"))
# if el.area.border.width:
# tline = el.area.border
# tkv["casing-width"] = str(tline.width)
# tkv.update(color_to_properties(tline.color, "casing"))
# if tline.dashdot.dd:
# tkv["casing-dashes"] = ",".join([str(t) for t in tline.dashdot.dd])
if not kvrules[0]:
kvrules[0] = tkv
else:
kvrules.append(tkv)
if el.symbol.name:
if el.symbol.apply_for_type == 0:
etype.add("node")
etype.add("area")
elif el.symbol.apply_for_type == 1:
etype.add("node")
elif el.symbol.apply_for_type == 2:
etype.add("area")
kvrules[0]["icon-image"] = el.symbol.name + ".svg"
if el.circle.radius:
etype.add("node")
kvrules[0]["symbol-shape"] = "circle"
kvrules[0]["symbol-size"] = number(el.circle.radius)
kvrules[0].update(color_to_properties(el.circle.color, "symbol-fill"))
if el.caption.primary.height:
etype.add("node")
etype.add("area")
kvrules[0].update(color_to_properties(el.caption.primary.color, "text"))
if el.caption.primary.stroke_color:
kvrules[0].update(color_to_properties(el.caption.primary.stroke_color, "text-halo"))
kvrules[0]["text-halo-radius"] = 2
kvrules[0]["text"] = "name"
if "building" in selector:
kvrules[0]["text"] = "addr:housenumber"
kvrules[0]["font-size"] = el.caption.primary.height
if el.caption.primary.offset_y:
kvrules[0]["text-offset"] = el.caption.primary.offset_y
if el.caption.secondary.height:
tkv = {}
etype.add("node")
etype.add("area")
tkv.update(color_to_properties(el.caption.secondary.color, "text"))
if el.caption.secondary.stroke_color:
tkv.update(color_to_properties(el.caption.secondary.stroke_color, "text-halo"))
tkv["text-halo-radius"] = 2
tkv["text"] = "int_name"
tkv["font-size"] = el.caption.secondary.height
if el.caption.primary.offset_y:
tkv["text-offset"] = el.caption.secondary.offset_y
kvrules.append(tkv)
if el.path_text.primary.height:
etype.add("line")
kvrules[0].update(color_to_properties(el.path_text.primary.color, "text"))
if el.path_text.primary.stroke_color:
kvrules[0].update(color_to_properties(el.path_text.primary.stroke_color, "text-halo"))
kvrules[0]["text-halo-radius"] = 2
kvrules[0]["text"] = "name"
kvrules[0]["text-position"] = "line"
if "building" in selector:
kvrules[0]["text"] = "addr:housenumber"
kvrules[0]["font-size"] = el.path_text.primary.height
if el.path_text.secondary.height:
tkv = {}
etype.add("line")
tkv.update(color_to_properties(el.path_text.secondary.color, "text"))
if el.path_text.secondary.stroke_color:
tkv.update(color_to_properties(el.path_text.secondary.stroke_color, "text-halo"))
tkv["text-halo-radius"] = 2
tkv["text"] = "int_name"
tkv["text-position"] = "line"
tkv["font-size"] = el.path_text.secondary.height
kvrules.append(tkv)
tt = []
if "[area?]" in selector:
etype.discard("way")
etype.discard("line")
etype.add("area")
selector = selector.replace("[area?]", "")
if ("line" in etype) and ("JOSM" in FLAVOUR):
etype.add("way")
etype.discard("line")
for tetype in etype:
# lzoom = zoom
# if zoom == LAST_ZOOM:
# lzoom = str(zoom)+"-"
for tsel in selector.split(","):
tsel = tsel.strip()
# tt.append( "%(tetype)s|z%(lzoom)s%(tsel)s"%(locals()))
tt.append([tetype, zoom, zoom, tsel, ''])
tl = 0
for kvrul in kvrules:
if not kvrul:
continue
tsubpart = ""
filt = {
#'z': [['z-index', 'fill-position'], []],
#'halo': [['text-halo-radius', 'text-halo-color'], []],
'text': [['font-size', 'text-offset', 'text', 'text-color', 'text-position', 'text-halo-radius', 'text-halo-color'], []],
#'casing': ['casing-width', 'casing-dashes', 'casing-color'],
'icon': [['icon-image', 'symbol-shape', 'symbol-size', 'symbol-fill-color'], ['node', 'area']],
'fill': [['fill-color', 'fill-opacity'], ['area']]
}
genkv = []
for k, v in filt.iteritems():
f = {}
for vi in v[0]:
if vi in kvrul:
f[vi] = kvrul[vi]
del kvrul[vi]
if f:
genkv.append(f)
genkv.append(kvrul)
for kvrule in genkv:
tl += 1
# print selector
if (tl > 1) or ("bridge" in selector) or ("junction" in selector):
tsubpart = "::d%sp%s" % (elem.name.count("-"), tl)
if ("bridge" in selector):
kvrule['z-index'] = tl
if "dashes" in kvrule:
kvrule["linecap"] = "none"
if float(kvrule.get('z-index', 0)) < -5000:
kvrule['fill-position'] = 'background'
if "z-index" in kvrule:
del kvrule['z-index']
# kvrule['z-index'] = float(kvrule.get('z-index',0)) + 9962
if float(kvrule.get('z-index', 0)) > 10000:
kvrule['-x-kot-layer'] = 'top'
kvrule['z-index'] = float(kvrule.get('z-index', 0)) - 10000
if float(kvrule.get('fill-opacity', 1)) == 0:
for discard in ['fill-color', 'fill-opacity', 'z-index', 'fill-position']:
if discard in kvrule:
del kvrule[discard]
if float(kvrule.get('opacity', 1)) == 0:
for discard in ['color', 'width', 'z-index']:
if discard in kvrule:
del kvrule[discard]
if float(kvrule.get('z-index', 0)) == 0:
if 'z-index' in kvrule:
del kvrule['z-index']
key = kvrule.copy()
# if "z-index" in key:
# del key["z-index"]
if not kvrule:
continue
key = (frozenset(key.items()))
minzoom = min([i[1] for i in tt])
if key not in deduped_sheet:
deduped_sheet[key] = {
"sel": [],
"maxz": kvrule.get('z-index', 0),
"minz": kvrule.get('z-index', 0),
"minzoom": minzoom,
"z": kvrule.get('z-index', 0),
"kv": {}
}
tt = deepcopy(tt)
for t in tt:
t[-1] = tsubpart
deduped_sheet[key]['sel'].extend(tt)
deduped_sheet[key]['maxz'] = max(deduped_sheet[key]['maxz'], kvrule.get('z-index', 0))
deduped_sheet[key]['minz'] = max(deduped_sheet[key]['minz'], kvrule.get('z-index', 0))
deduped_sheet[key]['z'] = number((deduped_sheet[key]['minz'] + deduped_sheet[key]['maxz']) / 2)
deduped_sheet[key]['minzoom'] = min(deduped_sheet[key]['minzoom'], minzoom)
deduped_sheet[key]['kv'] = kvrule
# else:
# print >> sys.stderr, selector, el
skipped_unstyled_zooms = set(range(min(visible_on_zooms), max(visible_on_zooms) + 1)).difference(visible_on_zooms)
if skipped_unstyled_zooms:
print >> sys.stderr, elem.name, "has no styles available for zooms", ", ".join([str(i) for i in skipped_unstyled_zooms])
# print len(deduped_sheet)
dds = deduped_sheet.keys()
dds.sort(lambda k, v: int(deduped_sheet[k]['minzoom'] - deduped_sheet[v]['minzoom']))
allz = list(set([f['z'] for f in deduped_sheet.values()]))
allz.sort()
for tk in dds:
tv = deduped_sheet[tk]
tv['sel'].sort(key=itemgetter(0, 4, 3, 1, 2))
def dedup_zooms(lst, item):
if lst:
lst[-1] = lst[-1][:]
if lst[-1][0] == item[0] and lst[-1][3] in item[3] and lst[-1][4] == item[4] and lst[-1][2] >= item[2] and lst[-1][1] <= item[1]:
return lst
if lst[-1][0] == item[0] and lst[-1][3] == item[3] and lst[-1][4] == item[4] and lst[-1][2] == (item[1] - 1):
lst[-1][2] = item[2]
return lst
lst.append(item)
return lst
tv['sel'] = reduce(dedup_zooms, tv['sel'], [])
def format_string(i):
i = i[:]
dash = '-'
zmark = '|z'
if i[2] == LAST_ZOOM:
i[2] = ''
if i[1] == i[2]:
i[2] = ''
dash = ''
if i[1] <= 1:
i[1] = ''
if i[1] == i[2] and i[1] == '':
zmark = ''
dash = ''
return "%s%s%s%s%s%s%s" % (i[0], zmark, i[1], dash, i[2], i[3], i[4])
tv['sel'] = [format_string(i) for i in tv['sel']]
print (",\n").join([i for i in tv['sel']])
print "{"
kvrule = tv['kv']
kvrule['z-index'] = allz.index(tv['z'])
for k, v in kvrule.iteritems():
v = str(v)
if k == "z-index" and str(number(v)) == "0":
continue
# elif k == "z-index":
# v = str(2000 - int(v))
if " " in v or ":" in v or not v:
v = '"' + v + '"'
print " " + k + ":\t" + str(v) + ";"
print "}"
for i in names.symmetric_difference(class_order):
print >> sys.stderr, i, "in classificator but not rendered"
| apache-2.0 |
gbook/nidb | src/qcmodules/MRStructuralMotion/MRStructuralMotion.py | 1 | 14647 | # the order of these calls is important...
import sys
import os
import re
import getopt
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as pyplot
import scipy.ndimage
import scipy.misc
from scipy import stats
import numpy
import math
import string
import random
from subprocess import call
import shutil
import glob
import MySQLdb
import time
# -------- main --------
def main():
    """Compute the structural-motion QC metric for a single MR series.

    argv[1] is a qc_moduleseries row id. The series' DICOM files are copied
    to a temp directory, converted to 16-bit PNGs, each slice's FFT power
    spectrum is radially averaged, and the slope of a line fit to the mean
    radial profile is stored as the 'MotionR2' result (plus a summary plot
    and CPU time) in the NiDB database.

    NOTE(review): SQL strings below are built by concatenation/format from
    argv and database values -- assumes a trusted caller; not hardened
    against SQL injection.
    """
    # get start time
    t0 = time.clock()
    LoadConfig()
    db = MySQLdb.connect(host=cfg['mysqlhost'], user=cfg['mysqluser'], passwd=cfg['mysqlpassword'], db=cfg['mysqldatabase'])
    # indir is the original dicom directory for that series in the archive
    moduleseriesid = sys.argv[1]
    # get all the path information from the database
    sqlstring = "select * from qc_moduleseries where qcmoduleseries_id = " + moduleseriesid
    result = db.cursor(MySQLdb.cursors.DictCursor)
    result.execute(sqlstring)
    row = result.fetchone()
    seriesid = row['series_id']
    modality = row['modality']
    # get the paths to the raw data, and copy it to a temp directory
    sqlstring = "select a.series_num, a.is_derived, a.data_type, a.bold_reps, a.img_rows, b.study_num, d.uid from {0}_series a left join studies b on a.study_id = b.study_id left join enrollment c on b.enrollment_id = c.enrollment_id left join subjects d on c.subject_id = d.subject_id left join projects e on c.project_id = e.project_id where a.{1}series_id = '{2}'".format(modality,modality,seriesid)
    print(sqlstring)
    result = db.cursor(MySQLdb.cursors.DictCursor)
    result.execute(sqlstring)
    row = result.fetchone()
    uid = row['uid']
    study_num = row['study_num']
    series_num = row['series_num']
    datatype = row['data_type']
    boldreps = row['bold_reps']
    imgrows = row['img_rows']
    # this metric only applies to single-volume structural scans
    if boldreps > 1:
        print "Bold reps greater than 1, skipping"
        exit(0)
    # guard against very large images (mogrify/FFT cost)
    if imgrows > 512:
        print "Y dimension greater than 512 pixels, skipping"
        exit(0)
    # build the indir
    indir = "{0}/{1}/{2}/{3}/{4}".format(cfg['archivedir'], uid, study_num, series_num, datatype)
    print indir
    #exit(0)
    # create a tmp directory
    outdir = '/tmp/Py_' + GenerateRandomString()
    print ("Output directory: " + outdir)
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    # create a nifti file to check the sizes
    #systemstring = "{0}/./dcm2nii -b '{0}/dcm2nii_4D.ini' -a y -e y -g y -p n -i n -d n -f n -o '{1}' {2}/*.dcm".format(cfg['scriptdir'],outdir,indir)
    #print("\nRunning: [" + systemstring + "]\n")
    #call(systemstring, shell=True)
    # rename the file to 4D
    #systemstring = "mv {0}/*.nii.gz {0}/4D.nii.gz".format(outdir)
    #print("\nRunning: [" + systemstring + "]\n")
    #call(systemstring, shell=True)
    # get file dimensions
    #systemstring = "fslval"
    #dim4 =
    # copy all dicom files to outdir (in case you screw up and delete the raw dicoms :(
    systemstring = "cp " + indir + "/*.dcm " + outdir
    print("Running: [" + systemstring + "]")
    call(systemstring, shell=True)
    # go into the temp directory
    os.chdir(outdir)
    # convert all the dicom files in the input directory INTO the temp directory as png files
    systemstring = "mogrify -depth 16 -format png *.dcm"
    print("Running: [" + systemstring + "]")
    call(systemstring, shell=True)
    # get list of png files
    pngfiles = sorted(glob.glob('*.png'))
    #print pngfiles
    # check if there's only 1 file
    if len(pngfiles) < 2:
        print 0
        exit(0)
    i = 0
    #totala = totalb = 0
    #print '[%s]' % ', '.join(map(str, pngfiles))
    allhist = []
    # per-slice loop: FFT each PNG slice, radially average its power spectrum
    for pngfile in pngfiles:
        print os.path.exists(pngfile)
        brain = matplotlib.image.imread(pngfile)
        type(brain)
        print brain.shape
        print brain.dtype
        #fft = numpy.log10(1+abs(numpy.fft.fftshift(numpy.fft.fft2(brain))))
        fft = 1+abs(numpy.fft.fftshift(numpy.fft.fft2(brain)))
        filename = "slice%d.png"%i
        matplotlib.image.imsave(filename,fft)
        print "Entering into azimuthalAverage({0}/{1}/{2})".format(uid,study_num,series_num)
        histogram = azimuthalAverage(fft)
        print "Should be done with azimuthalAverage({0}/{1}/{2})".format(uid,study_num,series_num)
        # remove last element, because its always a NaN
        #print 'Before [%s]' % ', '.join(map(str, histogram))
        #print histogram.shape
        histogram = numpy.delete(histogram, -1, 0)
        #print 'After [%s]' % ', '.join(map(str, histogram))
        # add this histo to the total histo
        allhist.append(histogram)
        #print allhist.size
        #print float(i)
        #print float(len(pngfiles)-1.0)
        c = str(float(i)/float(len(pngfiles)-1.0))  # NOTE(review): computed but never used
        #print "%.1f %% complete"%( (float(i)/(len(pngfiles)-1))*100)
        lines = pyplot.plot(numpy.log10(histogram))
        pyplot.setp(lines, color='#0000AA', alpha=0.25)
        #totala += a
        #totalb += b
        i+=1
    print "Hello"
    allhist2 = numpy.vstack(allhist)
    # NOTE(review): axis=1 averages across frequency bins per slice (one value
    # per slice); confirm against the intent of a "mean histogram" (axis=0?).
    meanhistogram = allhist2.mean(axis=1)
    print len(meanhistogram)
    #del meanhistogram[-1]
    print '[%s]' % ', '.join(map(str, allhist))
    #a,b = linreg(range(len(meanhistogram)),meanhistogram)
    #print "a,b [%d,%d]",a,b
    # find mean slopes
    #meana = totala/float(i)
    #meanb = totalb/float(i)
    dists = []
    dists.extend(range(0,len(meanhistogram)))
    #print dists
    slope, intercept, r_value, p_value, std_err = stats.linregress(dists,meanhistogram)
    pyplot.setp(lines, color='#0000AA', alpha=0.25)
    print "R-value: "
    print (slope)  # NOTE(review): label says "R-value" but the slope is printed
    #write out the final composite histogram
    pyplot.xlabel('Frequency (lo -> hi)')
    pyplot.ylabel('Power (log10)')
    suptitle = 'Radial average of FFT (' + indir + ')'
    pyplot.suptitle(suptitle)
    title = "R^2: {0}".format(slope)
    pyplot.title(title)
    pyplot.grid(True)
    #slope, intercept = numpy.polyfit(meanhistogram, dists, 1)
    #idealhistogram = intercept + (slope * meanhistogram)
    #r_sq = numpy.r_squared(dists, idealhistogram)
    #r_sq = slope*slope
    #fit_label = 'Linear fit ({0:.2f})'.format(slope)
    #pyplot.plot(dists, idealhistogram, color='red', linestyle='--', label=fit_label)
    #pyplot.annotate('r^2 = {0:.2f}'.format(r_sq), (0.05, 0.9), xycoords='axes fraction')
    #pyplot.legend(loc='lower right')
    # save the figure
    pyplot.savefig('StructuralMotionHistogram.png')
    # record the slope/intercept
    if not os.path.exists(indir + "/qa"):
        os.makedirs(indir + "/qa")
    qafile = indir + "/qa/StructuralMotionR2.txt"
    file = open(qafile, "w")  # NOTE(review): 'file' shadows the builtin name
    theline = "%f"%(slope)
    file.write(theline)
    file.close()
    # get stop time
    t = time.clock() - t0
    # insert the result name into the database
    sqlstring = "select qcresultname_id from qc_resultnames where qcresult_name = 'MotionR2'"
    resultA = db.cursor(MySQLdb.cursors.DictCursor)
    resultA.execute(sqlstring)
    rowA = resultA.fetchone()
    if resultA.rowcount > 0:
        resultnameid = rowA['qcresultname_id']
    else:
        # insert a row
        sqlstring = "insert into qc_resultnames (qcresult_name, qcresult_type) values ('MotionR2','number')"
        print(sqlstring)
        resultB = db.cursor(MySQLdb.cursors.DictCursor)
        resultB.execute(sqlstring)
        resultnameid = resultB.lastrowid
    # InnoDB table... needs commit!
    sqlstring = "insert into qc_results (qcmoduleseries_id, qcresultname_id, qcresults_valuenumber, qcresults_datetime, qcresults_cputime) values ({0}, {1}, {2}, now(), {3})".format(moduleseriesid,resultnameid,slope,t)
    print(sqlstring)
    cursor = db.cursor()
    try:
        cursor.execute(sqlstring)
        db.commit()
    except:
        print("SQL statement [" + sqlstring + "] failed")
        db.rollback()
        exit(0)
    # insert the image name into the resultnames table
    sqlstring = "select qcresultname_id from qc_resultnames where qcresult_name = 'MotionR2 Plot'"
    resultA = db.cursor(MySQLdb.cursors.DictCursor)
    resultA.execute(sqlstring)
    rowA = resultA.fetchone()
    if resultA.rowcount > 0:
        resultnameid = rowA['qcresultname_id']
    else:
        # insert a row
        sqlstring = "insert into qc_resultnames (qcresult_name, qcresult_type) values ('MotionR2 Plot', 'image')"
        print(sqlstring)
        resultB = db.cursor(MySQLdb.cursors.DictCursor)
        resultB.execute(sqlstring)
        resultnameid = resultB.lastrowid
    # insert an entry for the image into the database
    sqlstring = "insert into qc_results (qcmoduleseries_id, qcresultname_id, qcresults_valuefile, qcresults_datetime) values ({0}, {1}, 'StructuralMotionHistogram.png', now())".format(moduleseriesid,resultnameid)
    print(sqlstring)
    cursor = db.cursor()
    try:
        cursor.execute(sqlstring)
        db.commit()
    except:
        print("SQL statement [" + sqlstring + "] failed")
        db.rollback()
        exit(0)
    # insert the R2 value into the mr_qa table
    sqlstring = "update mr_qa set motion_rsq = '{0}' where mrseries_id = {1}".format(r_value**2,seriesid)
    print(sqlstring)
    cursor = db.cursor()
    try:
        cursor.execute(sqlstring)
        db.commit()
    except:
        print("SQL statement [" + sqlstring + "] failed")
        db.rollback()
        exit(0)
    #copy the histogram back to the qa directory
    systemstring = "cp " + outdir + "/StructuralMotionHistogram.png " + "{0}/{1}/{2}/{3}/qa".format(cfg['archivedir'], uid, study_num, series_num)
    #print("Running: [" + systemstring + "]")
    call(systemstring, shell=True)
    # remove the temp directory and all its contents
    #shutil.rmtree(outdir)
    systemstring = "rm -r " + outdir
    print("Running: [" + systemstring + "]")
    call(systemstring, shell=True)
    exit(0)
# -----------------------------------------------------------------------------
# ---------- azimuthalAverage -------------------------------------------------
# -----------------------------------------------------------------------------
# Compute the radial average of an image. Assumes the FFT image is centered
# -----------------------------------------------------------------------------
def azimuthalAverage(image, center=None, stddev=False, returnradii=False, return_nr=False,
    binsize=1, weights=None, steps=False, interpnan=False, left=None, right=None):
    """
    Calculate the azimuthally averaged radial profile.

    image - The 2D image
    center - The [x,y] pixel coordinates used as the center. The default is
             None, which then uses the center of the image (including
             fractional pixels).
    stddev - if specified, return the azimuthal standard deviation instead of the average
    returnradii - if specified, return (radii_array,radial_profile)
    return_nr - if specified, return number of pixels per radius *and* radius
    binsize - size of the averaging bin.  Can lead to strange results if
        non-binsize factors are used to specify the center and the binsize is
        too large
    weights - can do a weighted average instead of a simple average if this keyword parameter
        is set.  weights.shape must = image.shape.  weighted stddev is undefined, so don't
        set weights and stddev.
    steps - if specified, will return a double-length bin array and radial
        profile so you can plot a step-form radial profile (which more accurately
        represents what's going on)
    interpnan - Interpolate over NAN values, i.e. bins where there is no data?
    left,right - passed to interpnan; they set the extrapolated values

    If a bin contains NO DATA, it will have a NAN value because of the
    divide-by-sum-of-weights component.  I think this is a useful way to denote
    lack of data, but users let me know if an alternative is prefered...
    """
    # Calculate the indices from the image
    y, x = numpy.indices(image.shape)
    if center is None:
        center = numpy.array([(x.max()-x.min())/2.0, (y.max()-y.min())/2.0])
    # per-pixel distance from the chosen center
    r = numpy.hypot(x - center[0], y - center[1])
    if weights is None:
        weights = numpy.ones(image.shape)
    elif stddev:
        # weighted standard deviation has no single accepted definition here
        raise ValueError("Weighted standard deviation is not defined.")
    # the 'bins' as initially defined are lower/upper bounds for each bin
    # so that values will be in [lower,upper)
    nbins = int(numpy.round(r.max() / binsize) + 1)
    maxbin = nbins * binsize
    bins = numpy.linspace(0, maxbin, nbins + 1)
    # but we're probably more interested in the bin centers than their left or right sides...
    bin_centers = (bins[1:] + bins[:-1]) / 2.0
    # Find out which radial bin each point in the map belongs to
    whichbin = numpy.digitize(r.flat, bins)
    # how many per bin (i.e., histogram)?
    # there are never any in bin 0, because the lowest index returned by digitize is 1
    nr = numpy.bincount(whichbin)[1:]
    # recall that bins are from 1 to nbins; 'range' (not py2-only 'xrange')
    # keeps this working under both Python 2 and 3
    if stddev:
        radial_prof = numpy.array([image.flat[whichbin == b].std()
                                   for b in range(1, nbins + 1)])
    else:
        # empty bins divide 0/0 and become NaN by design (see docstring)
        radial_prof = numpy.array([(image * weights).flat[whichbin == b].sum()
                                   / weights.flat[whichbin == b].sum()
                                   for b in range(1, nbins + 1)])
    #import pdb; pdb.set_trace()
    if interpnan:
        # x == x is False exactly for NaN, so this keeps only valid bins
        radial_prof = numpy.interp(bin_centers,
                                   bin_centers[radial_prof == radial_prof],
                                   radial_prof[radial_prof == radial_prof],
                                   left=left, right=right)
    if steps:
        # list(...) is required on Python 3 where zip returns an iterator
        xarr = numpy.array(list(zip(bins[:-1], bins[1:]))).ravel()
        yarr = numpy.array(list(zip(radial_prof, radial_prof))).ravel()
        return xarr, yarr
    elif returnradii:
        return bin_centers, radial_prof
    elif return_nr:
        return nr, bin_centers, radial_prof
    else:
        return radial_prof
# -----------------------------------------------------------------------------
# ---------- GenerateRandomString ---------------------------------------------
# -----------------------------------------------------------------------------
# Generate a random alphanumeric string (used to name unique temp directories)
# -----------------------------------------------------------------------------
def GenerateRandomString(size=10, chars=string.ascii_letters + string.digits):
    """Return a random identifier of *size* characters drawn from *chars*."""
    picks = [random.choice(chars) for _ in range(size)]
    return ''.join(picks)
# -----------------------------------------------------------------------------
# ---------- LoadConfig -------------------------------------------------------
# -----------------------------------------------------------------------------
# Load the NiDB configuration file which includes database and path info
# -----------------------------------------------------------------------------
def LoadConfig(path='/ado2/prod/programs/nidb.cfg'):
    """Load the NiDB configuration file into the module-global ``cfg`` dict.

    Each non-blank, non-comment line has the form ``[name] = value``; the
    brackets are stripped and ``cfg[name] = value`` is stored.

    Args:
      path: location of the config file. Defaults to the production path,
            so existing ``LoadConfig()`` callers are unaffected.

    Returns:
      The populated ``cfg`` dict (also left in the module global ``cfg``).
    """
    global cfg
    cfg = {}
    with open(path, 'r') as f:
        for line in f:
            line = line.strip()
            # skip blank lines and comments
            if line == "" or line.startswith("#"):
                continue
            # tolerate malformed lines instead of crashing on unpacking
            if ' = ' not in line:
                continue
            # maxsplit=1 keeps values that themselves contain ' = ' intact
            variable, value = line.split(' = ', 1)
            # keys are written as [name]; strip the brackets
            variable = re.sub(r'[\[\]]', '', variable)
            cfg[variable] = value
    return cfg
# Standard Python entry point: when the file is run as a script (rather than
# imported), invoke main() and use its return value as the process exit status.
if __name__ == "__main__":
sys.exit(main()) | gpl-3.0 |
geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/build/android/gyp/generate_resource_rewriter.py | 8 | 3415 | #!/usr/bin/env python
#
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generate ResourceRewriter.java which overwrites the given package's
resource id.
"""
import argparse
import os
import sys
import zipfile
from util import build_utils
# Import jinja2 from third_party/jinja2
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
'..',
'..',
'..',
'third_party')))
import jinja2
RESOURCE_REWRITER_JAVA="ResourceRewriter.java"
RESOURCE_REWRITER="""/* AUTO-GENERATED FILE. DO NOT MODIFY. */
package {{ package }};
/**
* Helper class used to fix up resource ids.
*/
class ResourceRewriter {
/**
* Rewrite the R 'constants' for the WebView.
*/
public static void rewriteRValues(final int packageId) {
{% for res_package in res_packages %}
{{ res_package }}.R.onResourcesLoaded(packageId);
{% endfor %}
}
}
"""
def ParseArgs(args):
  """Parses command line options.

  Args:
    args: the list of argument strings (typically sys.argv[1:]).

  Returns:
    A Namespace from argparse.parse_args()
  """
  parser = argparse.ArgumentParser(prog='generate_resource_rewriter')
  parser.add_argument('--package-name',
                      required=True,
                      help='The package name of ResourceRewriter.')
  # NB: adjacent string literals concatenate -- the trailing/leading spaces
  # below are required for the help text to read correctly.
  parser.add_argument('--dep-packages',
                      required=True,
                      help='A list of packages whose resource id will be '
                           'overwritten in ResourceRewriter.')
  parser.add_argument('--output-dir',
                      help='An output directory of the generated'
                           ' ResourceRewriter.java')
  parser.add_argument('--srcjar',
                      help='The path of the generated srcjar which has'
                           ' ResourceRewriter.java')
  return parser.parse_args(args)
def CreateResourceRewriter(package, res_packages, output_dir):
  """Write ResourceRewriter.java for *package* into *output_dir*."""
  build_utils.MakeDirectory(output_dir)
  # Render the Jinja template with the target package and its dependencies.
  rendered = jinja2.Template(RESOURCE_REWRITER,
                             trim_blocks=True,
                             lstrip_blocks=True).render(package=package,
                                                        res_packages=res_packages)
  with open(os.path.join(output_dir, RESOURCE_REWRITER_JAVA), 'w') as out:
    out.write(rendered)
def CreateResourceRewriterSrcjar(package, res_packages, srcjar_path):
  """Generate ResourceRewriter.java and pack it into the srcjar at *srcjar_path*."""
  with build_utils.TempDir() as temp_dir:
    # Mirror the Java package layout inside the temporary directory.
    package_dir = os.path.join(temp_dir, *package.split('.'))
    CreateResourceRewriter(package, res_packages, package_dir)
    generated = os.path.join(package_dir, RESOURCE_REWRITER_JAVA)
    build_utils.DoZip([generated], srcjar_path, temp_dir)
def main():
  """Entry point: emit ResourceRewriter.java as a directory tree or a srcjar."""
  options = ParseArgs(build_utils.ExpandFileArgs(sys.argv[1:]))
  package = options.package_name
  dep_packages = build_utils.ParseGnList(options.dep_packages)
  if options.output_dir:
    # Place the file under the package's directory hierarchy.
    target_dir = os.path.join(options.output_dir, *package.split('.'))
    CreateResourceRewriter(package, dep_packages, target_dir)
  else:
    CreateResourceRewriterSrcjar(package, dep_packages, options.srcjar)
  return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
TheTimmy/spack | var/spack/repos/builtin/packages/pacbio-damasker/package.py | 3 | 2074 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PacbioDamasker(MakefilePackage):
    """Damasker: The Dazzler Repeat Masking Suite. This is a special fork
    required for some pacbio utilities."""

    homepage = "https://github.com/PacificBiosciences/DAMASKER"
    url = "https://github.com/PacificBiosciences/DAMASKER"

    version('2017-02-11',
            git='https://github.com/PacificBiosciences/DAMASKER.git',
            commit='144244b77d52cb785cb1b3b8ae3ab6f3f0c63264')

    depends_on('gmake', type='build')

    def edit(self, spec, prefix):
        """Patch the makefiles to install into the Spack prefix."""
        mkdirp(prefix.bin)
        # Raw strings: these are regex patterns, and '\s' / '\$' in plain
        # string literals are invalid escape sequences on modern Python.
        makefile = FileFilter('Makefile')
        makefile.filter(r'DEST_DIR\s*=\s*~/bin', 'DEST_DIR = ' + prefix.bin)
        gmf = FileFilter('GNUmakefile')
        # rsync may be missing from minimal build environments; cp suffices.
        gmf.filter(r'rsync\s*-av\s*\$\{ALL\}\s*\$\{PREFIX\}/bin',
                   'cp ${ALL} ' + prefix.bin)
| lgpl-2.1 |
sznekol/django-cms | cms/south_migrations/0046_move_apphooks.py | 48 | 17147 | # -*- coding: utf-8 -*-
from south.v2 import DataMigration
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.model_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
class Migration(DataMigration):
def forwards(self, orm):
titles = orm['cms.Title'].objects.filter(application_urls__isnull=False).exclude(
application_urls="").select_related()
for title in titles:
page = title.page
page.application_urls = title.application_urls
page.save()
def backwards(self, orm):
for page in orm['cms.Page'].objects.filter(application_urls__isnull=False).exclude(
application_urls=""):
titles = page.title_set.all()
for title in titles:
title.application_urls = page.application_urls
title.save()
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [],
{'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')",
'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': (
'django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [],
{'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [],
{'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': (
'django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [],
{'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'object_name': 'Page'},
'application_urls': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [],
{'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': (
'django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': (
'django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [],
{'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')", 'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': (
'django.db.models.fields.TextField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [user_orm_label]},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_users'", 'to': "orm['%s']" % user_orm_label}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['%s']" % user_orm_label, 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': [u'auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_usergroups'", 'to': "orm['%s']" % user_orm_label}),
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
'application_urls': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'menu_title': (
'django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': (
'django.db.models.fields.TextField', [], {'max_length': '155', 'null': 'True', 'blank': 'True'}),
'page': (
'django.db.models.fields.related.ForeignKey', [],
{'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': (
'django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'redirect': (
'django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms.usersettings': {
'Meta': {'object_name': 'UserSettings'},
'clipboard': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Placeholder']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)",
'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
symmetrical = True
| bsd-3-clause |
Multi2Sim/m2s-bench-parsec-3.0-src | libs/libxml2/src/python/tests/sync.py | 87 | 3508 | #!/usr/bin/python -u
import sys
import libxml2
# Memory debug specific
libxml2.debugMemory(1)
log = ""
class callback:
    """SAX event handler for the libxml2 push parser.

    Every callback simply appends a short textual record to the module-global
    ``log`` string, so a test run can be compared against a reference trace.
    """

    def startDocument(self):
        global log
        log += "startDocument:"

    def endDocument(self):
        global log
        log += "endDocument:"

    def startElement(self, tag, attrs):
        global log
        log += "startElement %s %s:" % (tag, attrs)

    def endElement(self, tag):
        global log
        log += "endElement %s:" % (tag)

    def characters(self, data):
        global log
        log += "characters: %s:" % (data)

    def warning(self, msg):
        global log
        log += "warning: %s:" % (msg)

    def error(self, msg):
        global log
        log += "error: %s:" % (msg)

    def fatalError(self, msg):
        global log
        log += "fatalError: %s:" % (msg)
# Drive the libxml2 push (SAX) parser with a series of partial documents and
# compare the recorded callback trace against a known-good reference string.
# Each case feeds a single chunk with terminate=0, so unclosed elements must
# NOT produce endElement events yet.
# Note: prints are parenthesized single expressions, so the script behaves
# identically under Python 2 and Python 3.
handler = callback()

# Case 1: self-closing child element -- start and end callbacks both fire.
log = ""
chunk = """<foo><bar2/>"""
ctxt = libxml2.createPushParser(handler, None, 0, "test.xml")
ctxt.parseChunk(chunk, len(chunk), 0)
ctxt = None
reference = "startDocument:startElement foo None:startElement bar2 None:endElement bar2:"
if log != reference:
    print("Error got: %s" % log)
    print("Expected: %s" % reference)
    sys.exit(1)

# Case 2: explicit open/close pair -- identical trace to the self-closing form.
log = ""
chunk = """<foo><bar2></bar2>"""
ctxt = libxml2.createPushParser(handler, None, 0, "test.xml")
ctxt.parseChunk(chunk, len(chunk), 0)
ctxt = None
reference = "startDocument:startElement foo None:startElement bar2 None:endElement bar2:"
if log != reference:
    print("Error got: %s" % log)
    print("Expected: %s" % reference)
    sys.exit(1)

# Case 3: unclosed child -- no endElement may be reported yet.
log = ""
chunk = """<foo><bar2>"""
ctxt = libxml2.createPushParser(handler, None, 0, "test.xml")
ctxt.parseChunk(chunk, len(chunk), 0)
ctxt = None
reference = "startDocument:startElement foo None:startElement bar2 None:"
if log != reference:
    print("Error got: %s" % log)
    print("Expected: %s" % reference)
    sys.exit(1)

# Case 4: self-closing element with attributes (mixed quote styles).
log = ""
chunk = """<foo><bar2 a="1" b='2' />"""
ctxt = libxml2.createPushParser(handler, None, 0, "test.xml")
ctxt.parseChunk(chunk, len(chunk), 0)
ctxt = None
reference = "startDocument:startElement foo None:startElement bar2 {'a': '1', 'b': '2'}:endElement bar2:"
if log != reference:
    print("Error got: %s" % log)
    print("Expected: %s" % reference)
    sys.exit(1)

# Case 5: attributes on an element left open -- attributes delivered, no end event.
log = ""
chunk = """<foo><bar2 a="1" b='2' >"""
ctxt = libxml2.createPushParser(handler, None, 0, "test.xml")
ctxt.parseChunk(chunk, len(chunk), 0)
ctxt = None
reference = "startDocument:startElement foo None:startElement bar2 {'a': '1', 'b': '2'}:"
if log != reference:
    print("Error got: %s" % log)
    print("Expected: %s" % reference)
    sys.exit(1)

# Case 6: attributes plus an explicit closing tag.
log = ""
chunk = """<foo><bar2 a="1" b='2' ></bar2>"""
ctxt = libxml2.createPushParser(handler, None, 0, "test.xml")
ctxt.parseChunk(chunk, len(chunk), 0)
ctxt = None
reference = "startDocument:startElement foo None:startElement bar2 {'a': '1', 'b': '2'}:endElement bar2:"
if log != reference:
    print("Error got: %s" % log)
    print("Expected: %s" % reference)
    sys.exit(1)

# Case 7: malformed attribute (unterminated quote) -- the broken element must
# not be reported at all; only the document start and <foo> are seen.
log = ""
chunk = """<foo><bar2 a="b='1' />"""
ctxt = libxml2.createPushParser(handler, None, 0, "test.xml")
ctxt.parseChunk(chunk, len(chunk), 0)
ctxt = None
reference = "startDocument:startElement foo None:"
if log != reference:
    print("Error got: %s" % log)
    print("Expected: %s" % reference)
    sys.exit(1)

# Memory debug specific: verify the parser released everything it allocated.
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
    print("OK")
else:
    print("Memory leak %d bytes" % (libxml2.debugMemory(1)))
    libxml2.dumpMemory()
| bsd-3-clause |
nicholasserra/sentry | src/sentry/migrations/0159_auto__add_field_authidentity_last_verified__add_field_organizationmemb.py | 34 | 36008 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration 0159.

    Adds two columns:

    * ``sentry_authidentity.last_verified`` -- DateTimeField defaulting to
      ``datetime.datetime.now``.
    * ``sentry_organizationmember.flags`` -- BigIntegerField defaulting to 0.

    ``backwards`` drops both columns again.  The ``models`` attribute is the
    frozen ORM snapshot auto-generated by South at the time this migration was
    created; it is used to build the ``orm`` argument and must not be edited by
    hand.
    """

    def forwards(self, orm):
        # Adding field 'AuthIdentity.last_verified'
        db.add_column('sentry_authidentity', 'last_verified',
                      self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
                      keep_default=False)

        # Adding field 'OrganizationMember.flags'
        db.add_column('sentry_organizationmember', 'flags',
                      self.gf('django.db.models.fields.BigIntegerField')(default=0),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'AuthIdentity.last_verified'
        db.delete_column('sentry_authidentity', 'last_verified')

        # Deleting field 'OrganizationMember.flags'
        db.delete_column('sentry_organizationmember', 'flags')

    # Frozen ORM model definitions (auto-generated by South; do not edit).
    models = {
        'sentry.accessgroup': {
            'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
            'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
            'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
            'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '50'})
        },
        'sentry.activity': {
            'Meta': {'object_name': 'Activity'},
            'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
            'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
            'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
            'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
        },
        'sentry.alert': {
            'Meta': {'object_name': 'Alert'},
            'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
            'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']"}),
            'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
        },
        'sentry.alertrelatedgroup': {
            'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
            'alert': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Alert']"}),
            'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
            'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
        },
        'sentry.apikey': {
            'Meta': {'object_name': 'ApiKey'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
            'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
            'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
            'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
            'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
        },
        'sentry.auditlogentry': {
            'Meta': {'object_name': 'AuditLogEntry'},
            'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'audit_actors'", 'to': "orm['sentry.User']"}),
            'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
            'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
            'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
            'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
        },
        'sentry.authidentity': {
            'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
            'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
            'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
        },
        'sentry.authprovider': {
            'Meta': {'object_name': 'AuthProvider'},
            'config': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
            'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
        },
        'sentry.broadcast': {
            'Meta': {'object_name': 'Broadcast'},
            'badge': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'message': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'sentry.event': {
            'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group', 'datetime'),)"},
            'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
            'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
            'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
        },
        'sentry.eventmapping': {
            'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
        },
        'sentry.file': {
            'Meta': {'unique_together': "(('name', 'checksum'),)", 'object_name': 'File'},
            'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
            'storage': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
            'storage_options': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        'sentry.group': {
            'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
            'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
            'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
            'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
            'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
            'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
            'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
            'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
        },
        'sentry.groupassignee': {
            'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
            'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
        },
        'sentry.groupbookmark': {
            'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
            'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
            'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
        },
        'sentry.grouphash': {
            'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
            'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
            'hash': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
        },
        'sentry.groupmeta': {
            'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
            'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'value': ('django.db.models.fields.TextField', [], {})
        },
        'sentry.grouprulestatus': {
            'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
            'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
            'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
        },
        'sentry.groupseen': {
            'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
            'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
            'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
        },
        'sentry.grouptagkey': {
            'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
            'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
        },
        'sentry.grouptagvalue': {
            'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
            'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'sentry.helppage': {
            'Meta': {'object_name': 'HelpPage'},
            'content': ('django.db.models.fields.TextField', [], {}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True'}),
            'priority': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        'sentry.lostpasswordhash': {
            'Meta': {'object_name': 'LostPasswordHash'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
        },
        'sentry.option': {
            'Meta': {'object_name': 'Option'},
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
        },
        'sentry.organization': {
            'Meta': {'object_name': 'Organization'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
        },
        'sentry.organizationmember': {
            'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
            'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
            'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
            'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
            'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
        },
        'sentry.pendingteammember': {
            'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
            'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '50'})
        },
        'sentry.project': {
            'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
            'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
            'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
            'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
        },
        'sentry.projectkey': {
            'Meta': {'object_name': 'ProjectKey'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
            'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
            'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
            'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
            'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
            'user_added': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
        },
        'sentry.projectoption': {
            'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
            'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
        },
        'sentry.release': {
            'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
            'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        'sentry.rule': {
            'Meta': {'object_name': 'Rule'},
            'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
        },
        'sentry.tagkey': {
            'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
            'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
            'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
        },
        'sentry.tagvalue': {
            'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
            'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'sentry.team': {
            'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
            'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
        },
        'sentry.teammember': {
            'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
            'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '50'}),
            'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
        },
        'sentry.user': {
            'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
        },
        'sentry.useroption': {
            'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
            'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
            'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
        }
    }

    complete_apps = ['sentry']
| bsd-3-clause |
"""Runtime configuration for the Alexa-Tinder skill.

At import time this module authenticates a Tinder session via pynder
using Facebook credentials, geolocates the host with the freegeoip
service, and pre-fetches the nearby users that the skill pages through.
"""
import json
import os

import requests
import pynder

# SECURITY NOTE(review): a real Facebook access token was committed here in
# plain text. It must be revoked/rotated and supplied via the environment;
# the env-var override below is backward-compatible but the literal fallback
# should be removed once deployments set the variables.
FACEBOOK_PROFILE_ID = os.environ.get('FACEBOOK_PROFILE_ID', '1508286792')
FACEBOOK_SECRET = os.environ.get(
    'FACEBOOK_SECRET',
    'CAAGm0PX4ZCpsBAIG2KyvSaKxhOBZB4ZAqiVEZCQ9BC7kVDRkNVrEaGHbKVyZCxZAqNjOWs289SkZBZAbxrHK1eoAULgKl2GoTNV8WGpZAmF2Pj9JZBtWCVvMzXxLvM7EJKXvH1ZBoDUQmwbeSqHMZAadQo5pEZBpHhJrPYvSYd7ajt4kn3oi4zYQ34xSnLiZBdOao399UXxUNGH25W0wZDZD')

# Index of the nearby user currently being presented to the Alexa user.
current_index = 0

# Geolocate this host so the Tinder session can be anchored to a position.
send_url = 'http://freegeoip.net/json'
# timeout added: a requests.get without one can block module import forever
# if the geoip service is unreachable.
r = requests.get(send_url, timeout=10)
j = json.loads(r.text)
lat = j['latitude']
lon = j['longitude']

lolsession = pynder.Session(FACEBOOK_PROFILE_ID, FACEBOOK_SECRET)
lolsession.update_location(lat, lon)

# Keep only users with a non-trivial bio. `user.bio or ''` guards against a
# missing/None bio, which would make len() raise TypeError.
users = [user for user in lolsession.nearby_users() if len(user.bio or '') > 5]

CLARIFAI_APP_ID = ''
CLARIFAI_APP_SECRET = ''
| mit |
smasala/phantomjs | src/qt/qtwebkit/Tools/QueueStatusServer/model/workitems.py | 140 | 3429 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from google.appengine.ext import db
from model.queuepropertymixin import QueuePropertyMixin
class WorkItems(db.Model, QueuePropertyMixin):
    """Datastore entity holding the ordered list of pending work items
    (bugzilla attachment ids) for a single queue.

    There is one WorkItems entity per queue; its datastore key name is
    derived from the queue name (see key_for_queue), which is what makes
    get_or_insert in lookup_by_queue safe.
    """
    # Name of the queue this entity belongs to.
    queue_name = db.StringProperty()
    # Ordered list of pending attachment ids (integers).
    item_ids = db.ListProperty(int)
    # Creation timestamp, set automatically on first put().
    date = db.DateTimeProperty(auto_now_add=True)

    @classmethod
    def key_for_queue(cls, queue_name):
        """Return the deterministic datastore key name for a queue's WorkItems."""
        return "work-items-%s" % (queue_name)

    @classmethod
    def lookup_by_queue(cls, queue_name):
        """Fetch the WorkItems entity for queue_name, creating it if absent."""
        return cls.get_or_insert(key_name=cls.key_for_queue(queue_name), queue_name=queue_name)

    def display_position_for_attachment(self, attachment_id):
        """Returns a 1-based index corresponding to the position
        of the attachment_id in the queue. If the attachment is
        not in this queue, this returns None"""
        if attachment_id in self.item_ids:
            return self.item_ids.index(attachment_id) + 1
        return None

    @staticmethod
    def _unguarded_add(key, attachment_id):
        """Transaction body: append attachment_id if not already queued.

        Must only be called via db.run_in_transaction (see add_work_item);
        the get/append/put sequence is not safe outside a transaction.
        """
        work_items = db.get(key)
        if attachment_id in work_items.item_ids:
            return
        work_items.item_ids.append(attachment_id)
        work_items.put()

    # Because this uses .key() self.is_saved() must be True or this will throw NotSavedError.
    def add_work_item(self, attachment_id):
        """Atomically add attachment_id to this queue (no-op if present)."""
        db.run_in_transaction(self._unguarded_add, self.key(), attachment_id)

    @staticmethod
    def _unguarded_remove(key, attachment_id):
        """Transaction body: remove the first occurrence of attachment_id.

        Must only be called via db.run_in_transaction (see remove_work_item).
        """
        work_items = db.get(key)
        if attachment_id in work_items.item_ids:
            # We should never have more than one entry for a work item, so we only need remove the first.
            work_items.item_ids.remove(attachment_id)
        work_items.put()

    # Because this uses .key() self.is_saved() must be True or this will throw NotSavedError.
    def remove_work_item(self, attachment_id):
        """Atomically remove attachment_id from this queue (no-op if absent)."""
        db.run_in_transaction(self._unguarded_remove, self.key(), attachment_id)
| bsd-3-clause |
kamal-gade/rockstor-core | src/rockstor/backup/views/plugin.py | 2 | 1165 | """
Copyright (c) 2012-2014 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from rest_framework.response import Response
from system.services import superctl
from generic_view import GenericView
class PluginView(GenericView):
    """REST endpoint controlling the backup-plugin service via supervisord."""

    def post(self, request, command):
        """Run *command* against the backup-plugin service.

        valid commands are status, off and on.

        Returns {'status': <state>} on success, {'error': <message>} on
        failure.
        """
        try:
            out, err, rc = superctl('backup-plugin', command)
            # superctl output format: first line is "<name> <STATE> ...";
            # the second whitespace-separated token is the service state.
            return Response({'status': out[0].split()[1]})
        except Exception as e:
            # BUG FIX: the original bare `except:` returned {'error': err},
            # but `err` is unbound whenever superctl() itself raises, so the
            # handler crashed with NameError. Also narrowed from a bare
            # except so SystemExit/KeyboardInterrupt are not swallowed.
            return Response({'error': str(e)})
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.