"""
Tests for the implementation of feature sign search.
"""
__authors__ = "David Warde-Farley"
__copyright__ = "Copyright 2010-2011, Universite de Montreal"
__credits__ = ["David Warde-Farley"]
__license__ = "3-clause BSD"
__maintainer__ = "David Warde-Farley"
__email__ = "wardefar@iro"
import numpy as np
from pylearn2.optimization.feature_sign import feature_sign_search
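# Feature sign search solves the L1-regularized least squares problem
#   min_x ||signal - np.dot(dictionary, x)||^2 + penalty * ||x||_1
# (Lee et al., "Efficient sparse coding algorithms"). As a reading aid for the
# fixtures below, here is a minimal sketch of that objective; it is purely
# illustrative and not part of pylearn2's API:
#
#     def l1_ls_objective(dictionary, signal, x, penalty):
#         residual = signal - np.dot(dictionary, x)
#         return np.dot(residual, residual) + penalty * np.abs(x).sum()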
class TestFeatureSign(object):
@classmethod
def setup_class(cls):
rng = np.random.RandomState(0)
cls.dictionary = rng.normal(size=(100, 500)) / 1000
cls.dictionary /= np.sqrt((cls.dictionary ** 2).sum(axis=0))
cls.gram = np.dot(cls.dictionary.T, cls.dictionary)
cls.signal = rng.normal(size=100) / 1000
cls.corr = np.dot(cls.dictionary.T, cls.signal)
cls.nzi = [np.array([0, 6, 15, 30, 31, 34,
36, 48, 52, 53, 62, 94,
99, 103, 105, 107, 124, 131,
133, 137, 142, 145, 171, 178,
190, 206, 207, 214, 221, 226,
228, 229, 231, 234, 246, 255,
257, 258, 261, 265, 268, 269,
279, 281, 282, 288, 289, 293,
294, 296, 297, 302, 303, 306,
319, 322, 328, 329, 331, 335,
337, 351, 353, 355, 362, 368,
369, 372, 375, 380, 390, 392,
394, 397, 405, 410, 412, 420,
421, 422, 425, 430, 432, 435,
439, 443, 449, 451, 455, 456,
457, 458, 463, 479, 484, 485,
492, 493, 496]),
np.array([0, 6, 15, 30, 31, 34,
40, 52, 62, 84, 99, 103,
107, 131, 133, 137, 142, 145,
171, 190, 207, 221, 226, 228,
229, 231, 234, 243, 255, 257,
258, 261, 265, 268, 279, 281,
284, 288, 289, 293, 294, 296,
297, 302, 303, 306, 308, 319,
322, 329, 331, 335, 337, 351,
353, 355, 357, 362, 369, 371,
372, 375, 380, 390, 392, 394,
397, 405, 412, 420, 422, 430,
435, 439, 443, 451, 455, 456,
457, 463, 479, 483, 484, 485,
492, 493, 497]),
np.array([0, 6, 15, 21, 28, 31,
34, 40, 52, 62, 75, 103,
133, 164, 171, 190, 226, 228,
229, 231, 234, 255, 258, 261,
265, 268, 279, 284, 289, 293,
294, 296, 303, 306, 308, 319,
322, 335, 351, 355, 360, 369,
371, 372, 375, 380, 392, 394,
397, 420, 422, 430, 435, 439,
443, 451, 455, 456, 457, 463,
479, 483, 485, 492, 493, 497]),
np.array([0, 15, 21, 28, 31, 34,
40, 52, 62, 103, 164, 171,
190, 206, 226, 228, 229, 231,
234, 255, 258, 261, 265, 268,
279, 294, 303, 308, 319, 322,
335, 355, 360, 369, 371, 372,
375, 380, 392, 420, 422, 430,
439, 443, 446, 451, 456, 457,
479, 483, 485, 492, 497]),
np.array([0, 15, 31, 32, 34, 40,
52, 62, 103, 164, 190, 206,
226, 228, 229, 231, 234, 261,
268, 279, 294, 303, 308, 319,
322, 335, 355, 360, 363, 369,
371, 372, 375, 392, 422, 430,
439, 451, 457, 479, 485, 492, 497]),
np.array([0, 32, 40, 52, 103, 164,
182, 190, 229, 234, 268, 294,
303, 319, 335, 355, 360, 363,
369, 371, 372, 375, 392, 422,
430, 439, 479, 485, 492, 497]),
np.array([0, 32, 52, 103, 190, 229,
234, 268, 303, 319, 335, 363,
369, 371, 372, 375, 392, 422,
430, 439, 479]),
np.array([52, 190, 229, 234, 319, 335,
369, 371, 372, 375, 392, 422,
430, 479]),
np.array([52, 229, 234, 335, 369, 371,
372, 375, 422, 430, 479]),
np.array([229, 234, 369, 372, 375, 422, 479]),
np.array([234, 369, 375, 479]),
np.array([234, 369, 375, 479]),
np.array([234, 369]),
np.array([], dtype=np.int32)]
cls.nze = [np.array([-1.9348249481460072e-03, -2.6733387284447153e-04,
-1.3132617311436409e-03, -4.8160077424667838e-04,
-7.5839249350565386e-04, -1.5909426774904003e-03,
5.7527969676995383e-05, 7.9447260531266320e-05,
1.0451809037644966e-03, -4.1613884795362038e-04,
1.5212190467793459e-03, 1.7787664944475309e-04,
-4.8836751622554329e-04, 1.6494321148399042e-03,
-2.7677800129602731e-06, -5.3707951770584682e-04,
-6.5444024170409589e-05, -1.5828515693151556e-04,
-7.2127417409642656e-04, -5.1401896787202958e-04,
3.7758489731919223e-05, 6.6680708523377810e-04,
5.2900822039974530e-04, 3.0975349526476766e-05,
1.2917361992321301e-03, -2.6589905870352928e-04,
4.5817183084630225e-05, 1.3754030475796234e-04,
-1.6320053069891164e-03, 4.5131596808813552e-04,
-2.5421398951701307e-04, 1.8433162492285078e-03,
-9.1173071359616081e-04, 1.7471082488497108e-03,
-5.3360585337509928e-06, 9.1283752277065869e-04,
2.6051969664101067e-05, -2.4057039933035281e-04,
4.1276967786097276e-04, -7.2185718488784083e-04,
7.3222222810349295e-04, -1.1258619069166660e-03,
-5.9539175662470837e-05, 1.2020202196530964e-04,
-6.4852933217228329e-05, -3.6136499613693474e-04,
-2.7509451425501420e-04, 7.8402482743400567e-04,
-1.6375047820679251e-03, -7.6328448464493817e-05,
2.5201048678107204e-04, -5.3343888345009510e-04,
1.3637930948889019e-03, 4.1072719924889650e-05,
-1.5727527265822143e-03, -1.5471429922949904e-03,
-5.6998443448416484e-04, -3.1120566643703262e-04,
-5.6386207057422957e-05, 5.7371833193473369e-04,
3.6188962138560557e-04, -4.7316772908796121e-04,
2.9484545185290238e-04, -6.6535357340597704e-04,
8.4119874640662382e-04, -2.8329017840323988e-05,
-2.5306839397483431e-03, 2.4715013439992202e-03,
-1.1910480261999676e-03, -2.9709479220933098e-04,
6.8431258887171015e-04, -2.7295761244718773e-03,
-2.4093971793902772e-05, 6.2201228574189945e-04,
5.1780698320989707e-04, 2.6733781310107836e-04,
5.6336177344038440e-04, -1.0616591760754300e-03,
3.6947848006075820e-04, 6.7141992520493947e-05,
2.9771817071157870e-04, 5.9797094277252722e-04,
-3.9933204819632510e-04, 8.6517244244875048e-04,
4.4920353680412715e-04, -6.6808772499122302e-04,
-5.3361944478563209e-04, 1.5242227367637563e-03,
5.2896850426062208e-04, 8.9662346856869842e-04,
5.5085221933474243e-04, 6.6736148020986167e-05,
8.1560784413259606e-04, 2.6011480255750508e-04,
6.6830208202212017e-04, 4.1085940128486484e-04,
-1.7682961625122970e-03, 8.3934219900091750e-04,
-2.0873017058309116e-05]),
np.array([-1.1035294455088696e-03, -2.2000890007574505e-04,
-7.8903889419539659e-04, -2.1119535900836997e-04,
-5.8279291450617342e-04, -1.7080583717419850e-03,
-2.6627718634094304e-04, 1.2487012342411459e-03,
1.2674860805574550e-03, 1.0316196854953281e-04,
-3.8989827573487992e-05, 1.3959573167727619e-03,
-2.9263690772758253e-04, -7.5516160142673238e-05,
-3.2027802224615773e-04, -4.0194333313972751e-05,
5.2792602866442842e-05, 1.0030885855889520e-04,
3.4359859807725127e-04, 1.4202578828080572e-03,
4.9435053415793168e-05, -2.9342239639478933e-04,
3.7342261630825908e-04, -1.1357718570444337e-04,
1.4693020881404192e-03, -8.1038826473469011e-04,
1.8665829767200055e-03, -9.6714718311873344e-05,
5.6412327489350621e-04, 7.0701526303205728e-05,
-3.9516191942322022e-04, 6.3432608784393975e-04,
-6.3954164541396520e-04, 4.5235975549458074e-04,
-8.5992159621133923e-05, 1.4804790922793453e-05,
4.0644969429524014e-05, -5.2722126671811232e-05,
-3.6077541732844342e-04, 4.2575453908280664e-04,
-1.1487697489090171e-03, -4.2018807922097055e-04,
3.4790809594425012e-05, -1.6160032947381991e-04,
1.2440480492768469e-03, 1.1231144781693225e-04,
-3.1889994757268988e-05, -1.4839438754208341e-03,
-1.4511548747212302e-03, -1.3590898496856349e-04,
-1.1193045928555190e-04, 1.0102752291013409e-03,
8.5488972321372484e-05, -4.5029116166219244e-04,
1.2167805925053460e-04, -9.8493755243442174e-04,
1.2491091198674258e-04, 3.7378349690722683e-04,
-2.7075691573085138e-03, -4.8129161730357662e-05,
2.6410719309027152e-03, -1.4938221269370467e-03,
-4.7855585389856826e-04, 1.6361688366860866e-04,
-2.1767953133433049e-03, -3.0577449369473172e-04,
1.9800957374812715e-04, 2.1713584774933896e-04,
1.2547286780562931e-04, -8.8978093970981957e-04,
1.8019125328437374e-04, 7.3691032485438852e-04,
6.7509982879728641e-04, 2.7311153047371414e-04,
-6.8500802556878382e-04, 1.3273193785611063e-03,
3.1869297588771220e-04, 8.6925548887159651e-04,
9.4018360395236385e-04, 3.0819494676812361e-04,
2.6271523986771711e-04, 3.0870274205584814e-04,
3.0289953214768245e-04, 1.3749791740379300e-04,
-1.1786735752273350e-03, 5.0250477725954619e-04,
-8.6070034229997804e-05]),
np.array([-8.3437579213689074e-04, -3.0015619460217073e-05,
-5.7897541676280300e-04, 1.6551471495779046e-05,
-2.2393605800632093e-04, -5.2001242860563247e-04,
-1.3870942715484462e-03, -5.9567578646616199e-04,
1.0534240091407204e-03, 9.1585112462395880e-04,
-7.4399294769562441e-05, 1.0651860600130367e-03,
-1.1115812798157547e-04, -1.2644694854842305e-04,
8.0859909014213599e-05, 9.5258814604793707e-04,
4.8694577190044154e-04, -1.2857867793752289e-04,
1.3623765533842772e-03, -6.3098386281712981e-04,
1.8709346040428150e-03, 2.4869691048019343e-04,
-4.4258285202598450e-04, 6.2641533196780173e-04,
-2.1070150676115288e-04, 5.5018802760393299e-04,
-8.6507017547800483e-05, 2.5113468709088080e-05,
-5.9572458159247166e-05, 2.7174990026237772e-04,
-6.8299151173242584e-04, -6.9141524100211209e-05,
9.9677709958487432e-04, 2.1455469032139378e-05,
-1.0931081545008502e-04, -1.4288398610195221e-03,
-9.2641619187052728e-04, 1.1631696831111123e-03,
-2.5008597010047157e-04, -4.9109092205391425e-04,
1.4573367169579149e-05, -2.4556619817285266e-03,
-2.2737442063480292e-04, 2.0873563680032670e-03,
-1.7221092085957831e-03, -3.9149748509506707e-04,
-1.9083890283104121e-03, -4.5179869240156825e-05,
3.1550562053295335e-08, -6.5469640615284299e-04,
2.3552832526085873e-04, 5.9969305766932981e-04,
2.2278964442494262e-04, 3.2359079954438926e-04,
-4.0264229418057637e-04, 7.7866637053198141e-04,
1.1841428769164845e-04, 3.8661136419945486e-04,
6.7602096499568042e-04, 3.1878480353931328e-04,
5.1411896426637957e-04, 2.5156200586072758e-04,
2.2088078507041245e-04, -1.1117185648776493e-03,
2.1497044788086782e-04, -1.0188107377535942e-04
]),
np.array([-6.4713254630098830e-04, -2.7713153029343456e-04,
1.0757385692728924e-04, -1.2262501147393618e-04,
-2.3059082888739592e-04, -7.3161012508945306e-04,
-5.2413912135354621e-04, 6.2111326346937247e-04,
4.3335174642597019e-04, 8.9356967523135681e-04,
-3.3763242273813105e-04, 3.3430657447490773e-05,
6.2191414452732641e-04, -7.4132022612954130e-06,
5.3662340320133003e-04, -1.4178212558739250e-04,
1.3095988871201910e-03, -3.1597818931136630e-04,
2.0378913237038515e-03, 6.6562531139041254e-05,
-1.6868955601997661e-04, 2.8713705809760455e-04,
-1.2304693935549571e-04, 4.4141928313035041e-04,
-1.2253018622955899e-04, -4.6171051458478255e-04,
7.3723710785185957e-04, -1.9032537033656971e-04,
-1.2697562952863858e-03, -5.2482737659127472e-04,
1.2061124796596517e-03, -4.0817504185725420e-04,
2.5998882856128638e-05, -2.0966819569876341e-03,
-2.7608734302861750e-04, 1.6726478730511611e-03,
-1.6663463119142058e-03, -1.0586294396151170e-04,
-1.6941836184160486e-03, -2.5946770642156560e-04,
3.5633343009712243e-04, 6.9737388074824142e-04,
3.5050766641770261e-04, -6.2751827095089259e-05,
1.2404656899160754e-04, 3.5941380874020181e-04,
3.4446046190986252e-05, 4.2103096984376170e-04,
5.3026111408425991e-04, 5.1273187361432225e-05,
3.5432933330489038e-04, -7.4663640180765406e-04,
-3.8335424804281977e-04]),
np.array([-5.1836610661442424e-04, -1.0477410668018961e-04,
-5.7495834485112783e-05, 1.7900407122394070e-04,
-2.6703148129370935e-04, -3.9176848759322184e-04,
5.8091871319957496e-04, 1.6462922992537709e-04,
6.5002017969994672e-04, -3.2338128176255906e-04,
3.9479040414653807e-04, -2.7188461309840551e-05,
3.1135232326379012e-04, -2.8133206599649399e-05,
1.2641601901411529e-03, -1.0536469984737687e-04,
2.0205599857680634e-03, 1.0750521985466104e-05,
3.3091039446384973e-04, -6.4895294462220010e-05,
-2.1369409832986251e-04, 4.5576174900652586e-04,
-3.1012129723400486e-05, -1.1405083021739002e-03,
-8.5555280955904731e-05, 1.1637843624565714e-03,
-2.3051364523934005e-04, 3.4620317476115259e-05,
1.3934400265841262e-04, -1.6160179207995579e-03,
-3.5463876387457341e-04, 1.2476670438728542e-03,
-1.5272790887738484e-03, -1.2188452493555049e-03,
4.1391946602114946e-04, 5.7720967689920983e-04,
3.9160014684697103e-04, 7.9576153790110988e-05,
1.2980545083543301e-04, 5.7890412759707829e-04,
2.5677418172492382e-04, -3.4102744376433380e-04,
-3.8079926681336754e-04]),
np.array([-2.9393635354609123e-04, 2.8338417294713878e-04,
-7.1322391135519907e-05, 6.0928102145436805e-04,
4.1117051502882046e-04, -1.3597922568600674e-04,
3.5892355459974317e-06, 2.5647272365454443e-04,
1.0960456734638818e-03, 1.8670751666828435e-03,
1.7805877172321683e-04, -4.9436006980503034e-05,
2.8396916362562023e-04, -9.6659831830055534e-04,
9.8937911969777977e-04, -5.9732960363989054e-05,
6.0131277001765439e-06, 2.0633055937450479e-04,
-1.3238747581454256e-03, -4.4239264759708273e-04,
9.3309879025114137e-04, -1.3254818250397289e-03,
-7.4066565997860437e-04, 3.8820311348479301e-04,
4.3545224753910783e-04, 2.9649776286609744e-04,
6.5031628085758639e-04, 1.0771789715400958e-04,
-1.6633242528597823e-05, -1.2235122833187349e-04
]),
np.array([-4.6467519143475876e-05, 1.0821123640127927e-04,
5.2011630846811980e-04, 1.5331854905217915e-04,
1.3866548495350116e-04, 8.2075898345678074e-04,
1.6343894091814725e-03, 1.0471892103365427e-04,
9.4287873260011381e-05, -6.0720057514918534e-04,
7.3927770528263870e-04, 7.3587103118350852e-05,
-1.2461689301653100e-03, -3.7588254684182553e-04,
7.0300087714795709e-04, -1.1703586963490017e-03,
-3.4761993323616724e-04, 3.2959907503653533e-04,
3.7357318166526934e-04, 1.2210075043889726e-04,
6.8778984271716630e-04]),
np.array([3.8272388058376711e-04, 1.9899166301244989e-05,
5.6189540105860936e-04, 1.3899178409636488e-03,
-2.5900269633353400e-04, 4.6538791234591143e-04,
-1.1071682569346324e-03, -2.6088142077321339e-04,
4.6133971870889563e-04, -9.8984072841656542e-04,
-3.7539187986192293e-06, 2.6845217976317101e-04,
2.4946606382563613e-04, 6.8309969710756292e-04
]),
np.array([1.6472832434094874e-04, 3.4060230114089002e-04,
1.1427152271565732e-03, 1.9920279319872669e-04,
-9.6452219060174412e-04, -1.0812607675017613e-04,
2.7900553513436648e-04, -8.0628617478484157e-04,
1.5476496796440077e-04, 4.9104287902516419e-05,
6.1965472101718983e-04]),
np.array([1.7454378937657987e-04, 9.4360910010814669e-04,
-7.7576992971679156e-04, 8.8628243235214528e-05,
-6.1000343573593921e-04, 1.0084201597356237e-05,
5.3306901959571291e-04]),
np.array([0.000713547849065, -0.0005803708454116,
-0.0003480663897498, 0.000378159050198]),
np.array([4.6907206821826032e-04, -3.5326757239539523e-04,
-9.6124150391964280e-05, 1.9113349844253737e-04
]),
np.array([0.000230238402802, -0.0001260715414588]),
np.array([], dtype=np.float64)]
cls.penalties = [0.0001, 0.0006, 0.0011, 0.0016,
0.0021, 0.0026, 0.0031, 0.0036,
0.0041, 0.0046, 0.0051, 0.0056,
0.0061, 0.0066]
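        # As the penalty grows from 1e-4 to 6.6e-3, the reference support
        # (cls.nzi) shrinks until it is empty, the expected effect of an
        # increasingly heavy L1 penalty.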
@classmethod
def teardown_class(cls):
del cls.signal
del cls.dictionary
del cls.nzi
del cls.nze
del cls.penalties
def test_driver(self):
for index in range(len(self.penalties)):
solution_vector = np.zeros(self.dictionary.shape[1])
feature_sign_search(self.dictionary, self.signal,
self.penalties[index],
solution=solution_vector)
yield self.check_against_reference, solution_vector, index
yield self.check_zerocoef_optimality_cond, solution_vector, index
yield self.check_nonzero_optimality_cond, solution_vector, index
yield self.check_zeros_against_reference, solution_vector, index
yield self.check_nonzeros_against_reference, solution_vector, index
def check_zeros_against_reference(self, solution, index):
z_ref = np.setdiff1d(np.arange(solution.shape[0]), self.nzi[index])
z_ind = np.where(solution == 0)[0]
assert z_ref.shape == z_ind.shape
assert np.all(z_ref == z_ind)
def check_nonzeros_against_reference(self, solution, index):
nz_ref = self.nzi[index]
nz_ind = np.where(solution != 0)[0]
assert nz_ref.shape == nz_ind.shape
assert np.all(nz_ref == nz_ind)
def check_against_reference(self, solution, index):
reference = np.zeros(self.dictionary.shape[1])
reference[self.nzi[index]] = self.nze[index]
assert np.allclose(solution, reference)
def check_zerocoef_optimality_cond(self, solution, index):
sparsity = self.penalties[index]
grad = - 2 * self.corr + 2 * np.dot(self.gram, solution)
signs = np.sign(solution)
assert np.all(abs(grad[signs == 0]) <= sparsity)
def check_nonzero_optimality_cond(self, solution, index):
sparsity = self.penalties[index]
grad = - 2 * self.corr + 2 * np.dot(self.gram, solution)
signs = np.sign(solution)
nzgrad = grad[signs != 0] + sparsity * signs[signs != 0]
np.testing.assert_almost_equal(nzgrad, np.zeros(nzgrad.shape))
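    # The two checks above encode the subgradient optimality conditions of the
    # L1 problem: with g = -2 * corr + 2 * np.dot(gram, x) (the gradient of the
    # squared error), x is optimal iff |g[i]| <= penalty wherever x[i] == 0 and
    # g[i] + penalty * np.sign(x[i]) == 0 wherever x[i] != 0.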
def test_shape_rank_matches_1d_generated(self):
sparsity = self.penalties[0]
solution = feature_sign_search(self.dictionary, self.signal, sparsity)
assert solution.ndim == 1
assert solution.shape[0] == self.dictionary.shape[1]
def test_shape_rank_matches_2d_generated(self):
sparsity = self.penalties[0]
signal = self.signal.reshape(1, -1)
solution = feature_sign_search(self.dictionary, signal, sparsity)
assert solution.ndim == 2
assert solution.shape[0] == 1
assert solution.shape[1] == self.dictionary.shape[1]
def test_solution_identity_1d_provided(self):
sparsity = self.penalties[0]
solution = np.zeros(self.dictionary.shape[1])
newsol = feature_sign_search(self.dictionary, self.signal, sparsity,
solution=solution)
assert solution is newsol
def test_solution_identity_2d_provided(self):
sparsity = self.penalties[0]
solution = np.zeros((1, self.dictionary.shape[1]))
signal = self.signal.reshape(1, -1)
newsol = feature_sign_search(self.dictionary, signal, sparsity,
solution=solution)
assert solution is newsol
# Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
from gevent import sleep
from iris.constants import EMAIL_SUPPORT, IM_SUPPORT
from smtplib import SMTP
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate
from iris import cache
import quopri
import time
import markdown
import dns.resolver
import dns.exception
import logging
logger = logging.getLogger(__name__)
class iris_smtp(object):
supports = frozenset([EMAIL_SUPPORT, IM_SUPPORT])
last_autoscale_mx_lookup = {}
def __init__(self, config):
self.config = config
self.retry_interval = config.get('retry_interval', 0)
self.modes = {
EMAIL_SUPPORT: self.send_email,
IM_SUPPORT: self.send_email,
}
self.mx_sorted = []
self.last_conn = None
self.last_conn_server = None
self.smtp_timeout = config.get('timeout', 10)
if config.get('smtp_server'):
# mock mx record
self.mx_sorted.append((0, config['smtp_server']))
elif config.get('smtp_gateway'):
try:
mx_hosts = dns.resolver.query(config['smtp_gateway'], 'MX')
            except dns.exception.DNSException as e:
                raise Exception('MX error: %s' % e)
for mx in mx_hosts:
mx_hostname = mx.exchange.to_text().strip('.')
self.mx_sorted.append((mx.preference, mx_hostname))
self.mx_sorted.sort(key=lambda tup: tup[0])
else:
raise ValueError('Missing SMTP config for sender')
def send_email(self, message, customizations=None):
md = markdown.Markdown()
start = time.time()
m = MIMEMultipart('alternative')
from_address = self.config['from']
application = message.get('application')
if application:
m['X-IRIS-APPLICATION'] = application
address = cache.applications.get(application, {}).get('custom_sender_addresses', {}).get('email')
if address is not None:
from_address = address
priority = message.get('priority')
if priority:
m['X-IRIS-PRIORITY'] = priority
plan = message.get('plan')
if plan:
m['X-IRIS-PLAN'] = plan
incident_id = message.get('incident_id')
if incident_id:
m['X-IRIS-INCIDENT-ID'] = str(incident_id)
m['Date'] = formatdate(localtime=True)
m['from'] = from_address
if message.get('multi-recipient'):
m['to'] = ','.join(set(message['destination']))
if message['bcc_destination']:
m['bcc'] = ','.join(set(message['bcc_destination']))
else:
m['to'] = message['destination']
if message.get('noreply'):
m['reply-to'] = m['to']
if 'email_subject' in message:
m['subject'] = message['email_subject']
else:
m['subject'] = message['subject']
plaintext = None
if 'email_text' in message:
plaintext = message['email_text']
elif 'body' in message:
plaintext = message['body']
if plaintext:
mt = MIMEText(None, 'plain', 'utf-8')
mt.set_payload(quopri.encodestring(plaintext.encode('UTF-8')))
mt.replace_header('Content-Transfer-Encoding', 'quoted-printable')
m.attach(mt)
# for tracking messages, email_html is not required, so it's possible
# that both of the following keys are missing from message
html = None
if 'email_html' in message:
html = message['email_html']
elif 'body' in message:
html = md.convert(message['body'])
if html:
if 'extra_html' in message:
html += message['extra_html']
# We need to have body tags for the oneclick buttons to properly parse
html = '<body>\n' + html + '\n</body>'
mt = MIMEText(None, 'html', 'utf-8')
            # Google does not like base64-encoded emails for the oneclick button
            # functionality, so force quoted-printable.
mt.set_payload(quopri.encodestring(html.encode('UTF-8')))
mt.replace_header('Content-Transfer-Encoding', 'quoted-printable')
m.attach(mt)
conn = None
if message.get('multi-recipient'):
email_recipients = message['destination'] + message['bcc_destination']
else:
email_recipients = [message['destination']]
# Try reusing previous connection in this worker if we have one
if self.last_conn:
conn = self.last_conn
else:
for mx in self.mx_sorted:
try:
smtp = SMTP(timeout=self.smtp_timeout)
smtp.connect(mx[1], self.config.get('port', 25))
if self.config.get('username', None) is not None and self.config.get('password', None) is not None:
smtp.login(self.config.get('username', None), self.config.get('password', None))
conn = smtp
self.last_conn = conn
self.last_conn_server = mx[1]
break
except Exception as e:
logger.exception(e)
if not conn:
raise Exception('Failed to get smtp connection.')
try:
conn.sendmail([from_address], email_recipients, m.as_string())
except Exception:
logger.warning('Failed sending email through %s. Will try connecting again and resending.', self.last_conn_server)
try:
conn.quit()
except Exception:
pass
# If we can't send it, try reconnecting and then sending it one more time before
# giving up
            for mx in self.mx_sorted:
                try:
                    smtp = SMTP(timeout=self.smtp_timeout)
                    smtp.connect(mx[1], self.config.get('port', 25))
                    if self.config.get('username') is not None and self.config.get('password') is not None:
                        smtp.login(self.config['username'], self.config['password'])
                    conn = smtp
                    self.last_conn = conn
                    self.last_conn_server = mx[1]
                    break
                except Exception:
                    # Try the next MX host rather than bailing on the first failure.
                    logger.exception('Failed reconnecting to %s to send message', mx[1])
                    self.last_conn = None
            if not self.last_conn:
                return None
try:
# If configured, sleep to back-off on connection
if self.retry_interval:
sleep(self.retry_interval)
conn.sendmail([from_address], email_recipients, m.as_string())
logger.info('Message successfully sent through %s after reconnecting', self.last_conn_server)
except Exception:
logger.exception('Failed sending email through %s after trying to reconnect', self.last_conn_server)
return None
return time.time() - start
def send(self, message, customizations=None):
return self.modes[message['mode']](message, customizations)
def cleanup(self):
if self.last_conn:
logger.info('Trying to quit smtp connection to %s', self.last_conn_server)
try:
self.last_conn.quit()
except Exception:
pass
@classmethod
def determine_worker_count(cls, vendor):
mx_gateway = vendor.get('smtp_gateway')
connections_per_mx = int(vendor.get('tasks_per_mx', 4))
last_lookup = cls.last_autoscale_mx_lookup.get(mx_gateway)
now = time.time()
if last_lookup and last_lookup[0] > now:
logger.info('Using old MX results for %s; next MX refresh in %d seconds', mx_gateway, last_lookup[0] - now)
return last_lookup[1]
else:
logger.info('Refreshing MX records for %s', mx_gateway)
try:
mx_result = dns.resolver.query(mx_gateway, 'MX')
mx_hosts = [mx.exchange.to_text().strip('.') for mx in mx_result]
            except dns.exception.DNSException as e:
if last_lookup:
logger.error('Failed looking up MX: %s; returning old results.', e)
return last_lookup[1]
else:
raise Exception('MX error, and we don\'t have old results to fall back on: %s' % e)
mx_host_counts = {mx: connections_per_mx for mx in mx_hosts}
cls.last_autoscale_mx_lookup[mx_gateway] = (mx_result.expiration, mx_host_counts)
logger.info('Next MX refresh for %s in %d seconds', mx_gateway, mx_result.expiration - now)
return mx_host_counts
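# A minimal usage sketch for this vendor (config values and the message dict
# are illustrative; real ones come from Iris's sender configuration and queue):
#
#     sender = iris_smtp({
#         'smtp_server': 'smtp.example.com',
#         'from': 'iris-alerts@example.com',
#     })
#     duration = sender.send({
#         'mode': 'email',  # i.e. iris.constants.EMAIL_SUPPORT
#         'destination': 'oncall@example.com',
#         'subject': 'Oncall alert',
#         'body': 'Service foo is unhealthy.',
#     })
#     # send() returns the elapsed time in seconds, or None on failure.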
from datetime import date
import os
import shutil
import tempfile
from unittest import TestCase
from regparser.history.delays import FRDelay
from regparser.notice import xml as notice_xml
from regparser.test_utils.xml_builder import XMLBuilder
import settings
class NoticeXMLLocalCopiesTests(TestCase):
"""Tests specifically related to xml.local_copies, which has significant
setup/teardown"""
def setUp(self):
self.dir1 = tempfile.mkdtemp()
self.dir2 = tempfile.mkdtemp()
self._original_local_xml_paths = settings.LOCAL_XML_PATHS
settings.LOCAL_XML_PATHS = [self.dir1, self.dir2]
self.url = 'http://example.com/some/url'
def tearDown(self):
settings.LOCAL_XML_PATHS = self._original_local_xml_paths
shutil.rmtree(self.dir1)
shutil.rmtree(self.dir2)
def test_empty(self):
"""If no copy is present, we get an empty list"""
self.assertEqual([], notice_xml.local_copies(self.url))
os.mkdir(os.path.join(self.dir1, "some"))
self.assertEqual([], notice_xml.local_copies(self.url))
def test_order(self):
"""The first source will override the second"""
url = 'http://example.com/some/url'
paths = []
for d in (self.dir1, self.dir2):
os.mkdir(os.path.join(d, "some"))
paths.append(os.path.join(d, "some", "url"))
with open(paths[1], "w") as f:
f.write('aaaaa')
self.assertEqual([paths[1]], notice_xml.local_copies(url))
with open(paths[0], "w") as f:
f.write('bbbbb')
self.assertEqual([paths[0]], notice_xml.local_copies(url))
def test_splits(self):
"""If multiple files are present from a single source, return all"""
url = 'http://example.com/xml/503.xml'
os.mkdir(os.path.join(self.dir1, 'xml'))
paths = []
for i in range(3):
path = os.path.join(self.dir1, 'xml', '503-{}.xml'.format(i + 1))
paths.append(path)
with open(path, 'w') as f:
f.write(str(i)*10)
self.assertEqual(set(paths), set(notice_xml.local_copies(url)))
class NoticeXMLTests(TestCase):
"""Tests for the NoticeXML class"""
def test_set_meta_data(self):
"""Several pieces of meta data should be set within the XML. We test
that the NoticeXML wrapper can retrieve them and that, if we re-read
the XML, they can still be pulled out"""
with XMLBuilder("ROOT") as ctx:
with ctx.DATES():
ctx.P("Some content")
ctx.PRTPAGE(P="455")
xml = notice_xml.NoticeXML(ctx.xml)
xml.effective = '2005-05-05'
xml.published = '2004-04-04'
xml.fr_volume = 22
self.assertEqual(xml.effective, date(2005, 5, 5))
self.assertEqual(xml.published, date(2004, 4, 4))
self.assertEqual(xml.fr_volume, 22)
xml = notice_xml.NoticeXML(xml.xml_str())
self.assertEqual(xml.effective, date(2005, 5, 5))
self.assertEqual(xml.published, date(2004, 4, 4))
self.assertEqual(xml.fr_volume, 22)
def test_set_effective_date_create(self):
"""The DATES tag should get created if not present in the XML"""
xml = notice_xml.NoticeXML(XMLBuilder('ROOT').xml)
xml.effective = '2005-05-05'
self.assertEqual(xml.effective, date(2005, 5, 5))
xml = notice_xml.NoticeXML(xml.xml_str())
self.assertEqual(xml.effective, date(2005, 5, 5))
def test_derive_effective_date(self):
"""Effective date can be derived from the dates strings. When it is
derived, it should also be set on the notice xml"""
with XMLBuilder("ROOT") as ctx:
with ctx.DATES():
ctx.P("Effective on May 4, 2004")
xml = notice_xml.NoticeXML(ctx.xml)
xml.effective = '2002-02-02'
self.assertEqual(xml.derive_effective_date(), date(2004, 5, 4))
self.assertEqual(xml.effective, date(2004, 5, 4))
def test_delays(self):
"""The XML should be search for any delaying text"""
with XMLBuilder("ROOT") as ctx:
with ctx.EFFDATE():
ctx.P("The effective date of 11 FR 100 has been delayed "
"until April 1, 2010. The effective date of 11 FR 200 "
"has also been delayed until October 10, 2010")
xml = notice_xml.NoticeXML(ctx.xml)
self.assertEqual(
xml.delays(),
[FRDelay(11, 100, date(2010, 4, 1)),
FRDelay(11, 200, date(2010, 10, 10))])
def test_source_is_local(self):
for url in ('https://example.com', 'http://example.com'):
self.assertFalse(
notice_xml.NoticeXML('<ROOT/>', source=url).source_is_local)
for path in ('./dot/relative', 'normal/relative', '/absolute/ref'):
self.assertTrue(
notice_xml.NoticeXML('<ROOT/>', source=path).source_is_local)
def test_derive_agencies_simple(self):
"""
Test that we can properly derive agency info from the metadata or the
XML itself, and that it's added to the XML.
"""
agencies_info = [{
u'name': u'Environmental Protection Agency',
u'parent_id': None,
u'raw_name': u'ENVIRONMENTAL PROTECTION AGENCY',
u'url': u'%s%s' % (u'https://www.federalregister.gov/',
u'agencies/environmental-protection-agency'),
u'json_url': u'%s%s' % ('https://www.federalregister.gov/',
'api/v1/agencies/145.json'),
u'id': 145
}]
with XMLBuilder("ROOT") as ctx:
with ctx.DATES():
ctx.P("Effective on May 4, 2004")
xml = notice_xml.NoticeXML(ctx.xml)
xml.derive_agencies(agencies=agencies_info)
        self.assertEqual(len(xml.xpath("//EREGS_AGENCIES")), 1)
        eregs_agencies = xml.xpath("//EREGS_AGENCIES")[0]
        self.assertEqual(len(eregs_agencies.xpath("//EREGS_AGENCY")), 1)
        epa = eregs_agencies.xpath("//EREGS_AGENCY")[0]
        self.assertEqual(epa.attrib["name"],
                         "Environmental Protection Agency")
        self.assertEqual(epa.attrib["raw-name"],
                         "ENVIRONMENTAL PROTECTION AGENCY")
        self.assertEqual(epa.attrib["agency-id"], "145")
def test_derive_agencies_singlesub(self):
"""
Test that we can properly derive agency info from the metadata and add
it to the XML if there is a subagency.
"""
agencies_info = [
{
u'name': u'Justice Department',
u'parent_id': None,
u'url': u'%s%s' % (u'https://www.federalregister.gov/',
u'agencies/justice-department'),
u'raw_name': u'DEPARTMENT OF JUSTICE',
u'json_url': u'%s%s' % (u'https://www.federalregister.gov/',
u'api/v1/agencies/268.json'),
u'id': 268
},
{
u'name': u'Alcohol, Tobacco, Firearms, and Explosives Bureau',
u'parent_id': 268,
u'url': '%s%s%s' % (u'https://www.federalregister.gov/',
u'agencies/alcohol-tobacco-firearms',
u'-and-explosives-bureau'),
u'raw_name': '%s%s' % (u'Bureau of Alcohol, Tobacco, Firearms',
u' and Explosives'),
u'json_url': '%s%s' % (u'https://www.federalregister.gov/',
u'api/v1/agencies/19.json'),
u'id': 19
}
]
with XMLBuilder("ROOT") as ctx:
with ctx.DATES():
ctx.P("Effective on May 4, 2004")
xml = notice_xml.NoticeXML(ctx.xml)
xml.derive_agencies(agencies=agencies_info)
        self.assertEqual(len(xml.xpath("//EREGS_AGENCIES")), 1)
        eregs_agencies = xml.xpath("//EREGS_AGENCIES")[0]
        self.assertEqual(len(eregs_agencies.xpath("//EREGS_AGENCY")), 1)
        doj = eregs_agencies.xpath("//EREGS_AGENCY")[0]
        self.assertEqual(doj.attrib["name"], "Justice Department")
        self.assertEqual(doj.attrib["raw-name"], "DEPARTMENT OF JUSTICE")
        self.assertEqual(doj.attrib["agency-id"], "268")
        self.assertEqual(len(doj.xpath("//EREGS_SUBAGENCY")), 1)
        atf = doj.xpath("//EREGS_SUBAGENCY")[0]
        self.assertEqual(atf.attrib["name"],
                         "Alcohol, Tobacco, Firearms, and Explosives Bureau")
        self.assertEqual(
            atf.attrib["raw-name"],
            "Bureau of Alcohol, Tobacco, Firearms and Explosives")
        self.assertEqual(atf.attrib["agency-id"], "19")
def test_derive_agencies_unrelated(self):
"""
Test that we can properly derive agency info from the metadata and add
it to the XML if there is an agency and a non-child subagency.
"""
agencies_info = [
{
u'name': u'Treasury Department',
u'parent_id': None,
u'url': u'%s%s' % (u'https://www.federalregister.gov/',
u'agencies/treasury-department'),
u'raw_name': u'DEPARTMENT OF THE TREASURY',
u'json_url': u'%s%s' % (u'https://www.federalregister.gov/',
u'api/v1/agencies/497.json'),
u'id': 497
},
{
u'name': u'Alcohol, Tobacco, Firearms, and Explosives Bureau',
u'parent_id': 268,
u'url': '%s%s%s' % (u'https://www.federalregister.gov/',
u'agencies/alcohol-tobacco-firearms',
u'-and-explosives-bureau'),
u'raw_name': '%s%s' % (u'Bureau of Alcohol, Tobacco, Firearms',
u' and Explosives'),
u'json_url': '%s%s' % (u'https://www.federalregister.gov/',
u'api/v1/agencies/19.json'),
u'id': 19
}
]
with XMLBuilder("ROOT") as ctx:
with ctx.DATES():
ctx.P("Effective on May 4, 2004")
xml = notice_xml.NoticeXML(ctx.xml)
xml.derive_agencies(agencies=agencies_info)
        self.assertEqual(len(xml.xpath("//EREGS_AGENCIES")), 1)
        eregs_agencies = xml.xpath("//EREGS_AGENCIES")[0]
        self.assertEqual(len(eregs_agencies.xpath("//EREGS_AGENCY")), 1)
        treas = eregs_agencies.xpath("//EREGS_AGENCY")[0]
        self.assertEqual(treas.attrib["name"], "Treasury Department")
        self.assertEqual(treas.attrib["raw-name"],
                         "DEPARTMENT OF THE TREASURY")
        self.assertEqual(treas.attrib["agency-id"], "497")
        self.assertEqual(len(eregs_agencies.xpath("//EREGS_SUBAGENCY")), 1)
        atf = eregs_agencies.xpath("//EREGS_SUBAGENCY")[0]
        self.assertEqual(atf.attrib["name"],
                         u'Alcohol, Tobacco, Firearms, and Explosives Bureau')
        self.assertEqual(
            atf.attrib["raw-name"],
            u"Bureau of Alcohol, Tobacco, Firearms and Explosives")
        self.assertEqual(atf.attrib["agency-id"], "19")
def test_derive_agencies_mixed(self):
"""
Test that we can properly derive agency info from the metadata and add
it to the XML if we have a parent-child relationship and an unrelated
agency.
"""
agencies_info = [
{
u'name': u'Treasury Department',
u'parent_id': None,
u'url': u'%s%s' % (u'https://www.federalregister.gov/',
u'agencies/treasury-department'),
u'raw_name': u'DEPARTMENT OF THE TREASURY',
u'json_url': u'%s%s' % (u'https://www.federalregister.gov/',
u'api/v1/agencies/497.json'),
u'id': 497
},
{
u'name': u'Alcohol, Tobacco, Firearms, and Explosives Bureau',
u'parent_id': 268,
u'url': '%s%s%s' % (u'https://www.federalregister.gov/',
u'agencies/alcohol-tobacco-firearms',
u'-and-explosives-bureau'),
u'raw_name': '%s%s' % (u'Bureau of Alcohol, Tobacco, Firearms',
u' and Explosives'),
u'json_url': '%s%s' % (u'https://www.federalregister.gov/',
u'api/v1/agencies/19.json'),
u'id': 19
},
{
u'name': u'Justice Department',
u'parent_id': None,
u'url': u'%s%s' % (u'https://www.federalregister.gov/',
u'agencies/justice-department'),
u'raw_name': u'DEPARTMENT OF JUSTICE',
u'json_url': u'%s%s' % (u'https://www.federalregister.gov/',
u'api/v1/agencies/268.json'),
u'id': 268
}
]
with XMLBuilder("ROOT") as ctx:
with ctx.DATES():
ctx.P("Effective on May 4, 2004")
xml = notice_xml.NoticeXML(ctx.xml)
xml.derive_agencies(agencies=agencies_info)
        self.assertEqual(len(xml.xpath("//EREGS_AGENCIES")), 1)
        eregs_agencies = xml.xpath("//EREGS_AGENCIES")[0]
        self.assertEqual(len(eregs_agencies.xpath("//EREGS_AGENCY")), 2)
        treas = eregs_agencies.xpath("//EREGS_AGENCY")[0]
        self.assertEqual(treas.attrib["name"], "Treasury Department")
        self.assertEqual(treas.attrib["raw-name"],
                         "DEPARTMENT OF THE TREASURY")
        self.assertEqual(treas.attrib["agency-id"], "497")
        doj = eregs_agencies.xpath("//EREGS_AGENCY")[1]
        self.assertEqual(doj.attrib["name"], "Justice Department")
        self.assertEqual(doj.attrib["raw-name"], "DEPARTMENT OF JUSTICE")
        self.assertEqual(doj.attrib["agency-id"], "268")
        self.assertEqual(len(doj.xpath("//EREGS_SUBAGENCY")), 1)
        atf = doj.xpath("//EREGS_SUBAGENCY")[0]
        self.assertEqual(atf.attrib["name"],
                         u'Alcohol, Tobacco, Firearms, and Explosives Bureau')
        self.assertEqual(
            atf.attrib["raw-name"],
            u"Bureau of Alcohol, Tobacco, Firearms and Explosives")
        self.assertEqual(atf.attrib["agency-id"], "19")
def test_derive_agencies_generations(self):
"""
Test that we can properly derive agency info from the metadata and add
it to the XML if we have nested parent-child relationships.
"""
agencies_info = [
{
u'name': u'ATF subagency',
u'parent_id': 19,
u'url': u'%s%s' % (u'https://www.federalregister.gov/',
u'agencies/atf-subagency'),
u'raw_name': u'SUBAGENCY OF ATF',
u'json_url': u'%s%s' % (u'https://www.federalregister.gov/',
u'api/v1/agencies/100023.json'),
u'id': 100023
},
{
u'name': u'Alcohol, Tobacco, Firearms, and Explosives Bureau',
u'parent_id': 268,
u'url': '%s%s%s' % (u'https://www.federalregister.gov/',
u'agencies/alcohol-tobacco-firearms',
u'-and-explosives-bureau'),
u'raw_name': '%s%s' % (u'Bureau of Alcohol, Tobacco, Firearms',
u' and Explosives'),
u'json_url': '%s%s' % (u'https://www.federalregister.gov/',
u'api/v1/agencies/19.json'),
u'id': 19
},
{
u'name': u'Justice Department',
u'parent_id': None,
u'url': u'%s%s' % (u'https://www.federalregister.gov/',
u'agencies/justice-department'),
u'raw_name': u'DEPARTMENT OF JUSTICE',
u'json_url': u'%s%s' % (u'https://www.federalregister.gov/',
u'api/v1/agencies/268.json'),
u'id': 268
},
{
u'name': u'ATF subsubagency',
u'parent_id': 100023,
u'url': u'%s%s' % (u'https://www.federalregister.gov/',
u'agencies/atf-subsubagency'),
u'raw_name': u'SUBSUBAGENCY OF ATF',
u'json_url': u'%s%s' % (u'https://www.federalregister.gov/',
u'api/v1/agencies/100072.json'),
u'id': 100072
},
]
with XMLBuilder("ROOT") as ctx:
with ctx.DATES():
ctx.P("Effective on May 4, 2004")
xml = notice_xml.NoticeXML(ctx.xml)
xml.derive_agencies(agencies=agencies_info)
        self.assertEqual(len(xml.xpath("//EREGS_AGENCIES")), 1)
        eregs_agencies = xml.xpath("//EREGS_AGENCIES")[0]
        self.assertEqual(len(eregs_agencies.xpath("//EREGS_AGENCY")), 1)
        doj = eregs_agencies.xpath("//EREGS_AGENCY")[0]
        self.assertEqual(doj.attrib["name"], "Justice Department")
        self.assertEqual(doj.attrib["raw-name"], "DEPARTMENT OF JUSTICE")
        self.assertEqual(doj.attrib["agency-id"], "268")
        self.assertEqual(len(doj.xpath("//EREGS_SUBAGENCY")), 3)
        self.assertEqual(len(doj.xpath("EREGS_SUBAGENCY")), 1)
        atf = doj.xpath("//EREGS_SUBAGENCY")[0]
        self.assertEqual(atf.attrib["name"],
                         u'Alcohol, Tobacco, Firearms, and Explosives Bureau')
        self.assertEqual(
            atf.attrib["raw-name"],
            "Bureau of Alcohol, Tobacco, Firearms and Explosives")
        self.assertEqual(atf.attrib["agency-id"], "19")
        self.assertEqual(len(atf.xpath("EREGS_SUBAGENCY")), 1)
        subatf = atf.xpath("EREGS_SUBAGENCY")[0]
        self.assertEqual(subatf.attrib["name"], u'ATF subagency')
        self.assertEqual(subatf.attrib["raw-name"], u"SUBAGENCY OF ATF")
        self.assertEqual(subatf.attrib["agency-id"], u"100023")
        subsubatf = subatf.xpath("EREGS_SUBAGENCY")[0]
        self.assertEqual(subsubatf.attrib["name"], u'ATF subsubagency')
        self.assertEqual(subsubatf.attrib["raw-name"], u"SUBSUBAGENCY OF ATF")
        self.assertEqual(subsubatf.attrib["agency-id"], u"100072")
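    # Taken together, the assertions above imply that derive_agencies nests
    # each subagency under its parent, yielding XML roughly shaped like this
    # (a sketch inferred from the xpath checks, not verbatim regparser output):
    #
    #   <EREGS_AGENCIES>
    #     <EREGS_AGENCY name="Justice Department" agency-id="268">
    #       <EREGS_SUBAGENCY name="Alcohol, Tobacco, Firearms, and Explosives Bureau" agency-id="19">
    #         <EREGS_SUBAGENCY name="ATF subagency" agency-id="100023">
    #           <EREGS_SUBAGENCY name="ATF subsubagency" agency-id="100072"/>
    #         </EREGS_SUBAGENCY>
    #       </EREGS_SUBAGENCY>
    #     </EREGS_AGENCY>
    #   </EREGS_AGENCIES>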
# SFTP storage backend for Django.
# Author: Brent Tubbs <brent.tubbs@gmail.com>
# License: MIT
#
# Modeled on the FTP storage by Rafal Jonca <jonca.rafal@gmail.com>
import getpass
import io
import os
import posixpath
import stat
from datetime import datetime
from urllib.parse import urljoin
import paramiko
from django.core.files.base import File
from django.utils.deconstruct import deconstructible
from storages.base import BaseStorage
from storages.utils import setting
@deconstructible
class SFTPStorage(BaseStorage):
def __init__(self, **settings):
super().__init__(**settings)
self._host = self.host
self._params = self.params
self._interactive = self.interactive
self._file_mode = self.file_mode
self._dir_mode = self.dir_mode
self._uid = self.uid
self._gid = self.gid
self._known_host_file = self.known_host_file
self._root_path = self.root_path
self._base_url = self.base_url
self._sftp = None
def get_default_settings(self):
return {
'host': setting('SFTP_STORAGE_HOST'),
'params': setting('SFTP_STORAGE_PARAMS', {}),
'interactive': setting('SFTP_STORAGE_INTERACTIVE', False),
'file_mode': setting('SFTP_STORAGE_FILE_MODE'),
'dir_mode': setting('SFTP_STORAGE_DIR_MODE'),
'uid': setting('SFTP_STORAGE_UID'),
'gid': setting('SFTP_STORAGE_GID'),
'known_host_file': setting('SFTP_KNOWN_HOST_FILE'),
'root_path': setting('SFTP_STORAGE_ROOT', ''),
'base_url': setting('MEDIA_URL'),
}
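    # A minimal Django settings sketch mapping onto the defaults above (host,
    # credentials, and paths are illustrative):
    #
    #     DEFAULT_FILE_STORAGE = 'storages.backends.sftpstorage.SFTPStorage'
    #     SFTP_STORAGE_HOST = 'sftp.example.com'
    #     SFTP_STORAGE_PARAMS = {'username': 'media', 'password': 'secret'}
    #     SFTP_STORAGE_ROOT = '/var/www/media'
    #     MEDIA_URL = 'https://media.example.com/'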
def _connect(self):
self._ssh = paramiko.SSHClient()
known_host_file = self._known_host_file or os.path.expanduser(
os.path.join("~", ".ssh", "known_hosts")
)
if os.path.exists(known_host_file):
self._ssh.load_host_keys(known_host_file)
        # Automatically add new host keys for hosts we haven't seen before.
self._ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
self._ssh.connect(self._host, **self._params)
except paramiko.AuthenticationException as e:
if self._interactive and 'password' not in self._params:
# If authentication has failed, and we haven't already tried
# username/password, and configuration allows it, then try
# again with username/password.
if 'username' not in self._params:
self._params['username'] = getpass.getuser()
self._params['password'] = getpass.getpass()
self._connect()
else:
raise paramiko.AuthenticationException(e)
if self._ssh.get_transport():
self._sftp = self._ssh.open_sftp()
@property
def sftp(self):
"""Lazy SFTP connection"""
if not self._sftp or not self._ssh.get_transport().is_active():
self._connect()
return self._sftp
def _remote_path(self, name):
return posixpath.join(self._root_path, name)
def _open(self, name, mode='rb'):
return SFTPStorageFile(name, self, mode)
def _read(self, name):
remote_path = self._remote_path(name)
return self.sftp.open(remote_path, 'rb')
def _chown(self, path, uid=None, gid=None):
"""Set uid and/or gid for file at path."""
# Paramiko's chown requires both uid and gid, so look them up first if
# we're only supposed to set one.
        if uid is None or gid is None:
            attr = self.sftp.stat(path)
            # Don't use `or` for the fallback: uid/gid 0 (root) is falsy but valid.
            uid = attr.st_uid if uid is None else uid
            gid = attr.st_gid if gid is None else gid
        self.sftp.chown(path, uid, gid)
def _mkdir(self, path):
"""Create directory, recursing up to create parent dirs if
necessary."""
parent = posixpath.dirname(path)
if not self.exists(parent):
self._mkdir(parent)
self.sftp.mkdir(path)
if self._dir_mode is not None:
self.sftp.chmod(path, self._dir_mode)
if self._uid or self._gid:
self._chown(path, uid=self._uid, gid=self._gid)
def _save(self, name, content):
"""Save file via SFTP."""
content.open()
path = self._remote_path(name)
dirname = posixpath.dirname(path)
if not self.exists(dirname):
self._mkdir(dirname)
f = self.sftp.open(path, 'wb')
f.write(content.file.read())
f.close()
# set file permissions if configured
if self._file_mode is not None:
self.sftp.chmod(path, self._file_mode)
if self._uid or self._gid:
self._chown(path, uid=self._uid, gid=self._gid)
return name
def delete(self, name):
try:
self.sftp.remove(self._remote_path(name))
except OSError:
pass
def exists(self, name):
try:
self.sftp.stat(self._remote_path(name))
return True
except FileNotFoundError:
return False
def _isdir_attr(self, item):
# Return whether an item in sftp.listdir_attr results is a directory
if item.st_mode is not None:
return stat.S_IFMT(item.st_mode) == stat.S_IFDIR
else:
return False
def listdir(self, path):
remote_path = self._remote_path(path)
dirs, files = [], []
for item in self.sftp.listdir_attr(remote_path):
if self._isdir_attr(item):
dirs.append(item.filename)
else:
files.append(item.filename)
return dirs, files
def size(self, name):
remote_path = self._remote_path(name)
return self.sftp.stat(remote_path).st_size
def accessed_time(self, name):
remote_path = self._remote_path(name)
utime = self.sftp.stat(remote_path).st_atime
return datetime.fromtimestamp(utime)
def modified_time(self, name):
remote_path = self._remote_path(name)
utime = self.sftp.stat(remote_path).st_mtime
return datetime.fromtimestamp(utime)
def url(self, name):
if self._base_url is None:
raise ValueError("This file is not accessible via a URL.")
return urljoin(self._base_url, name).replace('\\', '/')
class SFTPStorageFile(File):
def __init__(self, name, storage, mode):
self.name = name
self.mode = mode
self.file = io.BytesIO()
self._storage = storage
self._is_read = False
self._is_dirty = False
@property
def size(self):
if not hasattr(self, '_size'):
self._size = self._storage.size(self.name)
return self._size
def read(self, num_bytes=None):
if not self._is_read:
self.file = self._storage._read(self.name)
self._is_read = True
return self.file.read(num_bytes)
def write(self, content):
if 'w' not in self.mode:
raise AttributeError("File was opened for read-only access.")
self.file = io.BytesIO(content)
self._is_dirty = True
self._is_read = True
def open(self, mode=None):
if not self.closed:
self.seek(0)
elif self.name and self._storage.exists(self.name):
self.file = self._storage._open(self.name, mode or self.mode)
else:
raise ValueError("The file cannot be reopened.")
def close(self):
if self._is_dirty:
self._storage._save(self.name, self)
self.file.close()
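# Usage sketch (names and content are illustrative; requires a reachable SFTP
# host configured as above):
#
#     from django.core.files.base import ContentFile
#
#     storage = SFTPStorage()
#     name = storage.save('reports/2020.txt', ContentFile(b'hello'))
#     if storage.exists(name):
#         with storage.open(name) as f:
#             data = f.read()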
"""Unit test for network_service - Treadmill Network configuration service
"""
import os
import shutil
import subprocess
import tempfile
import unittest
import mock
import treadmill
from treadmill.services import network_service
class NetworkServiceTest(unittest.TestCase):
"""Unit tests for the network service implementation.
"""
def setUp(self):
self.root = tempfile.mkdtemp()
def tearDown(self):
if self.root and os.path.isdir(self.root):
shutil.rmtree(self.root)
@mock.patch('treadmill.netdev.dev_mtu', mock.Mock())
@mock.patch('treadmill.netdev.dev_speed', mock.Mock())
@mock.patch('treadmill.services.network_service._device_ip', mock.Mock())
def test_init(self):
"""Test Network service constructor.
"""
network_service.NetworkResourceService(
ext_device='eth42',
)
treadmill.netdev.dev_mtu.assert_called_with('eth42')
treadmill.netdev.dev_speed.assert_called_with('eth42')
@mock.patch('treadmill.netdev.addr_add', mock.Mock())
@mock.patch('treadmill.netdev.bridge_addif', mock.Mock())
@mock.patch('treadmill.netdev.bridge_create', mock.Mock())
@mock.patch('treadmill.netdev.bridge_delete', mock.Mock())
@mock.patch('treadmill.netdev.bridge_setfd', mock.Mock())
@mock.patch('treadmill.netdev.dev_conf_route_localnet_set', mock.Mock())
@mock.patch('treadmill.netdev.dev_mac',
mock.Mock(return_value='11:22:33:44:55:66'))
@mock.patch('treadmill.netdev.dev_mtu', mock.Mock())
@mock.patch('treadmill.netdev.link_add_veth', mock.Mock())
@mock.patch('treadmill.netdev.link_del_veth', mock.Mock())
@mock.patch('treadmill.netdev.link_set_addr', mock.Mock())
@mock.patch('treadmill.netdev.link_set_down', mock.Mock())
@mock.patch('treadmill.netdev.link_set_mtu', mock.Mock())
@mock.patch('treadmill.netdev.link_set_up', mock.Mock())
@mock.patch('treadmill.services.network_service._device_ip', mock.Mock())
def test__bridge_initialize(self):
"""Test Network service bridge initialization.
"""
# Access to a protected member _bridge_initialize
# pylint: disable=W0212
svc = network_service.NetworkResourceService(
ext_device='eth42',
ext_speed=10000,
ext_mtu=9000,
)
svc._bridge_initialize()
treadmill.netdev.link_set_down.assert_has_calls(
[
mock.call('tm0'),
mock.call('br0'),
]
)
treadmill.netdev.link_del_veth.assert_called_with('tm0')
treadmill.netdev.bridge_delete.assert_has_calls(
[
mock.call('tm0'), # FIXME(boysson): For legacy setup cleanup
mock.call('br0'),
]
)
treadmill.netdev.bridge_create.assert_called_with('br0')
treadmill.netdev.bridge_setfd.assert_called_with('br0', 0)
# Setup the TM link with the right MTU
treadmill.netdev.link_add_veth.assert_called_with('tm0', 'tm1')
treadmill.netdev.link_set_mtu.assert_has_calls(
[
mock.call('tm0', 9000),
mock.call('tm1', 9000),
]
)
treadmill.netdev.link_set_mtu.assert_called_with('tm1', 9000)
# Make sure the bridge's address is fixed
treadmill.netdev.dev_mac.assert_called_with('tm1')
        treadmill.netdev.link_set_addr.assert_called_with(
            'br0', '11:22:33:44:55:66'
        )
# Add one end of the link to the bridge
treadmill.netdev.bridge_addif.assert_called_with('br0', 'tm1')
# Everything is brought up
treadmill.netdev.link_set_up.assert_has_calls(
[
mock.call('br0'),
mock.call('tm1'),
mock.call('tm0'),
]
)
# And the TM interface has the right IP
treadmill.netdev.addr_add.assert_called_with(
devname='tm0', addr='192.168.254.254/16',
)
treadmill.netdev.dev_conf_route_localnet_set.assert_called_with(
'tm0', True
)
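        # Recap of the topology these assertions pin down: a fresh br0 bridge
        # (forward delay 0), a tm0/tm1 veth pair with tm1 enslaved to br0 and
        # both ends at the external MTU, br0 adopting tm1's MAC, and tm0
        # carrying the container-facing gateway address 192.168.254.254/16.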
@mock.patch('treadmill.iptables.init_set', mock.Mock())
@mock.patch('treadmill.netdev.bridge_brif',
mock.Mock(return_value=['foo', 'bar']))
@mock.patch('treadmill.netdev.bridge_setfd', mock.Mock())
@mock.patch('treadmill.netdev.dev_conf_route_localnet_set', mock.Mock())
@mock.patch('treadmill.netdev.dev_mtu', mock.Mock())
@mock.patch('treadmill.netdev.link_set_up', mock.Mock())
@mock.patch('treadmill.services.network_service._device_info', mock.Mock())
@mock.patch('treadmill.services.network_service.NetworkResourceService.'
'_bridge_initialize', mock.Mock())
@mock.patch('treadmill.services.network_service._device_ip', mock.Mock())
@mock.patch('treadmill.vipfile.VipMgr', autospec=True)
def test_initialize_quick(self, mock_vipmgr):
"""Test service initialization (quick restart).
"""
# Access to a protected member _device_info of a client class
# pylint: disable=W0212
treadmill.services.network_service._device_info.side_effect = \
lambda dev: {'alias': 'reqid_%s' % dev}
mock_vipmgr_inst = mock_vipmgr.return_value
mock_vipmgr_inst.list.return_value = [
('192.168.1.2', 'reqid_foo'),
('192.168.43.10', 'reqid_bar'),
('192.168.8.9', 'reqid_baz'),
]
svc = network_service.NetworkResourceService(
ext_device='eth42',
ext_speed=10000,
ext_mtu=9000,
)
svc.initialize(self.root)
mock_vipmgr.assert_called_with(
mock.ANY,
svc._service_rsrc_dir
)
self.assertTrue(mock_vipmgr_inst.garbage_collect.called)
treadmill.iptables.init_set.assert_has_calls(
[
mock.call(treadmill.iptables.SET_PROD_CONTAINERS,
family='inet', hashsize=1024, maxelem=65536),
mock.call(treadmill.iptables.SET_NONPROD_CONTAINERS,
family='inet', hashsize=1024, maxelem=65536),
],
any_order=True
)
treadmill.netdev.link_set_up.assert_has_calls(
[
mock.call('tm0'),
mock.call('tm1'),
mock.call('br0'),
]
)
# Re-init is not called
self.assertFalse(svc._bridge_initialize.called)
self.assertFalse(mock_vipmgr_inst.initialize.called)
treadmill.netdev.bridge_setfd.assert_called_with('br0', 0)
        treadmill.netdev.dev_conf_route_localnet_set.assert_called_with(
            'tm0', True
        )
treadmill.netdev.dev_mtu.assert_called_with('br0')
        treadmill.netdev.bridge_brif.assert_called_with('br0')
treadmill.services.network_service._device_info.assert_has_calls(
[
mock.call('foo'),
mock.call('bar'),
]
)
mock_vipmgr_inst.free.assert_called_with('reqid_baz', '192.168.8.9')
self.assertEqual(
svc._devices,
{
'reqid_bar': {
'alias': 'reqid_bar',
'ip': '192.168.43.10',
'stale': True,
},
'reqid_foo': {
'alias': 'reqid_foo',
'ip': '192.168.1.2',
'stale': True,
}
},
'All devices must be unified with their IP and marked stale'
)
@mock.patch('treadmill.iptables.init_set', mock.Mock())
@mock.patch('treadmill.netdev.bridge_brif', mock.Mock(return_value=[]))
@mock.patch('treadmill.netdev.bridge_setfd', mock.Mock())
@mock.patch('treadmill.netdev.dev_conf_route_localnet_set', mock.Mock())
@mock.patch('treadmill.netdev.dev_mtu', mock.Mock())
@mock.patch('treadmill.netdev.link_set_up', mock.Mock())
@mock.patch('treadmill.services.network_service._device_info', mock.Mock())
@mock.patch('treadmill.services.network_service._device_ip', mock.Mock())
@mock.patch('treadmill.services.network_service.NetworkResourceService.'
'_bridge_initialize', mock.Mock())
@mock.patch('treadmill.vipfile.VipMgr', autospec=True)
def test_initialize(self, mock_vipmgr):
"""Test service initialization.
"""
# Access to a protected member _device_info of a client class
# pylint: disable=W0212
treadmill.services.network_service._device_info.side_effect = \
lambda dev: {'alias': 'reqid_%s' % dev}
treadmill.netdev.link_set_up.side_effect = [
subprocess.CalledProcessError('any', 'how'),
None,
]
mock_vipmgr_inst = mock_vipmgr.return_value
mock_vipmgr_inst.list.return_value = []
svc = network_service.NetworkResourceService(
ext_device='eth42',
ext_speed=10000,
ext_mtu=9000,
)
svc.initialize(self.root)
mock_vipmgr.assert_called_with(
mock.ANY,
svc._service_rsrc_dir
)
self.assertTrue(mock_vipmgr_inst.garbage_collect.called)
treadmill.iptables.init_set.assert_has_calls(
[
mock.call(treadmill.iptables.SET_PROD_CONTAINERS,
family='inet', hashsize=1024, maxelem=65536),
mock.call(treadmill.iptables.SET_NONPROD_CONTAINERS,
family='inet', hashsize=1024, maxelem=65536),
],
any_order=True
)
treadmill.netdev.link_set_up.assert_called_with('tm0')
self.assertTrue(svc._bridge_initialize.called)
self.assertTrue(mock_vipmgr_inst.initialize.called)
treadmill.netdev.bridge_setfd.assert_called_with('br0', 0)
treadmill.netdev.dev_mtu.assert_called_with('br0')
        treadmill.netdev.dev_conf_route_localnet_set.assert_called_with(
            'tm0', True
        )
        treadmill.netdev.bridge_brif.assert_called_with('br0')
self.assertFalse(
treadmill.services.network_service._device_info.called
)
self.assertFalse(mock_vipmgr_inst.free.called)
self.assertEqual(
svc._devices,
{}
)
@mock.patch('treadmill.services.network_service._device_ip', mock.Mock())
def test_event_handlers(self):
"""Test event_handlers request.
"""
svc = network_service.NetworkResourceService(
ext_device='eth42',
ext_speed=10000,
ext_mtu=9000,
)
self.assertEqual(
svc.event_handlers(),
[]
)
@mock.patch('treadmill.services.network_service._device_ip',
mock.Mock(return_value='a.b.c.d'))
def test_report_status(self):
"""Test service status reporting.
"""
svc = network_service.NetworkResourceService(
ext_device='eth42',
ext_speed=10000,
ext_mtu=9000,
)
status = svc.report_status()
self.assertEqual(
status,
{
'bridge_dev': 'br0',
'bridge_mtu': 0,
'internal_device': 'tm0',
'internal_ip': '192.168.254.254',
'devices': {},
'external_mtu': 9000,
'external_speed': 10000,
'external_ip': 'a.b.c.d',
'external_device': 'eth42',
}
)
@mock.patch('treadmill.iptables.add_mark_rule', mock.Mock())
@mock.patch('treadmill.netdev.addr_add', mock.Mock())
@mock.patch('treadmill.netdev.bridge_addif', mock.Mock())
@mock.patch('treadmill.netdev.link_add_veth', mock.Mock())
@mock.patch('treadmill.netdev.link_set_alias', mock.Mock())
@mock.patch('treadmill.netdev.link_set_mtu', mock.Mock())
@mock.patch('treadmill.netdev.link_set_up', mock.Mock())
@mock.patch('treadmill.services.network_service._device_info',
autospec=True)
@mock.patch('treadmill.services.network_service._device_ip',
mock.Mock(return_value='1.2.3.4'))
def test_on_create_request(self, mock_devinfo):
"""Test processing of a network create request.
"""
# Access to a protected member _devices
# pylint: disable=W0212
svc = network_service.NetworkResourceService(
ext_device='eth42',
ext_speed=10000,
ext_mtu=9000,
)
svc._vips = mock.Mock()
mockip = svc._vips.alloc.return_value
request = {
'environment': 'dev',
}
request_id = 'myproid.test-0-ID1234'
mock_devinfo.return_value = {'test': 'me'}
network = svc.on_create_request(request_id, request)
svc._vips.alloc.assert_called_with(request_id)
treadmill.netdev.link_add_veth.assert_called_with(
'0000000ID1234.0', '0000000ID1234.1',
)
treadmill.netdev.link_set_mtu.assert_has_calls(
[
mock.call('0000000ID1234.0', 9000),
mock.call('0000000ID1234.1', 9000),
]
)
treadmill.netdev.link_set_alias.assert_has_calls(
[
mock.call('0000000ID1234.0', request_id),
mock.call('0000000ID1234.1', request_id),
]
)
treadmill.netdev.bridge_addif.assert_called_with(
'br0', '0000000ID1234.0'
)
treadmill.netdev.link_set_up.assert_called_with(
'0000000ID1234.0',
)
mock_devinfo.assert_called_with('0000000ID1234.0')
self.assertEqual(
network,
{
'gateway': '192.168.254.254',
'veth': '0000000ID1234.1',
'vip': mockip,
'external_ip': '1.2.3.4',
}
)
self.assertEqual(
svc._devices,
{
request_id: {
'environment': 'dev',
'ip': mockip,
'test': 'me',
}
}
)
treadmill.iptables.add_mark_rule.assert_called_with(
mockip, 'dev'
)
@mock.patch('treadmill.iptables.add_mark_rule', mock.Mock())
@mock.patch('treadmill.netdev.addr_add', mock.Mock())
@mock.patch('treadmill.netdev.bridge_addif', mock.Mock())
@mock.patch('treadmill.netdev.link_add_veth', mock.Mock())
@mock.patch('treadmill.netdev.link_set_alias', mock.Mock())
@mock.patch('treadmill.netdev.link_set_mtu', mock.Mock())
@mock.patch('treadmill.netdev.link_set_up', mock.Mock())
@mock.patch('treadmill.services.network_service._device_info',
autospec=True)
@mock.patch('treadmill.services.network_service._device_ip',
mock.Mock(return_value='1.2.3.4'))
def test_on_create_request_existing(self, mock_devinfo):
"""Test processing of a network create request when the device exists
(restarts).
"""
# Access to a protected member _devices
# pylint: disable=W0212
svc = network_service.NetworkResourceService(
ext_device='eth42',
ext_speed=10000,
ext_mtu=9000,
)
svc._vips = mock.Mock()
request = {
'environment': 'dev',
}
request_id = 'myproid.test-0-ID1234'
        # Fake the existing device record left over from a previous run.
svc._devices = {
request_id: {
'ip': 'old_ip',
},
}
mock_devinfo.return_value = {'test': 'me'}
network = svc.on_create_request(request_id, request)
self.assertFalse(svc._vips.alloc.called)
self.assertFalse(treadmill.netdev.link_add_veth.called)
self.assertFalse(treadmill.netdev.link_set_mtu.called)
self.assertFalse(treadmill.netdev.link_set_alias.called)
self.assertFalse(treadmill.netdev.bridge_addif.called)
self.assertFalse(treadmill.netdev.link_set_up.called)
mock_devinfo.assert_called_with('0000000ID1234.0')
self.assertEqual(
network,
{
'gateway': '192.168.254.254',
'veth': '0000000ID1234.1',
'vip': 'old_ip',
'external_ip': '1.2.3.4',
}
)
self.assertEqual(
svc._devices,
{
request_id: {
'environment': 'dev',
'ip': 'old_ip',
'test': 'me',
}
}
)
treadmill.iptables.add_mark_rule.assert_called_with(
'old_ip', 'dev'
)
@mock.patch('treadmill.iptables.delete_mark_rule', mock.Mock())
@mock.patch('treadmill.netdev.dev_state', mock.Mock())
@mock.patch('treadmill.netdev.link_del_veth', mock.Mock())
@mock.patch('treadmill.vipfile.VipMgr', autospec=True)
@mock.patch('treadmill.services.network_service._device_ip', mock.Mock())
def test_on_delete_request(self, mock_vipmgr):
"""Test processing of a localdisk delete request.
"""
# Access to a protected member
# pylint: disable=W0212
svc = network_service.NetworkResourceService(
ext_device='eth42',
ext_speed=10000,
ext_mtu=9000,
)
svc._vips = mock_vipmgr(mock.ANY, mock.ANY)
request_id = 'myproid.test-0-ID1234'
svc._devices[request_id] = {
'ip': 'test_ip',
'environment': 'test_env',
}
svc.on_delete_request(request_id)
treadmill.netdev.dev_state.assert_called_with(
'0000000ID1234.0'
)
treadmill.netdev.link_del_veth.assert_called_with(
'0000000ID1234.0'
)
treadmill.iptables.delete_mark_rule.assert_called_with(
'test_ip', 'test_env'
)
svc._vips.free.assert_called_with(
request_id, 'test_ip',
)
if __name__ == '__main__':
unittest.main()
| |
"""Support for covers which integrate with other components."""
import logging
import voluptuous as vol
from homeassistant.components.cover import (
ATTR_POSITION,
ATTR_TILT_POSITION,
DEVICE_CLASSES_SCHEMA,
ENTITY_ID_FORMAT,
PLATFORM_SCHEMA,
SUPPORT_CLOSE,
SUPPORT_CLOSE_TILT,
SUPPORT_OPEN,
SUPPORT_OPEN_TILT,
SUPPORT_SET_POSITION,
SUPPORT_SET_TILT_POSITION,
SUPPORT_STOP,
SUPPORT_STOP_TILT,
CoverEntity,
)
from homeassistant.const import (
CONF_DEVICE_CLASS,
CONF_ENTITY_ID,
CONF_ENTITY_PICTURE_TEMPLATE,
CONF_FRIENDLY_NAME,
CONF_ICON_TEMPLATE,
CONF_OPTIMISTIC,
CONF_UNIQUE_ID,
CONF_VALUE_TEMPLATE,
STATE_CLOSED,
STATE_OPEN,
)
from homeassistant.core import callback
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.script import Script
from .const import CONF_AVAILABILITY_TEMPLATE, DOMAIN, PLATFORMS
from .template_entity import TemplateEntity
_LOGGER = logging.getLogger(__name__)
_VALID_STATES = [STATE_OPEN, STATE_CLOSED, "true", "false"]
CONF_COVERS = "covers"
CONF_POSITION_TEMPLATE = "position_template"
CONF_TILT_TEMPLATE = "tilt_template"
OPEN_ACTION = "open_cover"
CLOSE_ACTION = "close_cover"
STOP_ACTION = "stop_cover"
POSITION_ACTION = "set_cover_position"
TILT_ACTION = "set_cover_tilt_position"
CONF_TILT_OPTIMISTIC = "tilt_optimistic"
CONF_VALUE_OR_POSITION_TEMPLATE = "value_or_position"
CONF_OPEN_OR_CLOSE = "open_or_close"
TILT_FEATURES = (
SUPPORT_OPEN_TILT
| SUPPORT_CLOSE_TILT
| SUPPORT_STOP_TILT
| SUPPORT_SET_TILT_POSITION
)
COVER_SCHEMA = vol.All(
vol.Schema(
{
vol.Inclusive(OPEN_ACTION, CONF_OPEN_OR_CLOSE): cv.SCRIPT_SCHEMA,
vol.Inclusive(CLOSE_ACTION, CONF_OPEN_OR_CLOSE): cv.SCRIPT_SCHEMA,
vol.Optional(STOP_ACTION): cv.SCRIPT_SCHEMA,
vol.Exclusive(
CONF_POSITION_TEMPLATE, CONF_VALUE_OR_POSITION_TEMPLATE
): cv.template,
vol.Exclusive(
CONF_VALUE_TEMPLATE, CONF_VALUE_OR_POSITION_TEMPLATE
): cv.template,
vol.Optional(CONF_AVAILABILITY_TEMPLATE): cv.template,
vol.Optional(CONF_POSITION_TEMPLATE): cv.template,
vol.Optional(CONF_TILT_TEMPLATE): cv.template,
vol.Optional(CONF_ICON_TEMPLATE): cv.template,
vol.Optional(CONF_ENTITY_PICTURE_TEMPLATE): cv.template,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_OPTIMISTIC): cv.boolean,
vol.Optional(CONF_TILT_OPTIMISTIC): cv.boolean,
vol.Optional(POSITION_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(TILT_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(CONF_FRIENDLY_NAME): cv.string,
vol.Optional(CONF_ENTITY_ID): cv.entity_ids,
vol.Optional(CONF_UNIQUE_ID): cv.string,
}
),
cv.has_at_least_one_key(OPEN_ACTION, POSITION_ACTION),
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_COVERS): cv.schema_with_slug_keys(COVER_SCHEMA)}
)
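# For reference, a minimal configuration accepted by the schema above might
# look like the following (illustrative only; the entity and script names
# are hypothetical):
#
#   cover:
#     - platform: template
#       covers:
#         garage_door:
#           friendly_name: "Garage Door"
#           value_template: >-
#             {{ 'open' if is_state('binary_sensor.garage', 'on') else 'closed' }}
#           open_cover:
#             service: script.open_garage
#           close_cover:
#             service: script.close_garage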
async def _async_create_entities(hass, config):
"""Create the Template cover."""
covers = []
for device, device_config in config[CONF_COVERS].items():
state_template = device_config.get(CONF_VALUE_TEMPLATE)
position_template = device_config.get(CONF_POSITION_TEMPLATE)
tilt_template = device_config.get(CONF_TILT_TEMPLATE)
icon_template = device_config.get(CONF_ICON_TEMPLATE)
availability_template = device_config.get(CONF_AVAILABILITY_TEMPLATE)
entity_picture_template = device_config.get(CONF_ENTITY_PICTURE_TEMPLATE)
friendly_name = device_config.get(CONF_FRIENDLY_NAME, device)
device_class = device_config.get(CONF_DEVICE_CLASS)
open_action = device_config.get(OPEN_ACTION)
close_action = device_config.get(CLOSE_ACTION)
stop_action = device_config.get(STOP_ACTION)
position_action = device_config.get(POSITION_ACTION)
tilt_action = device_config.get(TILT_ACTION)
optimistic = device_config.get(CONF_OPTIMISTIC)
tilt_optimistic = device_config.get(CONF_TILT_OPTIMISTIC)
unique_id = device_config.get(CONF_UNIQUE_ID)
covers.append(
CoverTemplate(
hass,
device,
friendly_name,
device_class,
state_template,
position_template,
tilt_template,
icon_template,
entity_picture_template,
availability_template,
open_action,
close_action,
stop_action,
position_action,
tilt_action,
optimistic,
tilt_optimistic,
unique_id,
)
)
return covers
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Template cover."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
async_add_entities(await _async_create_entities(hass, config))
class CoverTemplate(TemplateEntity, CoverEntity):
"""Representation of a Template cover."""
def __init__(
self,
hass,
device_id,
friendly_name,
device_class,
state_template,
position_template,
tilt_template,
icon_template,
entity_picture_template,
availability_template,
open_action,
close_action,
stop_action,
position_action,
tilt_action,
optimistic,
tilt_optimistic,
unique_id,
):
"""Initialize the Template cover."""
super().__init__(
availability_template=availability_template,
icon_template=icon_template,
entity_picture_template=entity_picture_template,
)
self.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, device_id, hass=hass
)
self._name = friendly_name
self._template = state_template
self._position_template = position_template
self._tilt_template = tilt_template
self._device_class = device_class
self._open_script = None
domain = __name__.split(".")[-2]
if open_action is not None:
self._open_script = Script(hass, open_action, friendly_name, domain)
self._close_script = None
if close_action is not None:
self._close_script = Script(hass, close_action, friendly_name, domain)
self._stop_script = None
if stop_action is not None:
self._stop_script = Script(hass, stop_action, friendly_name, domain)
self._position_script = None
if position_action is not None:
self._position_script = Script(hass, position_action, friendly_name, domain)
self._tilt_script = None
if tilt_action is not None:
self._tilt_script = Script(hass, tilt_action, friendly_name, domain)
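        # Without a state or position template the cover cannot report its
        # actual state, so fall back to optimistic mode (same for tilt below).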
self._optimistic = optimistic or (not state_template and not position_template)
self._tilt_optimistic = tilt_optimistic or not tilt_template
self._position = None
self._tilt_value = None
self._unique_id = unique_id
async def async_added_to_hass(self):
"""Register callbacks."""
if self._template:
self.add_template_attribute(
"_position", self._template, None, self._update_state
)
if self._position_template:
self.add_template_attribute(
"_position",
self._position_template,
None,
self._update_position,
none_on_template_error=True,
)
if self._tilt_template:
self.add_template_attribute(
"_tilt_value",
self._tilt_template,
None,
self._update_tilt,
none_on_template_error=True,
)
await super().async_added_to_hass()
@callback
def _update_state(self, result):
super()._update_state(result)
if isinstance(result, TemplateError):
self._position = None
return
if result in _VALID_STATES:
if result in ("true", STATE_OPEN):
self._position = 100
else:
self._position = 0
else:
_LOGGER.error(
"Received invalid cover is_on state: %s. Expected: %s",
result,
", ".join(_VALID_STATES),
)
self._position = None
@callback
def _update_position(self, result):
try:
state = float(result)
except ValueError as err:
_LOGGER.error(err)
self._position = None
return
if state < 0 or state > 100:
self._position = None
_LOGGER.error(
"Cover position value must be" " between 0 and 100." " Value was: %.2f",
state,
)
else:
self._position = state
@callback
def _update_tilt(self, result):
try:
state = float(result)
except ValueError as err:
_LOGGER.error(err)
self._tilt_value = None
return
if state < 0 or state > 100:
self._tilt_value = None
_LOGGER.error(
"Tilt value must be between 0 and 100. Value was: %.2f", state,
)
else:
self._tilt_value = state
@property
def name(self):
"""Return the name of the cover."""
return self._name
@property
def unique_id(self):
"""Return the unique id of this cover."""
return self._unique_id
@property
def is_closed(self):
"""Return if the cover is closed."""
return self._position == 0
@property
def current_cover_position(self):
"""Return current position of cover.
None is unknown, 0 is closed, 100 is fully open.
"""
if self._position_template or self._position_script:
return self._position
return None
@property
def current_cover_tilt_position(self):
"""Return current position of cover tilt.
None is unknown, 0 is closed, 100 is fully open.
"""
return self._tilt_value
@property
def device_class(self):
"""Return the device class of the cover."""
return self._device_class
@property
def supported_features(self):
"""Flag supported features."""
supported_features = SUPPORT_OPEN | SUPPORT_CLOSE
if self._stop_script is not None:
supported_features |= SUPPORT_STOP
if self._position_script is not None:
supported_features |= SUPPORT_SET_POSITION
if self._tilt_script is not None:
supported_features |= TILT_FEATURES
return supported_features
async def async_open_cover(self, **kwargs):
"""Move the cover up."""
if self._open_script:
await self._open_script.async_run(context=self._context)
elif self._position_script:
await self._position_script.async_run(
{"position": 100}, context=self._context
)
if self._optimistic:
self._position = 100
self.async_write_ha_state()
async def async_close_cover(self, **kwargs):
"""Move the cover down."""
if self._close_script:
await self._close_script.async_run(context=self._context)
elif self._position_script:
await self._position_script.async_run(
{"position": 0}, context=self._context
)
if self._optimistic:
self._position = 0
self.async_write_ha_state()
async def async_stop_cover(self, **kwargs):
"""Fire the stop action."""
if self._stop_script:
await self._stop_script.async_run(context=self._context)
async def async_set_cover_position(self, **kwargs):
"""Set cover position."""
self._position = kwargs[ATTR_POSITION]
await self._position_script.async_run(
{"position": self._position}, context=self._context
)
if self._optimistic:
self.async_write_ha_state()
async def async_open_cover_tilt(self, **kwargs):
"""Tilt the cover open."""
self._tilt_value = 100
await self._tilt_script.async_run(
{"tilt": self._tilt_value}, context=self._context
)
if self._tilt_optimistic:
self.async_write_ha_state()
async def async_close_cover_tilt(self, **kwargs):
"""Tilt the cover closed."""
self._tilt_value = 0
await self._tilt_script.async_run(
{"tilt": self._tilt_value}, context=self._context
)
if self._tilt_optimistic:
self.async_write_ha_state()
async def async_set_cover_tilt_position(self, **kwargs):
"""Move the cover tilt to a specific position."""
self._tilt_value = kwargs[ATTR_TILT_POSITION]
await self._tilt_script.async_run(
{"tilt": self._tilt_value}, context=self._context
)
if self._tilt_optimistic:
self.async_write_ha_state()
| |
# Copyright 2010 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
"""
This module contains Query objects that deal with "spans".
Span queries allow for positional constraints on matching documents. For
example, the :class:`whoosh.spans.SpanNear` query matches documents where one
term occurs near another. Because you can nest span queries, and wrap them
around almost any non-span query, you can create very complex constraints.
For example, to find documents containing "whoosh" at most 5 positions before
"library" in the "text" field::
from whoosh import query, spans
t1 = query.Term("text", "whoosh")
t2 = query.Term("text", "library")
q = spans.SpanNear(t1, t2, slop=5)
"""
from whoosh.matching import mcore, wrappers, binary
from whoosh.query import Query, And, AndMaybe, Or, Term
from whoosh.util import make_binary_tree
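# A hedged usage sketch (not part of this module; the index directory is
# hypothetical): span queries go through the normal matcher protocol, which
# exposes both matching document ids and the matching spans themselves.
#
#   from whoosh import index, query, spans
#   ix = index.open_dir("indexdir")
#   with ix.searcher() as s:
#       q = spans.SpanNear(query.Term("text", "whoosh"),
#                          query.Term("text", "library"), slop=5)
#       m = q.matcher(s)
#       while m.is_active():
#           print(m.id(), m.spans())
#           m.next()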
# Span class
class Span(object):
__slots__ = ("start", "end", "startchar", "endchar", "boost")
def __init__(self, start, end=None, startchar=None, endchar=None,
boost=1.0):
if end is None:
end = start
assert start <= end
self.start = start
self.end = end
self.startchar = startchar
self.endchar = endchar
self.boost = boost
def __repr__(self):
if self.startchar is not None or self.endchar is not None:
return "<%d-%d %d:%d>" % (self.start, self.end, self.startchar,
self.endchar)
else:
return "<%d-%d>" % (self.start, self.end)
def __eq__(self, span):
return (self.start == span.start
and self.end == span.end
and self.startchar == span.startchar
and self.endchar == span.endchar)
def __ne__(self, span):
return self.start != span.start or self.end != span.end
def __lt__(self, span):
return self.start < span.start
def __gt__(self, span):
return self.start > span.start
def __hash__(self):
return hash((self.start, self.end))
@classmethod
def merge(cls, spans):
"""Merges overlapping and touches spans in the given list of spans.
Note that this modifies the original list.
>>> spans = [Span(1,2), Span(3)]
>>> Span.merge(spans)
>>> spans
[<1-3>]
"""
i = 0
while i < len(spans) - 1:
here = spans[i]
j = i + 1
while j < len(spans):
there = spans[j]
if there.start > here.end + 1:
break
if here.touches(there) or here.overlaps(there):
here = here.to(there)
spans[i] = here
del spans[j]
else:
j += 1
i += 1
return spans
def to(self, span):
if self.startchar is None:
minchar = span.startchar
elif span.startchar is None:
minchar = self.startchar
else:
minchar = min(self.startchar, span.startchar)
if self.endchar is None:
maxchar = span.endchar
elif span.endchar is None:
maxchar = self.endchar
else:
maxchar = max(self.endchar, span.endchar)
minpos = min(self.start, span.start)
maxpos = max(self.end, span.end)
return self.__class__(minpos, maxpos, minchar, maxchar)
def overlaps(self, span):
return ((self.start >= span.start and self.start <= span.end)
or (self.end >= span.start and self.end <= span.end)
or (span.start >= self.start and span.start <= self.end)
or (span.end >= self.start and span.end <= self.end))
def surrounds(self, span):
return self.start < span.start and self.end > span.end
def is_within(self, span):
return self.start >= span.start and self.end <= span.end
def is_before(self, span):
return self.end < span.start
def is_after(self, span):
return self.start > span.end
def touches(self, span):
return self.start == span.end + 1 or self.end == span.start - 1
def distance_to(self, span):
if self.overlaps(span):
return 0
elif self.is_before(span):
return span.start - self.end
else:
return self.start - span.end
def bisect_spans(spans, start):
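    """Return the leftmost index at which a span starting at ``start`` could
    be inserted into ``spans``, which must be sorted by start position.
    """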
lo = 0
hi = len(spans)
while lo < hi:
mid = (lo + hi) // 2
if spans[mid].start < start:
lo = mid + 1
else:
hi = mid
return lo
# Base matchers
class SpanWrappingMatcher(wrappers.WrappingMatcher):
"""An abstract matcher class that wraps a "regular" matcher. This matcher
uses the sub-matcher's matching logic, but only matches documents that have
matching spans, i.e. where ``_get_spans()`` returns a non-empty list.
Subclasses must implement the ``_get_spans()`` method, which returns a list
of valid spans for the current document.
"""
def __init__(self, child):
super(SpanWrappingMatcher, self).__init__(child)
self._spans = None
if self.is_active():
self._find_next()
def copy(self):
m = self.__class__(self.child.copy())
m._spans = self._spans
return m
def _replacement(self, newchild):
return self.__class__(newchild)
def _find_next(self):
if not self.is_active():
return
child = self.child
r = False
spans = self._get_spans()
while child.is_active() and not spans:
r = child.next() or r
if not child.is_active():
return True
spans = self._get_spans()
self._spans = spans
return r
def spans(self):
return self._spans
def next(self):
self.child.next()
self._find_next()
def skip_to(self, id):
self.child.skip_to(id)
self._find_next()
def all_ids(self):
while self.is_active():
if self.spans():
yield self.id()
self.next()
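# A minimal sketch of the subclass contract described above (illustrative
# only; this class is not part of the module): a subclass only supplies
# _get_spans(), and SpanWrappingMatcher skips documents for which the
# returned list is empty.
#
#   class EvenStartMatcher(SpanWrappingMatcher):
#       def _get_spans(self):
#           return [span for span in self.child.spans()
#                   if span.start % 2 == 0]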
class SpanBiMatcher(SpanWrappingMatcher):
def copy(self):
return self.__class__(self.a.copy(), self.b.copy())
def depth(self):
return 1 + max(self.a.depth(), self.b.depth())
def replace(self, minquality=0):
# TODO: fix this
if not self.is_active():
return mcore.NullMatcher()
return self
# Queries
class SpanQuery(Query):
"""Abstract base class for span-based queries. Each span query type wraps
a "regular" query that implements the basic document-matching functionality
    (for example, SpanNear wraps an And query, because SpanNear requires that
    the two sub-queries occur in the same documents). The wrapped query is
    stored in the ``q`` attribute.
Subclasses usually only need to implement the initializer to set the
wrapped query, and ``matcher()`` to return a span-aware matcher object.
"""
def _subm(self, s, context=None):
return self.q.matcher(s, context)
    def __getattr__(self, name):
        # Delegate unknown attribute lookups to the wrapped query.
        return getattr(self.q, name)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.q)
def __eq__(self, other):
return (other and self.__class__ is other.__class__
and self.q == other.q)
def __hash__(self):
return hash(self.__class__.__name__) ^ hash(self.q)
class SpanFirst(SpanQuery):
"""Matches spans that end within the first N positions. This lets you
for example only match terms near the beginning of the document.
"""
def __init__(self, q, limit=0):
"""
:param q: the query to match.
:param limit: the query must match within this position at the start
of a document. The default is ``0``, which means the query must
match at the first position.
"""
self.q = q
self.limit = limit
def __eq__(self, other):
return (other and self.__class__ is other.__class__
and self.q == other.q and self.limit == other.limit)
def __hash__(self):
return hash(self.q) ^ hash(self.limit)
def is_leaf(self):
return False
def apply(self, fn):
return self.__class__(fn(self.q), limit=self.limit)
def matcher(self, searcher, context=None):
m = self._subm(searcher, context)
return SpanFirst.SpanFirstMatcher(m, limit=self.limit)
class SpanFirstMatcher(SpanWrappingMatcher):
def __init__(self, child, limit=0):
self.limit = limit
super(SpanFirst.SpanFirstMatcher, self).__init__(child)
def copy(self):
return self.__class__(self.child.copy(), limit=self.limit)
def _replacement(self, newchild):
return self.__class__(newchild, limit=self.limit)
def _get_spans(self):
return [span for span in self.child.spans()
if span.end <= self.limit]
class SpanNear(SpanQuery):
"""
Note: for new code, use :class:`SpanNear2` instead of this class. SpanNear2
takes a list of sub-queries instead of requiring you to create a binary
tree of query objects.
Matches queries that occur near each other. By default, only matches
queries that occur right next to each other (slop=1) and in order
(ordered=True).
For example, to find documents where "whoosh" occurs next to "library"
in the "text" field::
from whoosh import query, spans
t1 = query.Term("text", "whoosh")
t2 = query.Term("text", "library")
q = spans.SpanNear(t1, t2)
To find documents where "whoosh" occurs at most 5 positions before
"library"::
q = spans.SpanNear(t1, t2, slop=5)
To find documents where "whoosh" occurs at most 5 positions before or after
"library"::
q = spans.SpanNear(t1, t2, slop=5, ordered=False)
You can use the ``phrase()`` class method to create a tree of SpanNear
queries to match a list of terms::
q = spans.SpanNear.phrase("text", ["whoosh", "search", "library"],
slop=2)
"""
def __init__(self, a, b, slop=1, ordered=True, mindist=1):
"""
:param a: the first query to match.
:param b: the second query that must occur within "slop" positions of
the first query.
:param slop: the number of positions within which the queries must
occur. Default is 1, meaning the queries must occur right next
to each other.
:param ordered: whether a must occur before b. Default is True.
        :param mindist: the minimum distance allowed between the queries.
"""
self.q = And([a, b])
self.a = a
self.b = b
self.slop = slop
self.ordered = ordered
self.mindist = mindist
def __repr__(self):
return ("%s(%r, slop=%d, ordered=%s, mindist=%d)"
% (self.__class__.__name__, self.q, self.slop, self.ordered,
self.mindist))
def __eq__(self, other):
return (other and self.__class__ == other.__class__
and self.q == other.q and self.slop == other.slop
and self.ordered == other.ordered
and self.mindist == other.mindist)
def __hash__(self):
return (hash(self.a) ^ hash(self.b) ^ hash(self.slop)
^ hash(self.ordered) ^ hash(self.mindist))
def is_leaf(self):
return False
def apply(self, fn):
return self.__class__(fn(self.a), fn(self.b), slop=self.slop,
ordered=self.ordered, mindist=self.mindist)
def matcher(self, searcher, context=None):
ma = self.a.matcher(searcher, context)
mb = self.b.matcher(searcher, context)
return SpanNear.SpanNearMatcher(ma, mb, slop=self.slop,
ordered=self.ordered,
mindist=self.mindist)
@classmethod
def phrase(cls, fieldname, words, slop=1, ordered=True):
"""Returns a tree of SpanNear queries to match a list of terms.
This class method is a convenience for constructing a phrase query
using a binary tree of SpanNear queries::
SpanNear.phrase("content", ["alfa", "bravo", "charlie", "delta"])
:param fieldname: the name of the field to search in.
:param words: a sequence of texts to search for.
:param slop: the number of positions within which the terms must
occur. Default is 1, meaning the terms must occur right next
to each other.
:param ordered: whether the terms must occur in order. Default is True.
"""
terms = [Term(fieldname, word) for word in words]
return make_binary_tree(cls, terms, slop=slop, ordered=ordered)
class SpanNearMatcher(SpanWrappingMatcher):
def __init__(self, a, b, slop=1, ordered=True, mindist=1):
self.a = a
self.b = b
self.slop = slop
self.ordered = ordered
self.mindist = mindist
isect = binary.IntersectionMatcher(a, b)
super(SpanNear.SpanNearMatcher, self).__init__(isect)
def copy(self):
return self.__class__(self.a.copy(), self.b.copy(), slop=self.slop,
ordered=self.ordered, mindist=self.mindist)
def replace(self, minquality=0):
# TODO: fix this
if not self.is_active():
return mcore.NullMatcher()
return self
def _get_spans(self):
slop = self.slop
mindist = self.mindist
ordered = self.ordered
spans = set()
bspans = self.b.spans()
for aspan in self.a.spans():
for bspan in bspans:
if (bspan.end < aspan.start - slop
or (ordered and aspan.start > bspan.start)):
# B is too far in front of A, or B is in front of A
# *at all* when ordered is True
continue
if bspan.start > aspan.end + slop:
# B is too far from A. Since spans are listed in
# start position order, we know that all spans after
# this one will also be too far.
break
# Check the distance between the spans
dist = aspan.distance_to(bspan)
if mindist <= dist <= slop:
spans.add(aspan.to(bspan))
return sorted(spans)
class SpanNear2(SpanQuery):
"""
Matches queries that occur near each other. By default, only matches
queries that occur right next to each other (slop=1) and in order
(ordered=True).
New code should use this query type instead of :class:`SpanNear`.
(Unlike :class:`SpanNear`, this query takes a list of subqueries instead of
requiring you to build a binary tree of query objects. This query should
also be slightly faster due to less overhead.)
For example, to find documents where "whoosh" occurs next to "library"
in the "text" field::
from whoosh import query, spans
t1 = query.Term("text", "whoosh")
t2 = query.Term("text", "library")
q = spans.SpanNear2([t1, t2])
To find documents where "whoosh" occurs at most 5 positions before
"library"::
q = spans.SpanNear2([t1, t2], slop=5)
To find documents where "whoosh" occurs at most 5 positions before or after
"library"::
        q = spans.SpanNear2([t1, t2], slop=5, ordered=False)
"""
def __init__(self, qs, slop=1, ordered=True, mindist=1):
"""
:param qs: a sequence of sub-queries to match.
:param slop: the number of positions within which the queries must
occur. Default is 1, meaning the queries must occur right next
to each other.
        :param ordered: whether the sub-queries must occur in order. Default
            is True.
        :param mindist: the minimum distance allowed between the queries.
"""
self.qs = qs
self.slop = slop
self.ordered = ordered
self.mindist = mindist
def __repr__(self):
return ("%s(%r, slop=%d, ordered=%s, mindist=%d)"
% (self.__class__.__name__, self.qs, self.slop, self.ordered,
self.mindist))
def __eq__(self, other):
return (other and self.__class__ == other.__class__
and self.qs == other.qs and self.slop == other.slop
and self.ordered == other.ordered
and self.mindist == other.mindist)
def __hash__(self):
h = hash(self.slop) ^ hash(self.ordered) ^ hash(self.mindist)
for q in self.qs:
h ^= hash(q)
return h
def is_leaf(self):
return False
def children(self):
return self.qs
def apply(self, fn):
return self.__class__([fn(q) for q in self.qs], slop=self.slop,
ordered=self.ordered, mindist=self.mindist)
def matcher(self, searcher, context=None):
ms = [q.matcher(searcher, context) for q in self.qs]
return self.SpanNear2Matcher(ms, slop=self.slop, ordered=self.ordered,
mindist=self.mindist)
class SpanNear2Matcher(SpanWrappingMatcher):
def __init__(self, ms, slop=1, ordered=True, mindist=1):
self.ms = ms
self.slop = slop
self.ordered = ordered
self.mindist = mindist
isect = make_binary_tree(binary.IntersectionMatcher, ms)
super(SpanNear2.SpanNear2Matcher, self).__init__(isect)
def copy(self):
return self.__class__([m.copy() for m in self.ms], slop=self.slop,
ordered=self.ordered, mindist=self.mindist)
def replace(self, minquality=0):
# TODO: fix this
if not self.is_active():
return mcore.NullMatcher()
return self
def _get_spans(self):
slop = self.slop
mindist = self.mindist
ordered = self.ordered
ms = self.ms
aspans = ms[0].spans()
i = 1
while i < len(ms) and aspans:
bspans = ms[i].spans()
spans = set()
for aspan in aspans:
# Use a binary search to find the first position we should
# start looking for possible matches
if ordered:
start = aspan.start
else:
start = max(0, aspan.start - slop)
j = bisect_spans(bspans, start)
while j < len(bspans):
bspan = bspans[j]
j += 1
if (bspan.end < aspan.start - slop
or (ordered and aspan.start > bspan.start)):
# B is too far in front of A, or B is in front of A
# *at all* when ordered is True
continue
if bspan.start > aspan.end + slop:
# B is too far from A. Since spans are listed in
# start position order, we know that all spans after
# this one will also be too far.
break
# Check the distance between the spans
dist = aspan.distance_to(bspan)
if mindist <= dist <= slop:
spans.add(aspan.to(bspan))
aspans = sorted(spans)
i += 1
if i == len(ms):
return aspans
else:
return []
class SpanOr(SpanQuery):
"""Matches documents that match any of a list of sub-queries. Unlike
query.Or, this class merges together matching spans from the different
sub-queries when they overlap.
"""
def __init__(self, subqs):
"""
:param subqs: a list of queries to match.
"""
self.q = Or(subqs)
self.subqs = subqs
def is_leaf(self):
return False
def apply(self, fn):
return self.__class__([fn(sq) for sq in self.subqs])
def matcher(self, searcher, context=None):
matchers = [q.matcher(searcher, context) for q in self.subqs]
return make_binary_tree(SpanOr.SpanOrMatcher, matchers)
class SpanOrMatcher(SpanBiMatcher):
def __init__(self, a, b):
self.a = a
self.b = b
um = binary.UnionMatcher(a, b)
super(SpanOr.SpanOrMatcher, self).__init__(um)
def _get_spans(self):
a_active = self.a.is_active()
b_active = self.b.is_active()
if a_active:
a_id = self.a.id()
if b_active:
b_id = self.b.id()
if a_id == b_id:
spans = sorted(set(self.a.spans())
| set(self.b.spans()))
elif a_id < b_id:
spans = self.a.spans()
else:
spans = self.b.spans()
else:
spans = self.a.spans()
else:
spans = self.b.spans()
Span.merge(spans)
return spans
class SpanBiQuery(SpanQuery):
# Intermediate base class for methods common to "a/b" span query types
def is_leaf(self):
return False
def apply(self, fn):
return self.__class__(fn(self.a), fn(self.b))
def matcher(self, searcher, context=None):
ma = self.a.matcher(searcher, context)
mb = self.b.matcher(searcher, context)
return self._Matcher(ma, mb)
class SpanNot(SpanBiQuery):
"""Matches spans from the first query only if they don't overlap with
spans from the second query. If there are no non-overlapping spans, the
document does not match.
For example, to match documents that contain "bear" at most 2 places after
"apple" in the "text" field but don't have "cute" between them::
from whoosh import query, spans
t1 = query.Term("text", "apple")
t2 = query.Term("text", "bear")
near = spans.SpanNear(t1, t2, slop=2)
q = spans.SpanNot(near, query.Term("text", "cute"))
"""
def __init__(self, a, b):
"""
:param a: the query to match.
:param b: do not match any spans that overlap with spans from this
query.
"""
self.q = AndMaybe(a, b)
self.a = a
self.b = b
class _Matcher(SpanBiMatcher):
def __init__(self, a, b):
self.a = a
self.b = b
amm = binary.AndMaybeMatcher(a, b)
super(SpanNot._Matcher, self).__init__(amm)
def _get_spans(self):
if self.a.id() == self.b.id():
spans = []
bspans = self.b.spans()
for aspan in self.a.spans():
overlapped = False
for bspan in bspans:
if aspan.overlaps(bspan):
overlapped = True
break
if not overlapped:
spans.append(aspan)
return spans
else:
return self.a.spans()
class SpanContains(SpanBiQuery):
"""Matches documents where the spans of the first query contain any spans
of the second query.
For example, to match documents where "apple" occurs at most 10 places
before "bear" in the "text" field and "cute" is between them::
from whoosh import query, spans
t1 = query.Term("text", "apple")
t2 = query.Term("text", "bear")
near = spans.SpanNear(t1, t2, slop=10)
q = spans.SpanContains(near, query.Term("text", "cute"))
"""
def __init__(self, a, b):
"""
:param a: the query to match.
:param b: the query whose spans must occur within the matching spans
of the first query.
"""
self.q = And([a, b])
self.a = a
self.b = b
class _Matcher(SpanBiMatcher):
def __init__(self, a, b):
self.a = a
self.b = b
im = binary.IntersectionMatcher(a, b)
super(SpanContains._Matcher, self).__init__(im)
def _get_spans(self):
spans = []
bspans = self.b.spans()
for aspan in self.a.spans():
for bspan in bspans:
if aspan.start > bspan.end:
continue
if aspan.end < bspan.start:
break
if bspan.is_within(aspan):
spans.append(aspan)
break
return spans
class SpanBefore(SpanBiQuery):
"""Matches documents where the spans of the first query occur before any
spans of the second query.
For example, to match documents where "apple" occurs anywhere before
"bear"::
from whoosh import query, spans
t1 = query.Term("text", "apple")
t2 = query.Term("text", "bear")
q = spans.SpanBefore(t1, t2)
"""
def __init__(self, a, b):
"""
:param a: the query that must occur before the second.
:param b: the query that must occur after the first.
"""
self.a = a
self.b = b
self.q = And([a, b])
class _Matcher(SpanBiMatcher):
def __init__(self, a, b):
self.a = a
self.b = b
im = binary.IntersectionMatcher(a, b)
super(SpanBefore._Matcher, self).__init__(im)
def _get_spans(self):
bminstart = min(bspan.start for bspan in self.b.spans())
return [aspan for aspan in self.a.spans() if aspan.end < bminstart]
class SpanCondition(SpanBiQuery):
"""Matches documents that satisfy both subqueries, but only uses the spans
from the first subquery.
This is useful when you want to place conditions on matches but not have
those conditions affect the spans returned.
For example, to get spans for the term ``alfa`` in documents that also
must contain the term ``bravo``::
SpanCondition(Term("text", u"alfa"), Term("text", u"bravo"))
"""
def __init__(self, a, b):
self.a = a
self.b = b
self.q = And([a, b])
class _Matcher(SpanBiMatcher):
def __init__(self, a, b):
self.a = a
im = binary.IntersectionMatcher(a, b)
super(SpanCondition._Matcher, self).__init__(im)
def _get_spans(self):
return self.a.spans()
| |
import asyncio
import unittest
import json
import base64
from . import RPC
from . import protocol
try:
    spawn = asyncio.ensure_future
except AttributeError:
    # Fall back to the pre-3.4.4 spelling; ``async`` is a reserved word in
    # modern Python, so it has to be looked up with getattr().
    spawn = getattr(asyncio, "async")
class WaitGroup(object):
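    """Minimal asyncio analogue of Go's sync.WaitGroup: add() registers
    expected events as futures, done() resolves one pending future, and
    wait() blocks until every registered future has been resolved.
    """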
def __init__(self, loop):
self.loop = loop
self.total = []
self.pending = []
def add(self, incr=1):
for n in range(incr):
f = asyncio.Future(loop=self.loop)
self.total.append(f)
self.pending.append(f)
def done(self):
if len(self.pending) == 0:
return
f = self.pending.pop()
f.set_result(True)
@asyncio.coroutine
def wait(self):
yield from asyncio.wait(self.total)
class MockConnection(object):
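    """In-memory connection double: send() records each frame and forwards
    it to the paired connection's inbox; recv() pulls from its own inbox.
    """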
def __init__(self, loop):
self.sent = []
self.closed = False
self.pairedWith = None
self.expectedSends = WaitGroup(loop)
self.expectedRecvs = WaitGroup(loop)
self.inbox = asyncio.Queue(loop=loop)
@asyncio.coroutine
def close(self):
self.closed = True
@asyncio.coroutine
def send(self, frame):
self.sent.append(frame)
if self.pairedWith:
yield from self.pairedWith.inbox.put(frame)
self.expectedSends.done()
@asyncio.coroutine
def recv(self):
self.expectedRecvs.done()
frame = yield from self.inbox.get()
return frame
def connection_pair(loop):
conn1 = MockConnection(loop)
conn2 = MockConnection(loop)
conn1.pairedWith = conn2
conn2.pairedWith = conn1
return [conn1, conn2]
@asyncio.coroutine
def peer_pair(loop, rpc):
conn1, conn2 = connection_pair(loop)
tasks = [
spawn(rpc.accept(conn1, False), loop=loop),
spawn(rpc.handshake(conn2, False), loop=loop),
]
yield from asyncio.wait(tasks)
return [tasks[0].result(), tasks[1].result()]
def handshake(codec):
return "{0}/{1};{2}".format(
protocol.name,
protocol.version,
codec)
@asyncio.coroutine
def echo(ch):
obj, _ = yield from ch.recv()
yield from ch.send(obj)
class DuplexAsyncTests(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.close = [self.loop]
def tearDown(self):
for obj in self.close:
obj.close()
def spawn(self, *args, **kwargs):
kwargs['loop'] = self.loop
return spawn(*args, **kwargs)
    def async_test(f):
        """Run a coroutine test method to completion on the test's event
        loop, with a five-second timeout.
        """
        def wrapper(*args, **kwargs):
            coro = asyncio.coroutine(f)
            future = coro(*args, **kwargs)
            args[0].loop.run_until_complete(
                asyncio.wait_for(future, 5, loop=args[0].loop))
        return wrapper
    @async_test
def test_handshake(self):
conn = MockConnection(self.loop)
rpc = RPC("json", self.loop)
yield from conn.inbox.put(protocol.handshake.accept)
yield from rpc.handshake(conn, False)
yield from conn.close()
self.assertEqual(conn.sent[0], handshake("json"))
    @async_test
def test_accept(self):
conn = MockConnection(self.loop)
rpc = RPC("json", self.loop)
yield from conn.inbox.put(handshake("json"))
yield from rpc.accept(conn, False)
yield from conn.close()
self.assertEqual(conn.sent[0], protocol.handshake.accept)
    @async_test
def test_registered_func_after_accept(self):
conn = MockConnection(self.loop)
conn.expectedSends.add(2)
rpc = RPC("json", self.loop)
rpc.register("echo", echo)
yield from conn.inbox.put(handshake("json"))
peer = yield from rpc.accept(conn, False)
req = {
'type': protocol.types.request,
'method': "echo",
'id': 1,
'payload': {"foo": "bar"}
}
frame = json.dumps(req)
yield from conn.inbox.put(frame)
yield from peer.route(1)
yield from conn.expectedSends.wait()
yield from conn.close()
self.assertEqual(len(conn.sent), 2)
    @async_test
def test_registered_func_after_handshake(self):
conn = MockConnection(self.loop)
conn.expectedSends.add(2)
rpc = RPC("json", self.loop)
rpc.register("echo", echo)
yield from conn.inbox.put(protocol.handshake.accept)
peer = yield from rpc.handshake(conn, False)
req = {
'type': protocol.types.request,
'method': "echo",
'id': 1,
'payload': {"foo": "bar"}
}
frame = json.dumps(req)
yield from conn.inbox.put(frame)
yield from peer.route(1)
yield from conn.expectedSends.wait()
yield from conn.close()
self.assertEqual(len(conn.sent), 2)
    @async_test
def test_call_after_handshake(self):
conn = MockConnection(self.loop)
conn.expectedSends.add(2)
rpc = RPC("json", self.loop)
yield from conn.inbox.put(protocol.handshake.accept)
peer = yield from rpc.handshake(conn, False)
args = {"foo": "bar"}
expectedReply = {"baz": "qux"}
frame = json.dumps({
"type": protocol.types.reply,
"id": 1,
"payload": expectedReply,
})
@asyncio.coroutine
def inject_frame():
yield from conn.expectedSends.wait()
yield from conn.inbox.put(frame)
tasks = [
self.spawn(peer.call("foobar", args)),
self.spawn(inject_frame()),
peer.route(1),
]
yield from asyncio.wait(tasks)
reply = tasks[0].result()
yield from conn.close()
self.assertEqual(reply["baz"], expectedReply["baz"])
    @async_test
def test_call_after_accept(self):
conn = MockConnection(self.loop)
conn.expectedSends.add(2)
rpc = RPC("json", self.loop)
yield from conn.inbox.put(handshake("json"))
peer = yield from rpc.accept(conn, False)
args = {"foo": "bar"}
expectedReply = {"baz": "qux"}
frame = json.dumps({
"type": protocol.types.reply,
"id": 1,
"payload": expectedReply,
})
@asyncio.coroutine
def inject_frame():
yield from conn.expectedSends.wait()
yield from conn.inbox.put(frame)
tasks = [
self.spawn(peer.call("foobar", args)),
self.spawn(inject_frame()),
peer.route(1),
]
yield from asyncio.wait(tasks)
reply = tasks[0].result()
yield from conn.close()
self.assertEqual(reply["baz"], expectedReply["baz"])
    @async_test
def test_all_on_paired_peers(self):
conns = connection_pair(self.loop)
rpc = RPC("json", self.loop)
@asyncio.coroutine
def echo_tag(ch):
obj, _ = yield from ch.recv()
obj["tag"] = True
yield from ch.send(obj)
rpc.register("echo-tag", echo_tag)
tasks = [
self.spawn(rpc.accept(conns[0], False)),
self.spawn(rpc.handshake(conns[1], False)),
]
yield from asyncio.wait(tasks)
peer1 = tasks[0].result()
peer2 = tasks[1].result()
tasks = [
self.spawn(peer1.call("echo-tag", {"from": "peer1"})),
self.spawn(peer2.call("echo-tag", {"from": "peer2"})),
peer1.route(2),
peer2.route(2),
]
yield from asyncio.wait(tasks + peer1.tasks + peer2.tasks)
yield from conns[0].close()
yield from conns[1].close()
self.assertEqual(tasks[0].result()["from"], "peer1")
self.assertEqual(tasks[0].result()["tag"], True)
self.assertEqual(tasks[1].result()["from"], "peer2")
self.assertEqual(tasks[1].result()["tag"], True)
    @async_test
def test_streaming_multiple_results(self):
rpc = RPC("json", self.loop)
@asyncio.coroutine
def counter(ch):
count, _ = yield from ch.recv()
for i in range(count):
n = i+1
yield from ch.send({"num": n}, n != count)
rpc.register("count", counter)
client, server = yield from peer_pair(self.loop, rpc)
ch = client.open("count")
yield from ch.send(5)
yield from server.route(1)
@asyncio.coroutine
def handle_results():
more = True
loops = 0
count = 0
while more:
reply, more = yield from ch.recv()
count += reply['num']
loops += 1
assert loops <= 5
return count
tasks = [
self.spawn(handle_results()),
client.route(5), # kinda defeats the point
]
yield from asyncio.wait(tasks + server.tasks)
yield from client.close()
yield from server.close()
self.assertEqual(tasks[0].result(), 15)
    @async_test
def test_streaming_multiple_arguments(self):
rpc = RPC("json", self.loop)
@asyncio.coroutine
def adder(ch):
more = True
total = 0
while more:
count, more = yield from ch.recv()
total += count
yield from ch.send(total)
rpc.register("adder", adder)
client, server = yield from peer_pair(self.loop, rpc)
ch = client.open("adder")
@asyncio.coroutine
def asyncio_sucks():
for i in range(5):
n = i+1
yield from ch.send(n, n != 5)
total, _ = yield from ch.recv()
return total
tasks = [
self.spawn(asyncio_sucks()),
server.route(5),
client.route(1),
]
yield from asyncio.wait(tasks + server.tasks)
yield from client.close()
yield from server.close()
self.assertEqual(tasks[0].result(), 15)
    @async_test
def test_custom_codec(self):
b64json = [
'b64json',
lambda obj: base64.b64encode(json.dumps(obj).encode("utf-8")).decode("utf-8"),
lambda s: json.loads(base64.b64decode(s.encode("utf-8")).decode("utf-8")),
]
rpc = RPC(b64json, self.loop)
rpc.register("echo", echo)
client, server = yield from peer_pair(self.loop, rpc)
args = {"foo": "bar"}
tasks = [
self.spawn(client.call("echo", args)),
server.route(1),
client.route(1),
]
yield from asyncio.wait(tasks + server.tasks)
yield from client.close()
yield from server.close()
self.assertEqual(tasks[0].result(), args)
    @async_test
def test_ext_fields(self):
rpc = RPC("json", self.loop)
rpc.register("echo", echo)
client, server = yield from peer_pair(self.loop, rpc)
args = {"foo": "bar"}
ext = {"hidden": "metadata"}
ch = client.open("echo")
ch.ext = ext
yield from ch.send(args)
yield from server.route(1)
yield from client.route(1)
reply, _ = yield from ch.recv()
self.assertEqual(args, reply)
yield from client.close()
yield from server.close()
msg = json.loads(server.conn.sent[1])
self.assertEqual(msg['ext'], ext)
    @async_test
def test_register_func_and_callback_func(self):
rpc = RPC("json", self.loop)
@asyncio.coroutine
def callback(args, ch):
ret = yield from ch.call(args[0], args[1])
return ret
rpc.register_func("callback", callback)
client, server = yield from peer_pair(self.loop, rpc)
upper = rpc.callback_func(lambda arg,ch: arg.upper())
tasks = [
self.spawn(client.call("callback", [upper, "hello"])),
server.route(2),
client.route(2),
]
yield from asyncio.wait(tasks)
yield from client.close()
yield from server.close()
self.assertEqual(tasks[0].result(), "HELLO")
| |
#!/usr/bin/env python3
# -*- encoding:utf-8 -*-
"""
gh_lists.py MILESTONE
Functions for Github API requests.
"""
import os
import re
import sys
import json
import collections
import argparse
import datetime
import time
from urllib.request import urlopen, Request, HTTPError
Issue = collections.namedtuple('Issue', ('id', 'title', 'url'))
def main():
p = argparse.ArgumentParser(usage=__doc__.lstrip())
p.add_argument('--project', default='scipy/scipy')
p.add_argument('milestone')
args = p.parse_args()
getter = CachedGet('gh_cache.json', GithubGet())
try:
milestones = get_milestones(getter, args.project)
if args.milestone not in milestones:
msg = "Milestone {0} not available. Available milestones: {1}"
msg = msg.format(args.milestone, u", ".join(sorted(milestones)))
p.error(msg)
issues = get_issues(getter, args.project, args.milestone)
issues.sort()
finally:
getter.save()
prs = [x for x in issues if u'/pull/' in x.url]
issues = [x for x in issues if x not in prs]
def print_list(title, items):
print()
print(title)
print("-"*len(title))
print()
for issue in items:
msg = u"* `#{0} <{1}>`__: {2}"
# sanitize whitespace, `, and *
title = re.sub(u"\\s+", u" ", issue.title.strip())
title = title.replace(u'`', u'\\`').replace(u'*', u'\\*')
if len(title) > 60:
remainder = re.sub(u"\\s.*$", u"...", title[60:])
if len(remainder) > 20:
                    title = title[:80] + u"..."
else:
title = title[:60] + remainder
msg = msg.format(issue.id, issue.url, title)
print(msg)
print()
msg = u"Issues closed for {0}".format(args.milestone)
print_list(msg, issues)
msg = u"Pull requests for {0}".format(args.milestone)
print_list(msg, prs)
return 0
def get_milestones(getter, project):
url = "https://api.github.com/repos/{project}/milestones".format(project=project)
data = getter.get(url)
milestones = {}
for ms in data:
milestones[ms[u'title']] = ms[u'number']
return milestones
def get_issues(getter, project, milestone):
milestones = get_milestones(getter, project)
mid = milestones[milestone]
url = "https://api.github.com/repos/{project}/issues?milestone={mid}&state=closed&sort=created&direction=asc"
url = url.format(project=project, mid=mid)
data = getter.get(url)
issues = []
for issue_data in data:
issues.append(Issue(issue_data[u'number'],
issue_data[u'title'],
issue_data[u'html_url']))
return issues
class CachedGet:
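    """Caching wrapper around a getter: responses are stored in a JSON file
    so that repeated runs do not re-query the GitHub API.
    """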
def __init__(self, filename, getter):
self._getter = getter
self.filename = filename
if os.path.isfile(filename):
print("[gh_lists] using {0} as cache (remove it if you want fresh data)".format(filename),
file=sys.stderr)
with open(filename, 'r', encoding='utf-8') as f:
self.cache = json.load(f)
else:
self.cache = {}
def get(self, url):
if url not in self.cache:
data = self._getter.get_multipage(url)
self.cache[url] = data
return data
else:
print("[gh_lists] (cached):", url, file=sys.stderr, flush=True)
return self.cache[url]
def save(self):
tmp = self.filename + ".new"
with open(tmp, 'w', encoding='utf-8') as f:
json.dump(self.cache, f)
os.rename(tmp, self.filename)
class GithubGet:
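    """Small GitHub REST v3 client with optional token authentication,
    rate-limit tracking, and Link-header pagination.
    """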
def __init__(self, auth=False):
self.headers = {'User-Agent': 'gh_lists.py',
'Accept': 'application/vnd.github.v3+json'}
if auth:
self.authenticate()
req = self.urlopen('https://api.github.com/rate_limit')
try:
if req.getcode() != 200:
raise RuntimeError()
info = json.loads(req.read().decode('utf-8'))
finally:
req.close()
self.ratelimit_remaining = int(info['rate']['remaining'])
self.ratelimit_reset = float(info['rate']['reset'])
def authenticate(self):
print("Input a Github API access token.\n"
"Personal tokens can be created at https://github.com/settings/tokens\n"
"This script does not require any permissions (so don't give it any).",
file=sys.stderr, flush=True)
print("Access token: ", file=sys.stderr, end='', flush=True)
token = input()
self.headers['Authorization'] = 'token {0}'.format(token.strip())
def urlopen(self, url, auth=None):
assert url.startswith('https://')
req = Request(url, headers=self.headers)
return urlopen(req, timeout=60)
def get_multipage(self, url):
data = []
while url:
page_data, info, next_url = self.get(url)
data += page_data
url = next_url
return data
def get(self, url):
while True:
# Wait until rate limit
while self.ratelimit_remaining == 0 and self.ratelimit_reset > time.time():
s = self.ratelimit_reset + 5 - time.time()
if s <= 0:
break
print("[gh_lists] rate limit exceeded: waiting until {0} ({1} s remaining)".format(
datetime.datetime.fromtimestamp(self.ratelimit_reset).strftime('%Y-%m-%d %H:%M:%S'),
int(s)),
file=sys.stderr, flush=True)
time.sleep(min(5*60, s))
# Get page
print("[gh_lists] get:", url, file=sys.stderr, flush=True)
try:
req = self.urlopen(url)
try:
code = req.getcode()
info = req.info()
data = json.loads(req.read().decode('utf-8'))
finally:
req.close()
except HTTPError as err:
code = err.getcode()
info = err.info()
data = None
if code not in (200, 403):
raise RuntimeError()
# Parse reply
next_url = None
if 'Link' in info:
m = re.search('<([^<>]*)>; rel="next"', info['Link'])
if m:
next_url = m.group(1)
# Update rate limit info
if 'X-RateLimit-Remaining' in info:
self.ratelimit_remaining = int(info['X-RateLimit-Remaining'])
if 'X-RateLimit-Reset' in info:
self.ratelimit_reset = float(info['X-RateLimit-Reset'])
# Deal with rate limit exceeded
if code != 200 or data is None:
if self.ratelimit_remaining == 0:
continue
else:
raise RuntimeError()
# Done.
return data, info, next_url
if __name__ == "__main__":
sys.exit(main())
| |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Different model implementation plus a general port for all the models."""
import functools
from typing import Any, Callable
from flax import linen as nn
import gin
import jax
from jax import random
import jax.numpy as jnp
from internal import mip
from internal import utils
@gin.configurable
class MipNerfModel(nn.Module):
"""Nerf NN Model with both coarse and fine MLPs."""
num_samples: int = 128 # The number of samples per level.
num_levels: int = 2 # The number of sampling levels.
resample_padding: float = 0.01 # Dirichlet/alpha "padding" on the histogram.
  stop_level_grad: bool = True  # If True, don't backprop across levels.
use_viewdirs: bool = True # If True, use view directions as a condition.
lindisp: bool = False # If True, sample linearly in disparity, not in depth.
ray_shape: str = 'cone' # The shape of cast rays ('cone' or 'cylinder').
min_deg_point: int = 0 # Min degree of positional encoding for 3D points.
max_deg_point: int = 16 # Max degree of positional encoding for 3D points.
deg_view: int = 4 # Degree of positional encoding for viewdirs.
density_activation: Callable[..., Any] = nn.softplus # Density activation.
density_noise: float = 0. # Standard deviation of noise added to raw density.
density_bias: float = -1. # The shift added to raw densities pre-activation.
rgb_activation: Callable[..., Any] = nn.sigmoid # The RGB activation.
rgb_padding: float = 0.001 # Padding added to the RGB outputs.
disable_integration: bool = False # If True, use PE instead of IPE.
@nn.compact
def __call__(self, rng, rays, randomized, white_bkgd):
"""The mip-NeRF Model.
Args:
rng: jnp.ndarray, random number generator.
rays: util.Rays, a namedtuple of ray origins, directions, and viewdirs.
randomized: bool, use randomized stratified sampling.
white_bkgd: bool, if True, use white as the background (black o.w.).
Returns:
ret: list, [*(rgb, distance, acc)]
"""
# Construct the MLP.
mlp = MLP()
ret = []
for i_level in range(self.num_levels):
key, rng = random.split(rng)
if i_level == 0:
# Stratified sampling along rays
t_vals, samples = mip.sample_along_rays(
key,
rays.origins,
rays.directions,
rays.radii,
self.num_samples,
rays.near,
rays.far,
randomized,
self.lindisp,
self.ray_shape,
)
else:
t_vals, samples = mip.resample_along_rays(
key,
rays.origins,
rays.directions,
rays.radii,
t_vals,
weights,
randomized,
self.ray_shape,
self.stop_level_grad,
resample_padding=self.resample_padding,
)
if self.disable_integration:
samples = (samples[0], jnp.zeros_like(samples[1]))
samples_enc = mip.integrated_pos_enc(
samples,
self.min_deg_point,
self.max_deg_point,
)
# Point attribute predictions
if self.use_viewdirs:
viewdirs_enc = mip.pos_enc(
rays.viewdirs,
min_deg=0,
max_deg=self.deg_view,
append_identity=True,
)
raw_rgb, raw_density = mlp(samples_enc, viewdirs_enc)
else:
raw_rgb, raw_density = mlp(samples_enc)
# Add noise to regularize the density predictions if needed.
if randomized and (self.density_noise > 0):
key, rng = random.split(rng)
raw_density += self.density_noise * random.normal(
key, raw_density.shape, dtype=raw_density.dtype)
# Volumetric rendering.
rgb = self.rgb_activation(raw_rgb)
rgb = rgb * (1 + 2 * self.rgb_padding) - self.rgb_padding
density = self.density_activation(raw_density + self.density_bias)
comp_rgb, distance, acc, weights = mip.volumetric_rendering(
rgb,
density,
t_vals,
rays.directions,
white_bkgd=white_bkgd,
)
ret.append((comp_rgb, distance, acc))
return ret
def construct_mipnerf(rng, example_batch):
"""Construct a Neural Radiance Field.
Args:
rng: jnp.ndarray. Random number generator.
example_batch: dict, an example of a batch of data.
Returns:
model: nn.Model. Nerf model with parameters.
state: flax.Module.state. Nerf model state for stateful parameters.
"""
model = MipNerfModel()
key, rng = random.split(rng)
init_variables = model.init(
key,
rng=rng,
rays=utils.namedtuple_map(lambda x: x[0], example_batch['rays']),
randomized=False,
white_bkgd=False)
return model, init_variables
@gin.configurable
class MLP(nn.Module):
"""A simple MLP."""
net_depth: int = 8 # The depth of the first part of MLP.
net_width: int = 256 # The width of the first part of MLP.
net_depth_condition: int = 1 # The depth of the second part of MLP.
net_width_condition: int = 128 # The width of the second part of MLP.
net_activation: Callable[..., Any] = nn.relu # The activation function.
skip_layer: int = 4 # Add a skip connection to the output of every N layers.
num_rgb_channels: int = 3 # The number of RGB channels.
num_density_channels: int = 1 # The number of density channels.
@nn.compact
def __call__(self, x, condition=None):
"""Evaluate the MLP.
Args:
x: jnp.ndarray(float32), [batch, num_samples, feature], points.
condition: jnp.ndarray(float32), [batch, feature], if not None, this
variable will be part of the input to the second part of the MLP
concatenated with the output vector of the first part of the MLP. If
None, only the first part of the MLP will be used with input x. In the
original paper, this variable is the view direction.
Returns:
raw_rgb: jnp.ndarray(float32), with a shape of
[batch, num_samples, num_rgb_channels].
raw_density: jnp.ndarray(float32), with a shape of
[batch, num_samples, num_density_channels].
"""
feature_dim = x.shape[-1]
num_samples = x.shape[1]
x = x.reshape([-1, feature_dim])
dense_layer = functools.partial(
nn.Dense, kernel_init=jax.nn.initializers.glorot_uniform())
inputs = x
for i in range(self.net_depth):
x = dense_layer(self.net_width)(x)
x = self.net_activation(x)
if i % self.skip_layer == 0 and i > 0:
x = jnp.concatenate([x, inputs], axis=-1)
raw_density = dense_layer(self.num_density_channels)(x).reshape(
[-1, num_samples, self.num_density_channels])
if condition is not None:
# Output of the first part of MLP.
bottleneck = dense_layer(self.net_width)(x)
# Broadcast condition from [batch, feature] to
# [batch, num_samples, feature] since all the samples along the same ray
# have the same viewdir.
condition = jnp.tile(condition[:, None, :], (1, num_samples, 1))
# Collapse the [batch, num_samples, feature] tensor to
# [batch * num_samples, feature] so that it can be fed into nn.Dense.
condition = condition.reshape([-1, condition.shape[-1]])
x = jnp.concatenate([bottleneck, condition], axis=-1)
# Here use 1 extra layer to align with the original nerf model.
for i in range(self.net_depth_condition):
x = dense_layer(self.net_width_condition)(x)
x = self.net_activation(x)
raw_rgb = dense_layer(self.num_rgb_channels)(x).reshape(
[-1, num_samples, self.num_rgb_channels])
return raw_rgb, raw_density
def render_image(render_fn, rays, rng, chunk=8192):
"""Render all the pixels of an image (in test mode).
Args:
render_fn: function, jit-ed render function.
rays: a `Rays` namedtuple, the rays to be rendered.
rng: jnp.ndarray, random number generator (used in training mode only).
chunk: int, the size of chunks to render sequentially.
Returns:
rgb: jnp.ndarray, rendered color image.
    distance: jnp.ndarray, rendered distance image.
acc: jnp.ndarray, rendered accumulated weights per pixel.
"""
height, width = rays[0].shape[:2]
num_rays = height * width
rays = utils.namedtuple_map(lambda r: r.reshape((num_rays, -1)), rays)
host_id = jax.host_id()
results = []
for i in range(0, num_rays, chunk):
# pylint: disable=cell-var-from-loop
chunk_rays = utils.namedtuple_map(lambda r: r[i:i + chunk], rays)
chunk_size = chunk_rays[0].shape[0]
rays_remaining = chunk_size % jax.device_count()
if rays_remaining != 0:
padding = jax.device_count() - rays_remaining
chunk_rays = utils.namedtuple_map(
lambda r: jnp.pad(r, ((0, padding), (0, 0)), mode='edge'), chunk_rays)
else:
padding = 0
# After padding the number of chunk_rays is always divisible by
# host_count.
rays_per_host = chunk_rays[0].shape[0] // jax.host_count()
start, stop = host_id * rays_per_host, (host_id + 1) * rays_per_host
chunk_rays = utils.namedtuple_map(lambda r: utils.shard(r[start:stop]),
chunk_rays)
chunk_results = render_fn(rng, chunk_rays)[-1]
results.append([utils.unshard(x[0], padding) for x in chunk_results])
# pylint: enable=cell-var-from-loop
rgb, distance, acc = [jnp.concatenate(r, axis=0) for r in zip(*results)]
rgb = rgb.reshape((height, width, -1))
distance = distance.reshape((height, width))
acc = acc.reshape((height, width))
return (rgb, distance, acc)
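# Illustrative usage sketch (assumptions: `render_pfn` is the pmapped render
# function and `rays` is the full-image `Rays` namedtuple the docstring above
# describes; the names here are hypothetical).
def _example_render_image_usage(render_pfn, rays, rng):
  # render_image pads each chunk so its length divides jax.device_count(),
  # shards it across hosts/devices, and strips the padding via utils.unshard.
  rgb, distance, acc = render_image(render_pfn, rays, rng, chunk=4096)
  return rgb, distance, acc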
| |
###############################################################################
# WrapperPotential.py: Super-class for wrapper potentials
###############################################################################
from .Potential import Potential, _isNonAxi, _dim
from .planarPotential import planarPotential
from .Potential import _evaluatePotentials, \
_evaluateRforces, _evaluatephiforces, _evaluatezforces, \
evaluateR2derivs, evaluatez2derivs, \
evaluateRzderivs, evaluateDensities
from .planarPotential import _evaluateplanarPotentials, \
_evaluateplanarRforces, _evaluateplanarphiforces, \
evaluateplanarR2derivs
from ..util.conversion import physical_compatible, get_physical
def _new_obj(cls, kwargs, args):
"""Maps kwargs to cls.__new__"""
return cls.__new__(cls, *args, **kwargs)
class parentWrapperPotential(object):
"""'Dummy' class only used to delegate wrappers to either 2D planarWrapperPotential or 3D WrapperPotential based on pot's dimensionality, using a little python object creation magic..."""
def __new__(cls,*args,**kwargs):
if kwargs.pop('_init',False):
# When we get here recursively, just create new object
return object.__new__(cls)
# Decide whether superclass is Wrapper or planarWrapper based on dim
pot= kwargs.get('pot',None)
if _dim(pot) == 2:
parentWrapperPotential= planarWrapperPotential
elif _dim(pot) == 3:
parentWrapperPotential= WrapperPotential
else:
raise ValueError("WrapperPotentials are only supported in 3D and 2D")
# Create object from custom class that derives from correct wrapper,
# make sure to turn off normalization for all wrappers
kwargs['_init']= True # to break recursion above
# __reduce__ method to allow pickling
reduce= lambda self: (_new_obj, (cls, kwargs, args), self.__dict__)
out= type.__new__(type,'_%s' % cls.__name__,
(parentWrapperPotential,cls),
{'normalize':property(),
'__reduce__':reduce})(*args,**kwargs)
kwargs.pop('_init',False)
# This runs init for the subclass (the specific wrapper)
cls.__init__(out,*args,**kwargs)
return out
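# Illustrative example (not part of the original module): thanks to the
# __new__ delegation above, a concrete wrapper such as galpy's
# DehnenSmoothWrapperPotential automatically ends up deriving from
# WrapperPotential for a 3D pot and from planarWrapperPotential for a 2D one.
# Kept as comments to avoid a circular import of galpy.potential here:
#
#   from galpy.potential import MiyamotoNagaiPotential, \
#       DehnenSmoothWrapperPotential
#   mp= MiyamotoNagaiPotential(normalize=1.)
#   dswp= DehnenSmoothWrapperPotential(pot=mp,tform=-4.,tsteady=2.)
#   isinstance(dswp,WrapperPotential)          # True: wrapped pot is 3D
#   isinstance(DehnenSmoothWrapperPotential(pot=mp.toPlanar(),tform=-4.),
#              planarWrapperPotential)         # True: wrapped pot is 2D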
class WrapperPotential(Potential):
def __init__(self,amp=1.,pot=None,ro=None,vo=None,_init=None,**kwargs):
"""
NAME:
__init__
PURPOSE:
initialize a WrapperPotential, a super-class for wrapper potentials
INPUT:
amp - amplitude to be applied to the potential (default: 1.)
pot - Potential instance or list thereof
OUTPUT:
(none)
HISTORY:
2017-06-26 - Started - Bovy (UofT)
"""
if not _init: return None # Don't run __init__ at the end of setup
Potential.__init__(self,amp=amp,ro=ro,vo=vo)
self._pot= pot
self.isNonAxi= _isNonAxi(self._pot)
# Check whether units are consistent between the wrapper and the
# wrapped potential
assert physical_compatible(self,self._pot), \
"""Physical unit conversion parameters (ro,vo) are not """\
"""compatible between this wrapper and the wrapped potential"""
# Transfer unit system if set for wrapped potential, but not here
phys_wrapped= get_physical(self._pot,include_set=True)
if not self._roSet and phys_wrapped['roSet']:
self.turn_physical_on(ro=phys_wrapped['ro'],vo=False)
if not self._voSet and phys_wrapped['voSet']:
self.turn_physical_on(vo=phys_wrapped['vo'],ro=False)
def __repr__(self):
wrapped_repr= repr(self._pot)
return Potential.__repr__(self) + ', wrapper of' \
+ ''.join(['\n\t{}'.format(s) for s in wrapped_repr.split('\n')])
def __getattr__(self,attribute):
if attribute == '_evaluate' \
or attribute == '_Rforce' or attribute == '_zforce' \
or attribute == '_phiforce' \
or attribute == '_R2deriv' or attribute == '_z2deriv' \
or attribute == '_Rzderiv' or attribute == '_phi2deriv' \
or attribute == '_Rphideriv' or attribute == '_dens':
return lambda R,Z,phi=0.,t=0.: \
self._wrap(attribute,R,Z,phi=phi,t=t)
else:
return super(WrapperPotential,self).__getattr__(attribute)
def _wrap_pot_func(self,attribute):
if attribute == '_evaluate':
return lambda p,R,Z,phi=0.,t=0.: \
_evaluatePotentials(p,R,Z,phi=phi,t=t)
elif attribute == '_dens':
return lambda p,R,Z,phi=0.,t=0.: \
evaluateDensities(p,R,Z,phi=phi,t=t,use_physical=False)
elif attribute == '_Rforce':
return lambda p,R,Z,phi=0.,t=0.: \
_evaluateRforces(p,R,Z,phi=phi,t=t)
elif attribute == '_zforce':
return lambda p,R,Z,phi=0.,t=0.: \
_evaluatezforces(p,R,Z,phi=phi,t=t)
elif attribute == '_phiforce':
return lambda p,R,Z,phi=0.,t=0.: \
_evaluatephiforces(p,R,Z,phi=phi,t=t)
elif attribute == '_R2deriv':
return lambda p,R,Z,phi=0.,t=0.: \
evaluateR2derivs(p,R,Z,phi=phi,t=t,use_physical=False)
elif attribute == '_z2deriv':
return lambda p,R,Z,phi=0.,t=0.: \
evaluatez2derivs(p,R,Z,phi=phi,t=t,use_physical=False)
elif attribute == '_Rzderiv':
return lambda p,R,Z,phi=0.,t=0.: \
evaluateRzderivs(p,R,Z,phi=phi,t=t,use_physical=False)
elif attribute == '_phi2deriv':
return lambda p,R,Z,phi=0.,t=0.: \
_evaluatePotentials(p,R,Z,phi=phi,t=t,dphi=2)
elif attribute == '_Rphideriv':
return lambda p,R,Z,phi=0.,t=0.: \
_evaluatePotentials(p,R,Z,phi=phi,t=t,dR=1,dphi=1)
else: #pragma: no cover
raise AttributeError("Attribute %s not found in for this WrapperPotential" % attribute)
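    # Illustrative sketch (assumption): the _wrap method used by __getattr__
    # above is supplied by each concrete wrapper subclass elsewhere in galpy;
    # a minimal pass-through version consistent with _wrap_pot_func would be
    #
    #   def _wrap(self,attribute,*args,**kwargs):
    #       return self._wrap_pot_func(attribute)(self._pot,*args,**kwargs)
    #
    # with real wrappers additionally modulating this result, e.g. by a
    # time-dependent amplitude.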
class planarWrapperPotential(planarPotential):
def __init__(self,amp=1.,pot=None,ro=None,vo=None,_init=None,**kwargs):
"""
NAME:
__init__
PURPOSE:
           initialize a planarWrapperPotential, a super-class for planar wrapper potentials
INPUT:
amp - amplitude to be applied to the potential (default: 1.)
           pot - Potential instance or list thereof
OUTPUT:
(none)
HISTORY:
2017-06-26 - Started - Bovy (UofT)
"""
if not _init: return None # Don't run __init__ at the end of setup
planarPotential.__init__(self,amp=amp,ro=ro,vo=vo)
self._pot= pot
self.isNonAxi= _isNonAxi(self._pot)
# Check whether units are consistent between the wrapper and the
# wrapped potential
assert physical_compatible(self,self._pot), \
"""Physical unit conversion parameters (ro,vo) are not """\
"""compatible between this wrapper and the wrapped potential"""
# Transfer unit system if set for wrapped potential, but not here
phys_wrapped= get_physical(self._pot,include_set=True)
if not self._roSet and phys_wrapped['roSet']:
self.turn_physical_on(ro=phys_wrapped['ro'],vo=False)
if not self._voSet and phys_wrapped['voSet']:
self.turn_physical_on(vo=phys_wrapped['vo'],ro=False)
def __repr__(self):
wrapped_repr= repr(self._pot)
return Potential.__repr__(self) + ', wrapper of' \
+ ''.join(['\n\t{}'.format(s) for s in wrapped_repr.split('\n')])
def __getattr__(self,attribute):
if attribute == '_evaluate' \
or attribute == '_Rforce' \
or attribute == '_phiforce' \
or attribute == '_R2deriv' \
or attribute == '_phi2deriv' \
or attribute == '_Rphideriv':
return lambda R,phi=0.,t=0.: \
self._wrap(attribute,R,phi=phi,t=t)
else:
return super(planarWrapperPotential,self).__getattr__(attribute)
def _wrap_pot_func(self,attribute):
if attribute == '_evaluate':
return lambda p,R,phi=0.,t=0.: \
_evaluateplanarPotentials(p,R,phi=phi,t=t)
elif attribute == '_Rforce':
return lambda p,R,phi=0.,t=0.: \
_evaluateplanarRforces(p,R,phi=phi,t=t)
elif attribute == '_phiforce':
return lambda p,R,phi=0.,t=0.: \
_evaluateplanarphiforces(p,R,phi=phi,t=t)
elif attribute == '_R2deriv':
return lambda p,R,phi=0.,t=0.: \
evaluateplanarR2derivs(p,R,phi=phi,t=t,use_physical=False)
elif attribute == '_phi2deriv':
return lambda p,R,phi=0.,t=0.: \
_evaluateplanarPotentials(p,R,phi=phi,t=t,dphi=2)
elif attribute == '_Rphideriv':
return lambda p,R,phi=0.,t=0.: \
_evaluateplanarPotentials(p,R,phi=phi,t=t,dR=1,dphi=1)
else: #pragma: no cover
raise AttributeError("Attribute %s not found in for this WrapperPotential" % attribute)
| |
import os
import tempfile
import shutil
from nose.tools import eq_
from dingus import Dingus
from dingus import patch
from build_pack_utils import utils
from common.dingus_extension import patches
class TestComposer(object):
def __init__(self):
self.extension_module = utils.load_extension('extensions/composer')
def setUp(self):
os.environ['COMPOSER_GITHUB_OAUTH_TOKEN'] = ""
assert(os.getenv('COMPOSER_GITHUB_OAUTH_TOKEN') == "")
def test_composer_tool_should_compile(self):
ctx = utils.FormattedDict({
'BUILD_DIR': 'tests/data/composer',
'CACHE_DIR': '/cache/dir',
'PHP_VM': 'will_default_to_php_strategy',
'WEBDIR': 'htdocs',
'LIBDIR': 'lib',
'NO_WEBDIR_SET': False
})
ct = self.extension_module.ComposerExtension(ctx)
assert ct._should_compile()
def test_composer_tool_should_compile_not_found(self):
ctx = utils.FormattedDict({
'BUILD_DIR': 'lib',
'CACHE_DIR': '/cache/dir',
'PHP_VM': 'will_default_to_php_strategy',
'WEBDIR': 'htdocs',
'LIBDIR': 'lib',
'NO_WEBDIR_SET': False
})
ct = self.extension_module.ComposerExtension(ctx)
assert not ct._should_compile()
def test_composer_tool_install(self):
ctx = utils.FormattedDict({
'PHP_VM': 'will_default_to_php_strategy',
'BUILD_DIR': '/build/dir',
'CACHE_DIR': '/cache/dir',
'WEBDIR': '',
'NO_WEBDIR_SET': False
})
builder = Dingus(_ctx=ctx)
installer = Dingus()
cfInstaller = Dingus()
builder.install = Dingus(_installer=cfInstaller,
return_value=installer)
ct = self.extension_module.ComposerExtension(ctx)
ct._builder = builder
ct.install()
eq_(2, len(builder.install.calls()))
# make sure PHP is installed
assert installer.package.calls().once()
eq_('PHP', installer.package.calls()[0].args[0])
call = installer.package.calls()[0]
assert call.return_value.calls().once()
assert installer.calls().once()
# make sure composer is installed
assert installer._installer.calls().once()
assert installer._installer.calls()[0].args[0] == \
'/composer/1.0.0-alpha10/composer.phar', \
"was %s" % installer._installer.calls()[0].args[0]
def test_composer_tool_install_latest(self):
ctx = utils.FormattedDict({
'PHP_VM': 'will_default_to_php_strategy',
'BUILD_DIR': '/build/dir',
'CACHE_DIR': '/cache/dir',
'COMPOSER_VERSION': 'latest',
'BP_DIR': '',
'WEBDIR': '',
'NO_WEBDIR_SET': False
})
builder = Dingus(_ctx=ctx)
installer = Dingus()
cfInstaller = Dingus()
builder.install = Dingus(_installer=cfInstaller,
return_value=installer)
ct = self.extension_module.ComposerExtension(ctx)
ct._builder = builder
ct.install()
eq_(2, len(builder.install.calls()))
# make sure PHP is installed
assert installer.package.calls().once()
eq_('PHP', installer.package.calls()[0].args[0])
call = installer.package.calls()[0]
assert call.return_value.calls().once()
assert installer.calls().once()
# make sure composer is installed
assert installer._installer.calls().once()
assert installer._installer.calls()[0].args[0] == \
'https://getcomposer.org/composer.phar', \
"was %s" % installer._installer.calls()[0].args[0]
def test_composer_run_streams_output(self):
ctx = utils.FormattedDict({
'PHP_VM': 'hhvm', # PHP strategy does other stuff
'BUILD_DIR': '/build/dir',
'CACHE_DIR': '/cache/dir',
'TMPDIR': tempfile.gettempdir(),
'WEBDIR': 'htdocs',
'LIBDIR': 'lib',
'BP_DIR': '',
'NO_WEBDIR_SET': False
})
instance_stub = Dingus()
instance_stub._set_return_value("""{"rate": {"limit": 60, "remaining": 60}}""")
stream_output_stub = Dingus()
builder = Dingus(_ctx=ctx)
with patches({
'StringIO.StringIO.getvalue': instance_stub,
'composer.extension.stream_output': stream_output_stub
}):
ct = self.extension_module.ComposerExtension(ctx)
ct._builder = builder
ct.composer_runner = \
self.extension_module.ComposerCommandRunner(ctx, builder)
ct.run()
stream_output_calls = stream_output_stub.calls()
            assert 2 == len(stream_output_calls), \
                "stream_output was called %s times, expected 2" % len(stream_output_calls)
instCmd = stream_output_calls[-1].args[1]
assert instCmd.find('/build/dir/php/bin/composer.phar') > 0
assert instCmd.find('install') > 0
assert instCmd.find('--no-progress') > 0
assert instCmd.find('--no-interaction') > 0
assert instCmd.find('--no-dev') > 0
def test_composer_run_streams_debug_output(self):
ctx = utils.FormattedDict({
'PHP_VM': 'hhvm', # PHP strategy does other stuff
'BUILD_DIR': '/build/dir',
'CACHE_DIR': '/cache/dir',
'TMPDIR': tempfile.gettempdir(),
'WEBDIR': 'htdocs',
'LIBDIR': 'lib',
'BP_DEBUG': 'True',
'BP_DIR': '',
'NO_WEBDIR_SET': False
})
instance_stub = Dingus(return_value="""{"rate": {"limit": 60, "remaining": 60}}""")
stream_output_stub = Dingus()
builder = Dingus(_ctx=ctx)
with patches({
'StringIO.StringIO.getvalue': instance_stub,
'composer.extension.stream_output': stream_output_stub
}):
ct = self.extension_module.ComposerExtension(ctx)
ct._builder = builder
ct.composer_runner = \
self.extension_module.ComposerCommandRunner(ctx, builder)
ct.run()
stream_output_calls = stream_output_stub.calls()
            assert 3 == len(stream_output_calls), \
                "stream_output was called %s times, expected 3" % len(stream_output_calls)
            # the first call is `composer -V`
            verCmd = stream_output_calls[0].args[1]
            assert verCmd.find('composer.phar -V') > 0
# then composer install
instCmd = stream_output_calls[-1].args[1]
assert instCmd.find('/build/dir/php/bin/composer.phar') > 0
assert instCmd.find('install') > 0
assert instCmd.find('--no-progress') > 0
assert instCmd.find('--no-interaction') > 0
assert instCmd.find('--no-dev') > 0
def test_composer_tool_run_custom_composer_opts(self):
ctx = utils.FormattedDict({
'PHP_VM': 'php',
'BUILD_DIR': '/build/dir',
'CACHE_DIR': '/cache/dir',
'TMPDIR': tempfile.gettempdir(),
'WEBDIR': 'htdocs',
'LIBDIR': 'lib',
'COMPOSER_INSTALL_OPTIONS': ['--optimize-autoloader'],
'BP_DIR': '',
'NO_WEBDIR_SET': False
})
instance_stub = Dingus()
instance_stub._set_return_value("""{"rate": {"limit": 60, "remaining": 60}}""")
stream_output_stub = Dingus()
rewrite_stub = Dingus()
builder = Dingus(_ctx=ctx)
with patches({
'StringIO.StringIO.getvalue': instance_stub,
'composer.extension.stream_output': stream_output_stub,
'composer.extension.utils.rewrite_cfgs': rewrite_stub
}):
ct = self.extension_module.ComposerExtension(ctx)
ct._builder = builder
ct.composer_runner = \
self.extension_module.ComposerCommandRunner(ctx, builder)
ct.run()
eq_(1, len(builder.copy.calls()))
assert rewrite_stub.calls().once()
rewrite_args = rewrite_stub.calls()[0].args
assert rewrite_args[0].endswith('php.ini')
assert 'HOME' in rewrite_args[1]
assert 'TMPDIR' in rewrite_args[1]
instCmd = stream_output_stub.calls()[-1].args[1]
assert instCmd.find('--optimize-autoloader') > 0
def test_composer_tool_run_sanity_checks(self):
ctx = utils.FormattedDict({
'PHP_VM': 'php',
'BUILD_DIR': '/build/dir',
'CACHE_DIR': '/cache/dir',
'WEBDIR': '',
'TMPDIR': tempfile.gettempdir(),
'LIBDIR': 'lib',
'BP_DIR': '',
'NO_WEBDIR_SET': False
})
instance_stub = Dingus()
instance_stub._set_return_value("""{"rate": {"limit": 60, "remaining": 60}}""")
stream_output_stub = Dingus()
rewrite_stub = Dingus()
builder = Dingus(_ctx=ctx)
exists_stub = Dingus()
with patches({
'StringIO.StringIO.getvalue': instance_stub,
'composer.extension.stream_output': stream_output_stub,
'composer.extension.utils.rewrite_cfgs': rewrite_stub
}):
composer_extension = \
self.extension_module.ComposerExtension(ctx)
composer_extension._log = Dingus()
composer_extension._builder = builder
composer_extension.composer_runner = \
self.extension_module.ComposerCommandRunner(ctx, builder)
composer_extension.run()
composer_extension_calls = composer_extension._log.warning.calls()
assert len(composer_extension_calls) > 0
assert composer_extension_calls[0].args[0].find('PROTIP:') == 0
            exists_stub._set_return_value(True)
            with patch('os.path.exists', exists_stub):
composer_extension._log = Dingus()
composer_extension.run()
assert len(composer_extension._log.warning.calls()) == 0
def test_process_commands(self):
eq_(0, len(self.extension_module.preprocess_commands({
'BUILD_DIR': '',
'WEBDIR': '',
'PHP_VM': ''
})))
def test_service_commands(self):
eq_(0, len(self.extension_module.service_commands({
'BUILD_DIR': '',
'WEBDIR': '',
'PHP_VM': ''
})))
def test_service_environment(self):
eq_(0, len(self.extension_module.service_environment({
'BUILD_DIR': '',
'WEBDIR': '',
'PHP_VM': ''
})))
def test_configure_composer_with_php_version(self):
ctx = utils.FormattedDict({
'BUILD_DIR': 'tests/data/composer',
'WEBDIR': '',
'PHP_54_LATEST': '5.4.31'
})
config = self.extension_module.ComposerConfiguration(ctx)
config.configure()
assert 'PHP_EXTENSIONS' in ctx.keys()
assert list == type(ctx['PHP_EXTENSIONS'])
assert 4 == len(ctx['PHP_EXTENSIONS'])
assert 'openssl' == ctx['PHP_EXTENSIONS'][0]
assert 'zip' == ctx['PHP_EXTENSIONS'][1]
assert 'fileinfo' == ctx['PHP_EXTENSIONS'][2]
assert 'gd' == ctx['PHP_EXTENSIONS'][3]
assert '5.4.31' == ctx['PHP_VERSION']
assert 'php' == ctx['PHP_VM']
def test_configure_composer_with_php_version_and_base_extensions(self):
ctx = utils.FormattedDict({
'BUILD_DIR': 'tests/data/composer',
'WEBDIR': '',
'PHP_EXTENSIONS': ['a', 'b'],
'PHP_54_LATEST': '5.4.31'
})
config = self.extension_module.ComposerConfiguration(ctx)
config.configure()
assert 'PHP_EXTENSIONS' in ctx.keys()
assert list == type(ctx['PHP_EXTENSIONS'])
assert 6 == len(ctx['PHP_EXTENSIONS'])
assert 'a' == ctx['PHP_EXTENSIONS'][0]
assert 'b' == ctx['PHP_EXTENSIONS'][1]
assert 'openssl' == ctx['PHP_EXTENSIONS'][2]
assert 'zip' == ctx['PHP_EXTENSIONS'][3]
assert 'fileinfo' == ctx['PHP_EXTENSIONS'][4]
assert 'gd' == ctx['PHP_EXTENSIONS'][5]
assert '5.4.31' == ctx['PHP_VERSION']
assert 'php' == ctx['PHP_VM']
def test_configure_composer_without_php_version(self):
ctx = utils.FormattedDict({
'BUILD_DIR': 'tests/data/composer-no-php',
'WEBDIR': '',
'PHP_VERSION': '5.4.31' # uses bp default
})
config = self.extension_module.ComposerConfiguration(ctx)
config.configure()
assert '5.4.31' == ctx['PHP_VERSION']
assert 'php' == ctx['PHP_VM']
assert 'PHP_EXTENSIONS' in ctx.keys()
assert list == type(ctx['PHP_EXTENSIONS'])
assert 3 == len(ctx['PHP_EXTENSIONS'])
assert 'openssl' == ctx['PHP_EXTENSIONS'][0]
assert 'zip' == ctx['PHP_EXTENSIONS'][1]
assert 'fileinfo' == ctx['PHP_EXTENSIONS'][2]
def test_configure_composer_with_hhvm_version(self):
ctx = utils.FormattedDict({
'BUILD_DIR': 'tests/data/composer-with-hhvm',
'WEBDIR': '',
'HHVM_VERSION': '3.2.0'
})
config = self.extension_module.ComposerConfiguration(ctx)
config.configure()
assert '3.2.0' == ctx['HHVM_VERSION']
assert 'hhvm' == ctx['PHP_VM']
def test_configure_does_not_run_when_no_composer_json(self):
ctx = utils.FormattedDict({
'BUILD_DIR': 'tests/data/app-1',
'WEBDIR': '',
'PHP_EXTENSIONS': ['a', 'b']
})
config = self.extension_module.ComposerConfiguration(ctx)
config.configure()
assert 'PHP_EXTENSIONS' in ctx.keys()
assert list == type(ctx['PHP_EXTENSIONS'])
assert 2 == len(ctx['PHP_EXTENSIONS'])
assert 'a' in ctx['PHP_EXTENSIONS']
assert 'b' in ctx['PHP_EXTENSIONS']
assert 'openssl' not in ctx['PHP_EXTENSIONS']
def test_configure_paths_missing(self):
def fcp_test_json(path):
tmp = fcp_orig(path)
return (tmp[0], None)
def fcp_test_lock(path):
tmp = fcp_orig(path)
return (None, tmp[1])
def fcp_test_none(path):
return (None, None)
ctx = utils.FormattedDict({
'BUILD_DIR': 'tests/data/composer',
'WEBDIR': '',
'PHP_54_LATEST': '5.4.31'
})
fcp_orig = self.extension_module.find_composer_paths
# test when no composer.json or composer.lock files found
self.extension_module.find_composer_paths = fcp_test_none
try:
self.extension_module.ComposerConfiguration(ctx).configure()
assert 'PHP_EXTENSIONS' not in ctx.keys()
finally:
self.extension_module.find_composer_paths = fcp_orig
# test when composer.json found, but no composer.lock
self.extension_module.find_composer_paths = fcp_test_json
try:
self.extension_module.ComposerConfiguration(ctx).configure()
assert 'PHP_EXTENSIONS' in ctx.keys()
assert 3 == len(ctx['PHP_EXTENSIONS'])
assert 'openssl' in ctx['PHP_EXTENSIONS']
assert 'fileinfo' in ctx['PHP_EXTENSIONS']
assert 'zip' in ctx['PHP_EXTENSIONS']
finally:
self.extension_module.find_composer_paths = fcp_orig
# test when composer.lock found, but no composer.json
self.extension_module.find_composer_paths = fcp_test_lock
try:
self.extension_module.ComposerConfiguration(ctx).configure()
assert 'PHP_EXTENSIONS' in ctx.keys()
assert 4 == len(ctx['PHP_EXTENSIONS'])
assert 'openssl' in ctx['PHP_EXTENSIONS']
assert 'gd' in ctx['PHP_EXTENSIONS']
assert 'fileinfo' in ctx['PHP_EXTENSIONS']
assert 'zip' in ctx['PHP_EXTENSIONS']
finally:
self.extension_module.find_composer_paths = fcp_orig
def test_find_composer_php_version(self):
ctx = {'BUILD_DIR': 'tests/data/composer-lock', 'WEBDIR': ''}
config = self.extension_module.ComposerConfiguration(ctx)
php_version = config.read_version_from_composer('php')
eq_('>=5.3', php_version)
def test_pick_php_version(self):
ctx = {
'PHP_VERSION': '5.4.31',
'PHP_54_LATEST': '5.4.31',
'BUILD_DIR': '',
'PHP_55_LATEST': '5.5.15',
'PHP_56_LATEST': '5.6.7',
'WEBDIR': ''
}
pick_php_version = \
self.extension_module.ComposerConfiguration(ctx).pick_php_version
# no PHP 5.3, default to 5.4
eq_('5.4.31', pick_php_version('>=5.3'))
eq_('5.4.31', pick_php_version('5.3.*'))
# latest PHP 5.4 version
eq_('5.4.31', pick_php_version('>=5.4'))
eq_('5.4.31', pick_php_version('5.4.*'))
        # exact PHP 5.4 versions
eq_('5.4.31', pick_php_version('5.4.31'))
eq_('5.4.30', pick_php_version('5.4.30'))
eq_('5.4.29', pick_php_version('5.4.29'))
# latest PHP 5.5 version
eq_('5.5.15', pick_php_version('>=5.5'))
eq_('5.5.15', pick_php_version('5.5.*'))
# exact PHP 5.5 versions
eq_('5.5.15', pick_php_version('5.5.15'))
eq_('5.5.14', pick_php_version('5.5.14'))
# latest PHP 5.6 version
eq_('5.6.7', pick_php_version('>=5.6'))
eq_('5.6.7', pick_php_version('5.6.*'))
# exact PHP 5.6 versions
eq_('5.6.7', pick_php_version('5.6.7'))
eq_('5.6.6', pick_php_version('5.6.6'))
# not understood, should default to PHP_VERSION
eq_('5.4.31', pick_php_version(''))
eq_('5.4.31', pick_php_version(None))
eq_('5.4.31', pick_php_version('5.61.1'))
eq_('5.4.31', pick_php_version('<5.5'))
eq_('5.4.31', pick_php_version('<5.4'))
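        # A minimal sketch of the selection rules the assertions above imply
        # (hypothetical; the buildpack's actual pick_php_version lives in the
        # composer extension, not in this test):
        #
        #   def pick_php_version(requested, ctx):
        #       if not requested:
        #           return ctx['PHP_VERSION']
        #       if requested in ('>=5.3', '5.3.*', '>=5.4', '5.4.*'):
        #           return ctx['PHP_54_LATEST']
        #       if requested in ('>=5.5', '5.5.*'):
        #           return ctx['PHP_55_LATEST']
        #       if requested in ('>=5.6', '5.6.*'):
        #           return ctx['PHP_56_LATEST']
        #       if re.match(r'^5\.[456]\.\d+$', requested):
        #           return requested            # exact supported version
        #       return ctx['PHP_VERSION']       # not understood: default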
def test_empty_platform_section(self):
exts = self.extension_module.ComposerConfiguration({
'BUILD_DIR': '',
'WEBDIR': ''
}).read_exts_from_path(
'tests/data/composer/composer-phalcon.lock')
eq_(2, len(exts))
eq_('curl', exts[0])
eq_('tokenizer', exts[1])
def test_none_for_extension_reading(self):
exts = self.extension_module.ComposerConfiguration({
'BUILD_DIR': '',
'WEBDIR': ''
}).read_exts_from_path(None)
eq_(0, len(exts))
def test_with_extensions(self):
exts = self.extension_module.ComposerConfiguration({
'BUILD_DIR': '',
'WEBDIR': ''
}).read_exts_from_path(
'tests/data/composer/composer.json')
eq_(2, len(exts))
eq_('zip', exts[0])
eq_('fileinfo', exts[1])
def test_with_oddly_formatted_composer_file(self):
exts = self.extension_module.ComposerConfiguration({
'BUILD_DIR': '',
'WEBDIR': ''
}).read_exts_from_path(
'tests/data/composer/composer-format.json')
eq_(1, len(exts))
eq_('mysqli', exts[0])
def test_composer_defaults(self):
ctx = utils.FormattedDict({
'BUILD_DIR': '/tmp/build',
'CACHE_DIR': '/tmp/cache',
'PHP_VM': 'will_default_to_php_strategy',
'LIBDIR': 'lib',
'WEBDIR': '',
'NO_WEBDIR_SET': False
})
ct = self.extension_module.ComposerExtension(ctx)
eq_('/tmp/build/lib/vendor', ct._ctx['COMPOSER_VENDOR_DIR'])
eq_('/tmp/build/php/bin', ct._ctx['COMPOSER_BIN_DIR'])
eq_('/tmp/cache/composer', ct._ctx['COMPOSER_CACHE_DIR'])
def test_composer_custom_values(self):
ctx = utils.FormattedDict({
'BUILD_DIR': '/tmp/build',
'CACHE_DIR': '/tmp/cache',
'LIBDIR': 'lib',
'COMPOSER_VENDOR_DIR': '{BUILD_DIR}/vendor',
'COMPOSER_BIN_DIR': '{BUILD_DIR}/bin',
'PHP_VM': 'will_default_to_php_strategy',
'COMPOSER_CACHE_DIR': '{CACHE_DIR}/custom',
'WEBDIR': '',
'NO_WEBDIR_SET': False
})
ct = self.extension_module.ComposerExtension(ctx)
eq_('/tmp/build/vendor', ct._ctx['COMPOSER_VENDOR_DIR'])
eq_('/tmp/build/bin', ct._ctx['COMPOSER_BIN_DIR'])
eq_('/tmp/cache/custom', ct._ctx['COMPOSER_CACHE_DIR'])
def test_binary_path_for_hhvm(self):
ctx = utils.FormattedDict({
'BUILD_DIR': '/usr/awesome/',
'PHP_VM': 'hhvm',
'WEBDIR': ''
})
stg = self.extension_module.HHVMComposerStrategy(ctx)
path = stg.binary_path()
eq_('/usr/awesome/hhvm/usr/bin/hhvm', path)
def test_binary_path_for_php(self):
ctx = utils.FormattedDict({
'BUILD_DIR': '/usr/awesome',
'WEBDIR': '',
'PHP_VM': 'php'
})
stg = self.extension_module.PHPComposerStrategy(ctx)
path = stg.binary_path()
eq_('/usr/awesome/php/bin/php', path)
def test_build_composer_environment_inherits_from_ctx(self):
ctx = utils.FormattedDict({
'BUILD_DIR': '/usr/awesome',
'WEBDIR': '',
'PHPRC': '/usr/awesome/phpini',
'PHP_VM': 'php',
'TMPDIR': 'tmp',
'LIBDIR': 'lib',
'CACHE_DIR': 'cache',
'OUR_SPECIAL_KEY': 'SPECIAL_VALUE',
'NO_WEBDIR_SET': False
})
environ_stub = Dingus()
environ_stub._set_return_value(['OUR_SPECIAL_KEY'])
write_config_stub = Dingus()
with patches({
'os.environ.keys': environ_stub,
'composer.extension.PHPComposerStrategy.write_config': write_config_stub
}):
self.extension_module.ComposerExtension(ctx)
cr = self.extension_module.ComposerCommandRunner(ctx, None)
built_environment = cr._build_composer_environment()
assert 'OUR_SPECIAL_KEY' in built_environment, \
'OUR_SPECIAL_KEY was not found in the built_environment variable'
assert built_environment['OUR_SPECIAL_KEY'] == 'SPECIAL_VALUE', \
'"OUR_SPECIAL_KEY" key in built_environment was %s; expected "SPECIAL_VALUE"' % built_environment['OUR_SPECIAL_KEY']
def test_build_composer_environment_sets_composer_env_vars(self):
ctx = utils.FormattedDict({
'BUILD_DIR': '/tmp/build',
'WEBDIR': '',
'CACHE_DIR': '/tmp/cache',
'LIBDIR': 'lib',
'TMPDIR': '/tmp',
'PHP_VM': 'php',
'NO_WEBDIR_SET': False
})
write_config_stub = Dingus()
with patches({
'composer.extension.PHPComposerStrategy.write_config': write_config_stub
}):
self.extension_module.ComposerExtension(ctx)
cr = self.extension_module.ComposerCommandRunner(ctx, None)
built_environment = cr._build_composer_environment()
assert 'COMPOSER_VENDOR_DIR' in built_environment, \
'Expect to find COMPOSER_VENDOR_DIR in built_environment'
assert 'COMPOSER_BIN_DIR' in built_environment, \
'Expect to find COMPOSER_BIN_DIR in built_environment'
assert 'COMPOSER_CACHE_DIR' in built_environment, \
'Expect to find COMPOSER_CACHE_DIR in built_environment'
def test_build_composer_environment_forbids_overwriting_key_vars(self):
ctx = utils.FormattedDict({
'BUILD_DIR': '/usr/awesome',
'WEBDIR': '',
'PHP_VM': 'php',
'TMPDIR': 'tmp',
'LIBDIR': 'lib',
'CACHE_DIR': 'cache',
'PHPRC': '/usr/awesome/phpini',
'NO_WEBDIR_SET': False
})
write_config_stub = Dingus()
with patches({
'composer.extension.PHPComposerStrategy.write_config': write_config_stub
}):
self.extension_module.ComposerExtension(ctx)
cr = self.extension_module.ComposerCommandRunner(ctx, None)
built_environment = cr._build_composer_environment()
eq_(built_environment['LD_LIBRARY_PATH'], '/usr/awesome/php/lib')
eq_(built_environment['PHPRC'], 'tmp')
def test_build_composer_environment_converts_vars_to_str(self):
ctx = utils.FormattedDict({
'BUILD_DIR': '/usr/awesome',
'WEBDIR': '',
'PHP_VM': 'php',
'TMPDIR': 'tmp',
'LIBDIR': 'lib',
'CACHE_DIR': 'cache',
'PHPRC': '/usr/awesome/phpini',
'MY_DICTIONARY': {'KEY': 'VALUE'},
'NO_WEBDIR_SET': False
})
write_config_stub = Dingus()
with patches({
'composer.extension.PHPComposerStrategy.write_config': write_config_stub
}):
self.extension_module.ComposerExtension(ctx)
cr = self.extension_module.ComposerCommandRunner(ctx, None)
built_environment = cr._build_composer_environment()
for key, val in built_environment.iteritems():
assert type(val) == str, \
"Expected [%s]:[%s] to be type `str`, but found type [%s]" % (
key, val, type(val))
def test_build_composer_environment_has_missing_key(self):
os.environ['SOME_KEY'] = 'does not matter'
ctx = utils.FormattedDict({
'BUILD_DIR': '/usr/awesome',
'WEBDIR': '',
'PHP_VM': 'php',
'TMPDIR': 'tmp',
'LIBDIR': 'lib',
'CACHE_DIR': 'cache',
'SOME_KEY': utils.wrap('{exact_match}'),
'NO_WEBDIR_SET': False
})
write_config_stub = Dingus()
with patches({
'composer.extension.PHPComposerStrategy.write_config': write_config_stub
}):
self.extension_module.ComposerExtension(ctx)
cr = self.extension_module.ComposerCommandRunner(ctx, None)
try:
built_environment = cr._build_composer_environment()
assert "{exact_match}" == built_environment['SOME_KEY'], \
"value should match"
except KeyError, e:
assert 'exact_match' != e.message, \
"Should not try to evaluate value [%s]" % e
raise
def test_build_composer_environment_no_path(self):
ctx = utils.FormattedDict({
'BUILD_DIR': '/usr/awesome',
'WEBDIR': '',
'PHP_VM': 'php',
'TMPDIR': 'tmp',
'LIBDIR': 'lib',
'CACHE_DIR': 'cache',
'NO_WEBDIR_SET': False
})
write_config_stub = Dingus()
with patches({
'composer.extension.PHPComposerStrategy.write_config': write_config_stub
}):
self.extension_module.ComposerExtension(ctx)
cr = self.extension_module.ComposerCommandRunner(ctx, None)
built_environment = cr._build_composer_environment()
assert 'PATH' in built_environment, "should have PATH set"
assert "/usr/awesome/php/bin" == built_environment['PATH'], \
"PATH should contain path to PHP, found [%s]" \
% built_environment['PATH']
def test_build_composer_environment_existing_path(self):
ctx = utils.FormattedDict({
'BUILD_DIR': '/usr/awesome',
'WEBDIR': '',
'PHP_VM': 'php',
'TMPDIR': 'tmp',
'LIBDIR': 'lib',
'CACHE_DIR': 'cache',
'PATH': '/bin:/usr/bin',
'NO_WEBDIR_SET': False
})
write_config_stub = Dingus()
with patches({
'composer.extension.PHPComposerStrategy.write_config': write_config_stub
}):
self.extension_module.ComposerExtension(ctx)
cr = self.extension_module.ComposerCommandRunner(ctx, None)
built_environment = cr._build_composer_environment()
assert 'PATH' in built_environment, "should have PATH set"
assert built_environment['PATH'].endswith(":/usr/awesome/php/bin"), \
"PATH should contain path to PHP, found [%s]" \
% built_environment['PATH']
def test_ld_library_path_for_hhvm(self):
ctx = utils.FormattedDict({
'BUILD_DIR': '/usr/awesome/',
'WEBDIR': '',
'PHP_VM': 'hhvm'
})
stg = self.extension_module.HHVMComposerStrategy(ctx)
path = stg.ld_library_path()
eq_('/usr/awesome/hhvm/usr/lib/hhvm', path)
def test_ld_library_path_for_php(self):
ctx = utils.FormattedDict({
'BUILD_DIR': '/usr/awesome',
'WEBDIR': '',
'PHP_VM': 'php'
})
stg = self.extension_module.PHPComposerStrategy(ctx)
path = stg.ld_library_path()
eq_('/usr/awesome/php/lib', path)
def test_run_sets_github_oauth_token_if_present(self):
ctx = utils.FormattedDict({
'BUILD_DIR': '/usr/awesome',
'PHP_VM': 'php',
'TMPDIR': tempfile.gettempdir(),
'LIBDIR': 'lib',
'CACHE_DIR': 'cache',
'COMPOSER_GITHUB_OAUTH_TOKEN': 'MADE_UP_TOKEN_VALUE',
'BP_DIR': '',
'WEBDIR': '',
'NO_WEBDIR_SET': False
})
instance_stub = Dingus()
instance_stub._set_return_value("""{"rate": {"limit": 60, "remaining": 60}}""")
stream_output_stub = Dingus()
rewrite_stub = Dingus()
environ_stub = Dingus()
environ_stub._set_return_value('MADE_UP_TOKEN_VALUE')
with patches({
'StringIO.StringIO.getvalue': instance_stub,
'composer.extension.stream_output': stream_output_stub,
'composer.extension.utils.rewrite_cfgs': rewrite_stub,
'os.environ.get': environ_stub
}):
ct = self.extension_module.ComposerExtension(ctx)
builder_stub = Dingus(_ctx=ctx)
ct._builder = builder_stub
ct.composer_runner = \
self.extension_module.ComposerCommandRunner(ctx, builder_stub)
github_oauth_token_is_valid_stub = Dingus(
'test_run_sets_github_oauth_token_if_present:'
'github_oauth_token_is_valid_stub')
github_oauth_token_is_valid_stub._set_return_value(True)
ct._github_oauth_token_is_valid = github_oauth_token_is_valid_stub
ct.run()
executed_command = stream_output_stub.calls()[0].args[1]
assert executed_command.find('config') > 0, 'did not see "config"'
assert executed_command.find('-g') > 0, 'did not see "-g"'
assert executed_command.find('github-oauth.github.com') > 0, \
'did not see "github-oauth.github.com"'
assert executed_command.find('"MADE_UP_TOKEN_VALUE"') > 0, \
'did not see "MADE_UP_TOKEN_VALUE"'
def test_run_does_not_set_github_oauth_if_missing(self):
ctx = utils.FormattedDict({
'BUILD_DIR': '/usr/awesome',
'PHP_VM': 'php',
'TMPDIR': tempfile.gettempdir(),
'LIBDIR': 'lib',
'CACHE_DIR': 'cache',
'BP_DIR': '',
'WEBDIR': '',
'NO_WEBDIR_SET': False
})
instance_stub = Dingus()
instance_stub._set_return_value("""{"rate": {"limit": 60, "remaining": 60}}""")
stream_output_stub = Dingus()
rewrite_stub = Dingus()
builder = Dingus(_ctx=ctx)
setup_composer_github_token_stub = Dingus()
with patches({
'StringIO.StringIO.getvalue': instance_stub,
'composer.extension.stream_output': stream_output_stub,
'composer.extension.utils.rewrite_cfgs': rewrite_stub,
'composer.extension.ComposerExtension.setup_composer_github_token': setup_composer_github_token_stub
}):
ct = self.extension_module.ComposerExtension(ctx)
ct._builder = builder
ct.composer_runner = \
self.extension_module.ComposerCommandRunner(ctx, builder)
ct.run()
setup_composer_github_token_calls = setup_composer_github_token_stub.calls()
assert 0 == len(setup_composer_github_token_calls), \
'setup_composer_github_token() was called %s times, expected 0' % len(setup_composer_github_token_calls)
def test_github_oauth_token_is_valid_uses_curl(self):
ctx = utils.FormattedDict({
'BUILD_DIR': '/usr/awesome',
'PHP_VM': 'php',
'TMPDIR': tempfile.gettempdir(),
'LIBDIR': 'lib',
'CACHE_DIR': 'cache',
'WEBDIR': '',
'NO_WEBDIR_SET': False
})
instance_stub = Dingus()
instance_stub._set_return_value("""{"resources": {}}""")
stream_output_stub = Dingus(
'test_github_oauth_token_uses_curl : stream_output')
with patches({
'StringIO.StringIO.getvalue': instance_stub,
'composer.extension.stream_output': stream_output_stub,
}):
ct = self.extension_module.ComposerExtension(ctx)
ct._github_oauth_token_is_valid('MADE_UP_TOKEN_VALUE')
executed_command = stream_output_stub.calls()[0].args[1]
assert stream_output_stub.calls().once(), \
'stream_output() was called more than once'
assert executed_command.find('curl') == 0, \
'Curl was not called, executed_command was %s' % executed_command
assert executed_command.find(
'-H "Authorization: token MADE_UP_TOKEN_VALUE"') > 0, \
'No token was passed to curl. Command was: %s' % executed_command
assert executed_command.find('https://api.github.com/rate_limit') > 0,\
'No URL was passed to curl. Command was: %s' % executed_command
def test_github_oauth_token_is_valid_interprets_github_api_200_as_true(self): # noqa
ctx = utils.FormattedDict({
'BUILD_DIR': tempfile.gettempdir(),
'PHP_VM': 'php',
'TMPDIR': tempfile.gettempdir(),
'LIBDIR': 'lib',
'CACHE_DIR': 'cache',
'WEBDIR': '',
'NO_WEBDIR_SET': False
})
instance_stub = Dingus()
instance_stub._set_return_value("""{"resources": {}}""")
stream_output_stub = Dingus(
'test_github_oauth_token_uses_curl : stream_output')
with patches({
'StringIO.StringIO.getvalue': instance_stub,
'composer.extension.stream_output': stream_output_stub,
}):
ct = self.extension_module.ComposerExtension(ctx)
result = ct._github_oauth_token_is_valid('MADE_UP_TOKEN_VALUE')
assert result is True, \
'_github_oauth_token_is_valid returned %s, expected True' % result
def test_github_oauth_token_is_valid_interprets_github_api_401_as_false(self): # noqa
ctx = utils.FormattedDict({
'BUILD_DIR': tempfile.gettempdir(),
'PHP_VM': 'php',
'TMPDIR': tempfile.gettempdir(),
'LIBDIR': 'lib',
'CACHE_DIR': 'cache',
'WEBDIR': '',
'NO_WEBDIR_SET': False
})
instance_stub = Dingus()
instance_stub._set_return_value("""{}""")
stream_output_stub = Dingus(
'test_github_oauth_token_uses_curl : stream_output')
with patches({
'StringIO.StringIO.getvalue': instance_stub,
'composer.extension.stream_output': stream_output_stub,
}):
ct = self.extension_module.ComposerExtension(ctx)
result = ct._github_oauth_token_is_valid('MADE_UP_TOKEN_VALUE')
assert result is False, \
'_github_oauth_token_is_valid returned %s, expected False' % result
def test_no_github_api_call_with_cached_buildpack(self):
ctx = utils.FormattedDict({
'BUILD_DIR': tempfile.gettempdir(),
'PHP_VM': 'php',
'TMPDIR': tempfile.gettempdir(),
'LIBDIR': 'lib',
'CACHE_DIR': 'cache',
'BP_DIR': '',
'WEBDIR': '',
'NO_WEBDIR_SET': False
})
builder = Dingus(_ctx=ctx)
path_exists_stub = Dingus()
path_exists_stub._set_return_value(True)
setup_composer_github_token_stub = Dingus()
check_github_rate_exceeded_stub = Dingus()
rewrite_stub = Dingus()
stream_output_stub = Dingus(
'test_github_oauth_token_uses_curl : stream_output')
with patches({
'os.path.exists': path_exists_stub,
'composer.extension.ComposerExtension.setup_composer_github_token': setup_composer_github_token_stub,
'composer.extension.ComposerExtension.check_github_rate_exceeded': check_github_rate_exceeded_stub,
'composer.extension.utils.rewrite_cfgs': rewrite_stub,
'composer.extension.stream_output': stream_output_stub
}):
ct = self.extension_module.ComposerExtension(ctx)
ct._builder = builder
ct.composer_runner = \
self.extension_module.ComposerCommandRunner(ctx, builder)
ct.run()
assert 0 == len(setup_composer_github_token_stub.calls()), \
'setup_composer_github_token was called, expected no calls'
assert 0 == len(check_github_rate_exceeded_stub.calls()), \
'check_github_rate_exceeded was called, expected no calls'
def test_github_download_rate_not_exceeded(self): # noqa
ctx = utils.FormattedDict({
'BUILD_DIR': tempfile.gettempdir(),
'PHP_VM': 'php',
'TMPDIR': tempfile.gettempdir(),
'LIBDIR': 'lib',
'CACHE_DIR': 'cache',
'WEBDIR': '',
'NO_WEBDIR_SET': False
})
instance_stub = Dingus()
instance_stub._set_return_value("""{"rate": {"limit": 60, "remaining": 60}}""")
stream_output_stub = Dingus(
'test_github_oauth_token_uses_curl : stream_output')
with patches({
'StringIO.StringIO.getvalue': instance_stub,
'composer.extension.stream_output': stream_output_stub,
}):
ct = self.extension_module.ComposerExtension(ctx)
result = ct._github_rate_exceeded(False)
            assert result is False, \
                '_github_rate_exceeded returned %s, expected False' % result
def test_github_download_rate_is_exceeded(self): # noqa
ctx = utils.FormattedDict({
'BUILD_DIR': tempfile.gettempdir(),
'PHP_VM': 'php',
'TMPDIR': tempfile.gettempdir(),
'LIBDIR': 'lib',
'CACHE_DIR': 'cache',
'WEBDIR': '',
'NO_WEBDIR_SET': False
})
instance_stub = Dingus()
instance_stub._set_return_value("""{"rate": {"limit": 60, "remaining": 0}}""")
stream_output_stub = Dingus(
'test_github_oauth_token_uses_curl : stream_output')
with patches({
'StringIO.StringIO.getvalue': instance_stub,
'composer.extension.stream_output': stream_output_stub,
}):
ct = self.extension_module.ComposerExtension(ctx)
result = ct._github_rate_exceeded(False)
            assert result is True, \
                '_github_rate_exceeded returned %s, expected True' % result
| |
#!/usr/bin/env python
#
##############################################################################
### NZBGET POST-PROCESSING SCRIPT ###
# Converts files and passes them to Sonarr for further processing.
#
# NOTE: This script requires Python to be installed on your system.
##############################################################################
### OPTIONS ###
# Change to full path to MP4 Automator folder. No quotes and a trailing /
#MP4_FOLDER=~/sickbeard_mp4_automator/
# Convert file before passing to destination (True, False)
#SHOULDCONVERT=False
# Category for Couchpotato
#CP_CAT=Couchpotato
# Category for Sonarr
#SONARR_CAT=Sonarr
# Category for Radarr
#RADARR_CAT=Radarr
# Category for Sickbeard
#SICKBEARD_CAT=Sickbeard
# Category for Sickrage
#SICKRAGE_CAT=Sickrage
# Category for bypassing any further processing but still converting
#BYPASS_CAT=Bypass
# Custom output_directory setting
#OUTPUT_DIR=
### NZBGET POST-PROCESSING SCRIPT ###
##############################################################################
import os
import sys
import re
import json
import traceback
# Sanity checks for path string
MP4folder = os.environ['NZBPO_MP4_FOLDER'].strip()
MP4folder = MP4folder.replace('"', '')
MP4folder = MP4folder.replace("'", "")
MP4folder = MP4folder.replace("\\", "/")
if not(MP4folder.endswith("/")):
MP4folder += "/"
#DEBUG#print MP4folder+" the original is "+os.environ['NZBPO_MP4_FOLDER']
output_dir = None
if 'NZBPO_OUTPUT_DIR' in os.environ:
output_dir = os.environ['NZBPO_OUTPUT_DIR'].strip()
if len(output_dir) > 0:
output_dir = output_dir.replace('"', '')
output_dir = output_dir.replace("'", "")
output_dir = output_dir.replace("\\", "/")
if not(output_dir.endswith("/")):
output_dir += "/"
        #DEBUG#print "Overriding output directory"
sys.path.append(MP4folder)
try:
from readSettings import ReadSettings
from mkvtomp4 import MkvtoMp4
from autoprocess import autoProcessMovie, autoProcessTV, autoProcessTVSR, sonarr, radarr
import logging
from logging.config import fileConfig
except ImportError:
print("[ERROR] Wrong path to sickbeard_mp4_automator: " + os.environ['NZBPO_MP4_FOLDER'])
print("[ERROR] %s" % traceback.print_exc())
sys.exit(0)
# Setup Logging
logpath = '/var/log/sickbeard_mp4_automator'
if os.name == 'nt':
logpath = MP4folder
elif not os.path.isdir(logpath):
try:
os.mkdir(logpath)
except:
logpath = MP4folder
configPath = os.path.abspath(os.path.join(MP4folder, 'logging.ini')).replace("\\", "\\\\")
logPath = os.path.abspath(os.path.join(logpath, 'index.log')).replace("\\", "\\\\")
fileConfig(configPath, defaults={'logfilename': logPath})
log = logging.getLogger("NZBGetPostProcess")
# Determine if conversion will take place
shouldConvert = (os.environ['NZBPO_SHOULDCONVERT'].lower() in ("yes", "true", "t", "1"))
if 'NZBOP_SCRIPTDIR' in os.environ and os.environ['NZBOP_VERSION'][0:5] >= '11.0':
log.info("Script triggered from NZBGet (11.0 or later).")
path = os.environ['NZBPP_DIRECTORY'] # Path to NZB directory
nzb = os.environ['NZBPP_NZBFILENAME'] # Original NZB name
category = os.environ['NZBPP_CATEGORY'] # NZB Category to determine destination
#DEBUG#print "Category is %s." % category
couchcat = os.environ['NZBPO_CP_CAT'].lower()
sonarrcat = os.environ['NZBPO_SONARR_CAT'].lower()
radarrcat = os.environ['NZBPO_RADARR_CAT'].lower()
sickbeardcat = os.environ['NZBPO_SICKBEARD_CAT'].lower()
sickragecat = os.environ['NZBPO_SICKRAGE_CAT'].lower()
bypass = os.environ['NZBPO_BYPASS_CAT'].lower()
categories = [sickbeardcat, couchcat, sonarrcat, radarrcat, sickragecat, bypass]
log.debug("Path: %s" % path)
log.debug("NZB: %s" % nzb)
log.debug("Category: %s" % category)
log.debug("Categories: %s" % categories)
# NZBGet argv: all passed as environment variables.
clientAgent = "nzbget"
# Exit codes used by NZBGet
POSTPROCESS_PARCHECK = 92
POSTPROCESS_SUCCESS = 93
POSTPROCESS_ERROR = 94
POSTPROCESS_NONE = 95
# Check nzbget.conf options
status = 0
if os.environ['NZBOP_UNPACK'] != 'yes':
log.error("Please enable option \"Unpack\" in nzbget configuration file, exiting.")
sys.exit(POSTPROCESS_NONE)
# Check par status
if os.environ['NZBPP_PARSTATUS'] == '3':
log.error("Par-check successful, but Par-repair disabled, exiting")
sys.exit(POSTPROCESS_NONE)
if os.environ['NZBPP_PARSTATUS'] == '1':
log.error("Par-check failed, setting status \"failed\".")
status = 1
sys.exit(POSTPROCESS_NONE)
# Check unpack status
if os.environ['NZBPP_UNPACKSTATUS'] == '1':
log.error("Unpack failed, setting status \"failed\".")
status = 1
sys.exit(POSTPROCESS_NONE)
if os.environ['NZBPP_UNPACKSTATUS'] == '0' and os.environ['NZBPP_PARSTATUS'] != '2':
# Unpack is disabled or was skipped due to nzb-file properties or due to errors during par-check
for dirpath, dirnames, filenames in os.walk(os.environ['NZBPP_DIRECTORY']):
for file in filenames:
fileExtension = os.path.splitext(file)[1]
if fileExtension in ['.par2']:
log.error("Post-Process: Unpack skipped and par-check skipped (although par2-files exist), setting status \"failed\".")
status = 1
break
if os.path.isfile(os.path.join(os.environ['NZBPP_DIRECTORY'], "_brokenlog.txt")) and not status == 1:
log.error("Post-Process: _brokenlog.txt exists, download is probably damaged, exiting.")
status = 1
        if status != 1:
            log.info("No par2 files found and _brokenlog.txt doesn't exist; considering download successful.")
# Check if destination directory exists (important for reprocessing of history items)
if not os.path.isdir(os.environ['NZBPP_DIRECTORY']):
log.error("Post-Process: Nothing to post-process: destination directory ", os.environ['NZBPP_DIRECTORY'], "doesn't exist.")
status = 1
sys.exit(POSTPROCESS_NONE)
# Make sure one of the appropriate categories is set
if category.lower() not in categories:
log.error("Post-Process: No valid category detected. Category was %s." % (category))
status = 1
sys.exit(POSTPROCESS_NONE)
# Make sure there are no duplicate categories
if len(categories) != len(set(categories)):
log.error("Duplicate category detected. Category names must be unique.")
status = 1
sys.exit(POSTPROCESS_NONE)
# All checks done, now launching the script.
settings = ReadSettings(MP4folder, "autoProcess.ini")
successful_process = False
if shouldConvert:
if output_dir:
settings.output_dir = output_dir
converter = MkvtoMp4(settings, logger=log)
biggest_file_size = 0
biggest_file_name = ""
m2ts_file = False
for r, d, f in os.walk(path):
for file in f:
filepath = os.path.join(r, file)
if filepath.endswith('.m2ts'): #m2ts files just screw up everything, but typically the largest file is the file that we want to convert.
m2ts_file = True
size = os.path.getsize(filepath)
if size > biggest_file_size:
biggest_file_size = size
biggest_file_name = filepath
for files in f:
inputfile = os.path.join(r, files)
                if m2ts_file:
dir_name = os.path.dirname(os.path.realpath( biggest_file_name ))
inputfile = biggest_file_name
#DEBUG#print inputfile
#Ignores files under 50MB
if os.path.getsize(inputfile) > 50000000:
if MkvtoMp4(settings, logger=log).validSource(inputfile):
try:
output = converter.process(inputfile)
if output:
log.info("Successfully processed %s." % inputfile)
successful_process = True
else:
log.exception( "File Processing Failed" )
except:
log.exception("File processing failed.")
                if m2ts_file:
filelist = [ f_r for f_r in os.listdir(dir_name) if f_r.endswith(".m2ts") ]
for f_r in filelist:
file_to_remove = os.path.join(r, f_r)
os.remove(file_to_remove)
break
if converter.output_dir:
path = converter.output_dir
        if not successful_process: #This means that there were no files that could be converted, i.e. probably an iso
            sys.exit(POSTPROCESS_ERROR)
elif (category.lower() == categories[0]):
#DEBUG#print "Sickbeard Processing Activated"
autoProcessTV.processEpisode(path, settings, nzb)
sys.exit(POSTPROCESS_SUCCESS)
elif (category.lower() == categories[1]):
#DEBUG#print "CouchPotato Processing Activated"
autoProcessMovie.process(path, settings, nzb, status)
sys.exit(POSTPROCESS_SUCCESS)
elif (category.lower() == categories[2]):
#DEBUG#print "Sonarr Processing Activated"
success = sonarr.processEpisode(path, settings, True)
if success:
sys.exit(POSTPROCESS_SUCCESS)
else:
sys.exit(POSTPROCESS_ERROR)
elif (category.lower() == categories[3]):
#DEBUG#print "Radarr Processing Activated"
success = radarr.processMovie(path, settings, True)
if success:
sys.exit(POSTPROCESS_SUCCESS)
else:
sys.exit(POSTPROCESS_ERROR)
elif (category.lower() == categories[4]):
#DEBUG#print "Sickrage Processing Activated"
autoProcessTVSR.processEpisode(path, settings, nzb)
sys.exit(POSTPROCESS_SUCCESS)
elif (category.lower() == categories[5]):
#DEBUG#print "Bypass Further Processing"
sys.exit(POSTPROCESS_NONE)
else:
log.error("This script can only be called from NZBGet (11.0 or later).")
sys.exit(0)
| |
from zinc_request_processor import ZincRequestProcessor, ZincError
from format_price import format_price
from getpass import getpass
import sys
import os
import json
import re
from subprocess import call
import urllib
import urllib2
WELCOME_BANNER = """
____ ____ __
|_ _| |_ _| [ |
\ \ /\ / /.---. | | .---. .--. _ .--..--. .---.
\ \/ \/ // /_\ \ | | / /'`\]/ .'`\ \[ `.-. .-. |/ /_\ \
\ /\ / | \__., | | | \__. | \__. | | | | | | || \__.,
\/ \/ '.__.'[___]'.___.' '.__.' [___||__||__]'.__.'
_________
| _ _ |
|_/ | | \_|.--.
| | / .'`\ \
_| |_ | \__. |
________ |_____| '.__.' _
| __ _| (_) | |
|_/ / / __ _ .--. .---. | |
.'.' _ [ | [ `.-. | / /'`\]| |
_/ /__/ | | | | | | | | \__. |_|
|________|[___][___||__]'.___.'(_)
"""
class ValidationHelpers(object):
@classmethod
def validate_number(klass, maximum, minimum=0):
def validate(x):
if (int(x) <= maximum and int(x) >= minimum):
return True
else:
print "You must enter a number between %s and %s\n" % (minimum, maximum)
return validate
@classmethod
def validate_boolean(klass):
def validate(x):
if (x == "y" or x == "n" or x == ""):
return True
else:
print "You must enter either 'y', 'n', or nothing"
return validate
@classmethod
def validate_credit_card(klass):
def validate(card_number):
card_number = card_number.replace(' ','')
digit_sum = 0
num_digits = len(card_number)
oddeven = num_digits & 1
for count in range(0, num_digits):
digit = int(card_number[count])
if not (( count & 1 ) ^ oddeven ):
digit = digit * 2
if digit > 9:
digit = digit - 9
digit_sum += digit
if ( (digit_sum % 10) == 0 ):
return True
else:
print "Invalid credit card number"
return validate
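    # Illustrative usage of the validators above (hypothetical card numbers;
    # 4111111111111111 is the standard Luhn-valid test value):
    #
    #   check = ValidationHelpers.validate_credit_card()
    #   check('4111 1111 1111 1111')   # True: Luhn checksum passes
    #   check('4111 1111 1111 1112')   # prints "Invalid credit card number"
    #                                  # and returns None, so prompt() retries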
class ZincWizard(object):
PROMPTS = {
"search_query": "What do you want to buy? (e.g. inflatable banana)",
"select_product_name": "Please select a result:",
"product_variants": "Please please enter a product URL.",
"product_quantity": "How many would you like to purchase? [1]",
"select_product_variants": "This item comes in multiple variants. Please choose an option.",
"select_shipping_methods": "This item has multiple shipping options. Please choose an option.",
"security_code": "Please enter your stored credit card's CVV security code.",
"place_order": "Would you like to place this order? [y]/n",
"gift": "Do you want this to be shipped as a gift? [y]/n",
"retailer_credentials": {
"email": "Please enter your Amazon username (email address)",
"password": "Please enter your Amazon password"
},
"shipping_address": {
"start_message": "\nPlease enter your shipping information. If you make a mistake, it may be corrected at the end of this section.\n",
"end_message": "\nYou've finished entering your shipping address!"
},
"address": {
"name": "Please input your full name:",
"address_line1": "Please input the first line of your address:",
"address_line2": "Please input the second line of your address: (optional)",
"city": "Please input your city:",
"state": "Please input your state (e.g. CA, MA, etc.):",
"zip_code": "Please input your zip code:",
"country": "Please input your country (e.g. US):",
"confirmation_message": "Is this your correct address? [y]/n"
},
"billing_address" : {
"start_message": "\nIs your billing address the same as your shipping address? [y]/n",
"end_message": "\nYou've finished entering you billing address!"
},
"credit_card": {
"start_message": "\nThe retailer requires a credit card for this purchase.",
"number": "Please input your credit card number (e.g. 5555555555554444)",
"expiration_month": "Please input your credit card expiration month (e.g. 03)",
"expiration_year": "Please input your credit card expiration year (e.g. 2017)",
"security_code": "Please input your card's CVV security code",
"end_message": "\nYou've finished entering your credit card information!"
},
"write_to_zincrc": "Would you like to write the information you just entered to a configuration file (~/.zincrc) so you can make orders more easily in the future? We'll only include your shipping address and a hashed credit card token, so no confidential information will be written to your hard drive. [y]/n"
}
def __init__(self,
retailer = "amazon",
client_token = "public",
filename = None,
gift = None,
):
self.retailer = retailer
self.client_token = client_token
self.stored_data = self.get_stored_data(filename)
self.gift = gift
self.response_data = {}
self.product_url = None
self.security_code = None
self.async_responses = {}
self.shipping_address = None
def get_stored_data(self, filename):
default_filename = os.path.join(os.path.expanduser("~"), ".zincrc")
if filename != None and os.path.isfile(filename):
with open(filename, 'rb') as f:
return json.loads(f.read())
elif os.path.isfile(default_filename):
with open(default_filename, 'rb') as f:
return json.loads(f.read())
return {}
def retrieve_data(self, key):
if key in self.stored_data:
return self.stored_data[key]
elif key == "shipping_address":
print self.PROMPTS[key]["start_message"]
return self.get_address(key)
elif key == "billing_address":
if self.prompt_boolean(self.PROMPTS[key]["start_message"]):
return self.shipping_address
else:
return self.get_address(key)
elif key == "credit_card":
return self.get_credit_card_information()
def start(self):
print WELCOME_BANNER
try:
self.start_interactive_session()
except ZincError as e:
self.print_indent("\nUnfortunately there seemed to be an error\n")
self.print_indent(str(e))
self.print_indent("\nRestarting...\n")
self.start_interactive_session()
def start_interactive_session(self):
self.get_product_name(self.response_data)
self.get_product_variants(self.response_data)
self.get_retailer_credentials(self.response_data)
self.get_shipping_methods(self.response_data)
self.get_store_card(self.response_data)
self.get_review_order(self.response_data)
self.get_place_order(self.response_data)
def prompt(self, prompt, validation=None, max_attempts=3, password=False):
attempts = 0
while True:
if password:
raw = getpass(prompt + "\n")
else:
raw = raw_input(prompt + "\n")
            if validation is None or validation(raw):
return raw
else:
attempts += 1
if attempts >= max_attempts:
print "You've reached the maximum number of attempts. Exiting!"
sys.exit()
def prompt_boolean(self, prompt, default=True):
result = self.prompt(prompt, ValidationHelpers.validate_boolean()).strip()
if (result == ""):
return default
elif (result == "y"):
return True
return False
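    # Note: prompt() retries invalid input up to max_attempts before exiting;
    # get_credit_card_information below pairs it with the ValidationHelpers
    # validators, e.g.
    #
    #   self.prompt(self.PROMPTS["credit_card"]["expiration_month"],
    #               ValidationHelpers.validate_number(12, 1))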
def get_product_name(self, response_data):
search_query = self.prompt(self.PROMPTS["search_query"])
print "\nProcessing request...\n"
product_name_response = ZincRequestProcessor.process("search_products", {
"client_token": self.client_token,
"retailer": self.retailer,
"search_query": search_query
})
response_data["product_name_response"] = product_name_response
self.product_url = self.select_product_name(product_name_response)
def get_product_variants(self, response_data):
print "\nProcessing request...\n"
async_response = ZincRequestProcessor.process_async("variant_options", {
"client_token": self.client_token,
"retailer": self.retailer,
"product_url": self.product_url
})
asin = self.get_asin(self.product_url)
product_info = None
if asin != None:
            InterfaceHelpers.print_image(asin)
print "\nLoading product information...\n"
product_info = AmazonDataFinder.get_amazon_data(asin)
variants_response = async_response.get_response()
response_data["variant_options_response"] = variants_response
response_data["products"] = self.select_product_variants(variants_response, product_info)
def get_retailer_credentials(self, response_data):
print "\nProcessing request...\n"
email = self.prompt(self.PROMPTS["retailer_credentials"]["email"])
password = self.prompt(self.PROMPTS["retailer_credentials"]["password"], password=True)
response_data["retailer_credentials"] = {
"email": email,
"password": password
}
def get_shipping_methods(self, response_data):
self.shipping_address = self.retrieve_data("shipping_address")
print "\nProcessing request...\n"
shipping_response = ZincRequestProcessor.process_async("shipping_methods", {
"client_token": self.client_token,
"retailer": self.retailer,
"products": response_data["products"],
"shipping_address": self.shipping_address,
"retailer_credentials": response_data["retailer_credentials"]
})
self.async_responses["shipping_response"] = shipping_response
def get_store_card(self, response_data):
cc_token = self.retrieve_data("cc_token")
if cc_token == None:
cc_data = self.retrieve_data("credit_card")
self.billing_address = self.retrieve_data("billing_address")
print "\nProcessing request...\n"
store_card_response = ZincRequestProcessor.process("store_card", {
"client_token": self.client_token,
"retailer": self.retailer,
"billing_address": self.billing_address,
"number": cc_data["number"],
"expiration_month": cc_data["expiration_month"],
"expiration_year": cc_data["expiration_year"]
})
response_data["store_card_response"] = store_card_response
self.stored_data["cc_token"] = store_card_response["cc_token"]
def get_review_order(self, response_data):
shipping_method_id = self.select_shipping_methods(
self.async_responses["shipping_response"].get_response())
payment_method = {
"prefer_use_gift_balance": True,
"cc_token": self.retrieve_data("cc_token"),
"security_code": self.get_security_code()
}
is_gift = self.get_is_gift()
print "\nProcessing request...\n"
review_order_response = ZincRequestProcessor.process("review_order", {
"client_token": self.client_token,
"retailer": self.retailer,
"products": response_data["products"],
"shipping_address": self.shipping_address,
"is_gift": is_gift,
"shipping_method_id": shipping_method_id,
"payment_method": payment_method,
"customer_email": "support@zinc.io",
"retailer_credentials": response_data["retailer_credentials"]
})
response_data["review_order_response"] = review_order_response
def get_place_order(self, response_data):
self.write_to_zincrc()
self.print_price_components(response_data)
if self.prompt_boolean(self.PROMPTS["place_order"]):
print "\nProcessing request...\n"
place_order_response = ZincRequestProcessor.process("place_order", {
"client_token": self.client_token,
"place_order_key": response_data["review_order_response"]["place_order_key"]
})
response_data["place_order_response"] = place_order_response
print "HOORAY! You've successfully placed an order. Here are the details:\n"
print "Amazon Order Id: %s" % place_order_response["merchant_order"]["merchant_order_id"]
print "Total Price: %s" % format_price(place_order_response["price_components"]["total"])
print place_order_response["shipping_method"]["name"] + ": " + place_order_response["shipping_method"]["description"]
def print_price_components(self, response_data):
components = response_data["review_order_response"]["price_components"]
self.print_indent("Product Subtotal: %s" % format_price(components["subtotal"]))
self.print_indent("Shipping Cost: %s" % format_price(components["shipping"]))
self.print_indent("Tax: %s" % format_price(components["tax"]))
if "gift_certificate" in components:
self.print_indent("Gift Certificate: %s" % format_price(components["gift_certificate"]))
if "promotion" in components:
self.print_indent("Promotion: %s" % format_price(components["promotion"]))
self.print_indent("Total: %s" % format_price(components["total"]))
def write_to_zincrc(self):
filename = os.path.join(os.path.expanduser("~"), ".zincrc")
if not os.path.isfile(filename) and self.prompt_boolean(self.PROMPTS["write_to_zincrc"]):
data = {
"shipping_address": self.shipping_address,
"cc_token": self.retrieve_data("cc_token")
}
with open(filename, 'w+') as f:
f.write(json.dumps(data))
def get_asin(self, product_url):
match = re.search("/([a-zA-Z0-9]{10})(?:[/?]|$)", product_url)
if match:
return match.group(1)
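# Returns the 10-character ASIN, or None when the URL has no match, e.g.
# (hypothetical URL): get_asin("http://www.amazon.com/dp/B00EXAMPLE?x=1") -> "B00EXAMPLE"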
def print_indent(self, value):
print " ", value
def get_is_gift(self):
if self.gift is not None:
return self.gift
return self.prompt_boolean(self.PROMPTS["gift"])
def get_security_code(self):
if self.security_code is not None:
return self.security_code
return self.prompt(self.PROMPTS["security_code"])
def get_credit_card_information(self):
print self.PROMPTS["credit_card"]["start_message"]
response = {}
response["number"] = self.prompt(self.PROMPTS["credit_card"]["number"],
ValidationHelpers.validate_credit_card())
response["expiration_month"] = self.prompt(self.PROMPTS["credit_card"]["expiration_month"],
ValidationHelpers.validate_number(12, 1))
response["expiration_year"] = self.prompt(self.PROMPTS["credit_card"]["expiration_year"],
ValidationHelpers.validate_number(2100, 2010))
self.security_code = self.prompt(self.PROMPTS["credit_card"]["security_code"])
print self.PROMPTS["credit_card"]["end_message"]
return response
def get_address(self, filetype):
address = {}
for label in ["name", "address_line1", "address_line2",
"city", "state", "zip_code", "country"]:
if label == "name":
full_name = self.prompt(self.PROMPTS["address"][label])
(first_name, last_name) = self.parse_name(full_name)
address["first_name"] = first_name
address["last_name"] = last_name
else:
address[label] = self.prompt(self.PROMPTS["address"][label])
print "\nYou typed the following:\n"
self.print_indent(" %s %s" % (address["first_name"], address["last_name"]))
self.print_indent(" %s" % (address["address_line1"]))
self.print_indent(" %s" % (address["address_line2"]))
self.print_indent(" %s, %s %s" % (address["city"], address["state"], address["zip_code"]))
self.print_indent(" %s" % (address["country"]))
print ""
if self.prompt_boolean(self.PROMPTS["address"]["confirmation_message"]):
print self.PROMPTS[filetype]["end_message"]
return address
else:
return self.get_address(filetype)
def parse_name(self, full_name):
split = full_name.split(" ")
if len(split) > 1:
first_name = " ".join(split[:-1])
last_name = split[-1]
return (first_name, last_name)
else:
return (".", full_name)
def build_prompt(self, base_prompt, description_list):
prompt = base_prompt + "\n"
prompt += "\n".join(description_list)
return prompt
def get_quantity(self):
quantity = self.prompt(self.PROMPTS["product_quantity"]).strip()
if quantity == "":
return 1
return int(quantity)  # coerce so the order request always carries an integer quantity
def select_product_name(self, response):
descriptions = []
collector = []
for i in xrange(len(response["results"])):
current = response["results"][i]
new_description = str(i) + ") " + current["title"].encode("ascii", "replace")
if "price" in current:
new_description += ", " + format_price(current["price"])
descriptions.append(new_description)
collector.append(current["product_url"])
prompt = self.build_prompt(self.PROMPTS["select_product_name"], descriptions)
collected_number = self.prompt(prompt,
ValidationHelpers.validate_number(len(descriptions)-1))
return collector[int(collected_number)]
def select_product_variants(self, variants_response, product_info):
descriptions = []
if product_info:
descriptions.append("\nDescription: " + product_info)
product_ids = []
if len(variants_response["variant_options"]) > 1:
for i in xrange(len(variants_response["variant_options"])):
current_descriptions_list = []
current_option = variants_response["variant_options"][i]
for dimension in current_option["dimensions"]:
current_descriptions_list.append(dimension["name"] + ": " + dimension["value"])
if "unit_price" in current_option:
current_descriptions_list.append("Price: " + format_price(current_option["unit_price"]))
product_ids.append(current_option["product_id"])
descriptions.append(str(i) + ") " + ", ".join(current_descriptions_list))
prompt = self.build_prompt(self.PROMPTS["select_product_variants"], descriptions)
description_number = self.prompt(prompt,
ValidationHelpers.validate_number(len(descriptions)-1))
chosen_product_id = product_ids[int(description_number)]
else:
chosen_product_id = variants_response["variant_options"][0]["product_id"]
quantity = self.get_quantity()
return [{
"product_id": chosen_product_id,
"quantity": quantity
}]
def select_shipping_methods(self, shipping_response):
descriptions = []
shipping_ids = []
for i in xrange(len(shipping_response["shipping_methods"])):
current_method = shipping_response["shipping_methods"][i]
descriptions.append(str(i) + ") " + current_method["name"] + \
": " + current_method["description"])
shipping_ids.append(current_method["shipping_method_id"])
prompt = self.build_prompt(self.PROMPTS["select_shipping_methods"], descriptions)
description_number = self.prompt(prompt,
ValidationHelpers.validate_number(len(descriptions)-1))
chosen_id = shipping_ids[int(description_number)]
return chosen_id
class AmazonDataFinder(object):
@classmethod
def get_amazon_data(klass, asin):
url = 'http://bulkbuyingtools.com/index.php'
values = {'name' : 'Michael Foord',
's' : 'cb3dd4783d787a78a2c9a4e18b86a426',
'asin_list' : asin}
# first request.. make the session key associated with the asin
data = urllib.urlencode(values)
req = urllib2.Request(url, data)
try:
response = urllib2.urlopen(req, timeout=2)
the_page = response.read()
loaded_page = re.search('This ASIN/ISBN number is invalid', \
the_page, flags=re.DOTALL|re.IGNORECASE)
if loaded_page is not None: # the ASIN was invalid
raise RuntimeError('This ASIN/ISBN number is invalid')
except Exception:
return False
url = 'http://bulkbuyingtools.com/results.php?s=cb3dd4783d787a78a2c9a4e18b86a426'
req = urllib2.Request(url)
response = urllib2.urlopen(req)
the_page = response.read()
try:
ret_price = 0
ret_description = ''
price = re.search('<th>Image</th>.*?<th nowrap>Price:</th>\s*<td nowrap><b>\$(.*?)</b>', \
the_page, flags=re.DOTALL|re.IGNORECASE)
if price:
ret_price = price.group(1)
description = re.search('<th>Image</th>.*?<li>([^<]*?)(<a.*?</a>)?</li>\s*</ul>', \
the_page, flags=re.DOTALL|re.IGNORECASE)
if description:
ret_description = description.group(1)
return ret_description
except Exception:
return False
class InterfaceHelpers(object):
MAX_IHMAGE_WIDTH = 50
@classmethod
def get_terminal_size(klass):
import os
env = os.environ
def ioctl_GWINSZ(fd):
try:
import fcntl, termios, struct, os
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,
'1234'))
except:
return
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
# env.get() with defaults avoids a try/except here
cr = (env.get('LINES', 25), env.get('COLUMNS', 80))
return int(cr[1]), int(cr[0])
@classmethod
def print_ihmage_image(klass, asin):
term_width = InterfaceHelpers.get_terminal_size()[0]
size = min(term_width/3, InterfaceHelpers.MAX_IHMAGE_WIDTH)
call(["curl", "http://ihmage.com/" + asin + "?size="+str(size)])
if __name__ == '__main__':
ZincWizard().start()
from unittest import TestCase
import tempfile
import os
from wifi import Cell
from wifi.scheme import extract_schemes, Scheme
from wifi.exceptions import ConnectionError
NETWORK_INTERFACES_FILE = """
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
# The primary network interface
allow-hotplug eth0
iface eth0 inet dhcp
iface wlan0-work inet dhcp
wpa-ssid workwifi
wireless-channel auto
wpa-psk 1111111111111111111111111111111111111111111111111111111111111111
iface wlan0-coffee inet dhcp
wireless-essid Coffee WiFi
wireless-channel auto
iface wlan0-home inet dhcp
wpa-ssid homewifi
wpa-psk 2222222222222222222222222222222222222222222222222222222222222222
wireless-channel auto
iface wlan0-coffee2 inet dhcp
wireless-essid Coffee 2
wireless-channel auto
"""
class TestSchemes(TestCase):
def setUp(self):
fd, interfaces = tempfile.mkstemp()
os.close(fd)  # close the raw descriptor; the file is reopened by path below
with open(interfaces, 'w') as f:
f.write(NETWORK_INTERFACES_FILE)
self.Scheme = Scheme.for_file(interfaces)
def tearDown(self):
os.remove(self.Scheme.interfaces)
def test_scheme_extraction(self):
work, coffee, home, coffee2 = extract_schemes(NETWORK_INTERFACES_FILE)
assert work.name == 'work'
assert work.options['wpa-ssid'] == 'workwifi'
assert coffee.name == 'coffee'
assert coffee.options['wireless-essid'] == 'Coffee WiFi'
def test_str(self):
scheme = self.Scheme('wlan0', 'test')
assert str(scheme) == 'iface wlan0-test inet dhcp\n'
scheme = self.Scheme('wlan0', 'test', {
'wpa-ssid': 'workwifi',
})
self.assertEqual(str(scheme), 'iface wlan0-test inet dhcp\n wpa-ssid workwifi\n')
def test_find(self):
work = self.Scheme.find('wlan0', 'work')
assert work.options['wpa-ssid'] == 'workwifi'
def test_delete(self):
work = self.Scheme.find('wlan0', 'work')
work.delete()
self.assertIsNone(self.Scheme.find('wlan0', 'work'))
assert self.Scheme.find('wlan0', 'coffee')
def test_save(self):
scheme = self.Scheme('wlan0', 'test')
scheme.save()
assert self.Scheme.find('wlan0', 'test')
class TestActivation(TestCase):
def test_successful_connection(self):
scheme = Scheme('wlan0', 'test')
connection = scheme.parse_ifup_output(SUCCESSFUL_IFUP_OUTPUT)
self.assertEqual(connection.scheme, scheme)
self.assertEqual(connection.ip_address, '192.168.1.113')
def test_failed_connection(self):
scheme = Scheme('wlan0', 'test')
self.assertRaises(ConnectionError, scheme.parse_ifup_output, FAILED_IFUP_OUTPUT)
class TestForCell(TestCase):
def test_unencrypted(self):
cell = Cell()
cell.ssid = 'SSID'
cell.encrypted = False
scheme = Scheme.for_cell('wlan0', 'test', cell)
self.assertEqual(scheme.options, {
'wireless-essid': 'SSID',
'wireless-channel': 'auto',
})
def test_wep_hex(self):
cell = Cell()
cell.ssid = 'SSID'
cell.encrypted = True
cell.encryption_type = 'wep'
# hex key lengths: 10, 26, 32, 58
hex_keys = ("01234567ab", "0123456789abc" * 2, "0123456789abcdef" * 2, "0123456789abc" * 2 + "0123456789abcdef" * 2)
for key in hex_keys:
scheme = Scheme.for_cell('wlan0', 'test', cell, key)
self.assertEqual(scheme.options, {
'wireless-essid': 'SSID',
'wireless-key': key
})
def test_wep_ascii(self):
cell = Cell()
cell.ssid = 'SSID'
cell.encrypted = True
cell.encryption_type = 'wep'
# ascii key lengths: 5, 13, 16, 29
ascii_keys = ('a' * 5, 'a' * 13, 'a' * 16, 'a' * 29)
for key in ascii_keys:
scheme = Scheme.for_cell('wlan0', 'test', cell, key)
self.assertEqual(scheme.options, {
'wireless-essid': 'SSID',
'wireless-key': 's:' + key
})
def test_wpa2(self):
cell = Cell()
cell.ssid = 'SSID'
cell.encrypted = True
cell.encryption_type = 'wpa2'
scheme = Scheme.for_cell('wlan0', 'test', cell, b'passkey')
self.assertEqual(scheme.options, {
'wpa-ssid': 'SSID',
'wpa-psk': 'ea1548d4e8850c8d94c5ef9ed6fe483981b64c1436952cb1bf80c08a68cdc763',
'wireless-channel': 'auto',
})
def test_wpa(self):
cell = Cell()
cell.ssid = 'SSID'
cell.encrypted = True
cell.encryption_type = 'wpa'
scheme = Scheme.for_cell('wlan0', 'test', cell, 'passkey')
self.assertEqual(scheme.options, {
'wpa-ssid': 'SSID',
'wpa-psk': 'ea1548d4e8850c8d94c5ef9ed6fe483981b64c1436952cb1bf80c08a68cdc763',
'wireless-channel': 'auto',
})
SUCCESSFUL_IFDOWN_OUTPUT = """Internet Systems Consortium DHCP Client 4.2.4
Copyright 2004-2012 Internet Systems Consortium.
All rights reserved.
For info, please visit https://www.isc.org/software/dhcp/
Listening on LPF/wlan0/9c:4e:36:5d:2c:64
Sending on LPF/wlan0/9c:4e:36:5d:2c:64
Sending on Socket/fallback
DHCPRELEASE on wlan0 to 192.168.1.1 port 67
"""
SUCCESSFUL_IFUP_OUTPUT = """Internet Systems Consortium DHCP Client 4.2.4
Copyright 2004-2012 Internet Systems Consortium.
All rights reserved.
For info, please visit https://www.isc.org/software/dhcp/
Listening on LPF/wlan0/9c:4e:36:5d:2c:64
Sending on LPF/wlan0/9c:4e:36:5d:2c:64
Sending on Socket/fallback
DHCPDISCOVER on wlan0 to 255.255.255.255 port 67 interval 4
DHCPDISCOVER on wlan0 to 255.255.255.255 port 67 interval 8
DHCPREQUEST on wlan0 to 255.255.255.255 port 67
DHCPOFFER from 192.168.1.1
DHCPACK from 192.168.1.1
bound to 192.168.1.113 -- renewal in 2776 seconds.
"""
FAILED_IFUP_OUTPUT = """Internet Systems Consortium DHCP Client 4.2.4
Copyright 2004-2012 Internet Systems Consortium.
All rights reserved.
For info, please visit https://www.isc.org/software/dhcp/
Listening on LPF/wlan0/9c:4e:36:5d:2c:64
Sending on LPF/wlan0/9c:4e:36:5d:2c:64
Sending on Socket/fallback
DHCPDISCOVER on wlan0 to 255.255.255.255 port 67 interval 5
DHCPDISCOVER on wlan0 to 255.255.255.255 port 67 interval 8
DHCPDISCOVER on wlan0 to 255.255.255.255 port 67 interval 18
DHCPDISCOVER on wlan0 to 255.255.255.255 port 67 interval 18
DHCPDISCOVER on wlan0 to 255.255.255.255 port 67 interval 12
No DHCPOFFERS received.
No working leases in persistent database - sleeping.
"""
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Face interface compliance of the gRPC Python Beta API."""
import threading
import unittest
from grpc.beta import implementations
from grpc.beta import interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities
from tests.unit import resources
from tests.unit.beta import test_utilities
from tests.unit.framework.common import test_constants
_SERVER_HOST_OVERRIDE = 'foo.test.google.fr'
_PER_RPC_CREDENTIALS_METADATA_KEY = 'my-call-credentials-metadata-key'
_PER_RPC_CREDENTIALS_METADATA_VALUE = 'my-call-credentials-metadata-value'
_GROUP = 'group'
_UNARY_UNARY = 'unary-unary'
_UNARY_STREAM = 'unary-stream'
_STREAM_UNARY = 'stream-unary'
_STREAM_STREAM = 'stream-stream'
_REQUEST = b'abc'
_RESPONSE = b'123'
class _Servicer(object):
def __init__(self):
self._condition = threading.Condition()
self._peer = None
self._serviced = False
def unary_unary(self, request, context):
with self._condition:
self._request = request
self._peer = context.protocol_context().peer()
self._invocation_metadata = context.invocation_metadata()
context.protocol_context().disable_next_response_compression()
self._serviced = True
self._condition.notify_all()
return _RESPONSE
def unary_stream(self, request, context):
with self._condition:
self._request = request
self._peer = context.protocol_context().peer()
self._invocation_metadata = context.invocation_metadata()
context.protocol_context().disable_next_response_compression()
self._serviced = True
self._condition.notify_all()
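# "return" before "yield" makes this method a generator that terminates
# immediately, i.e. a unary->stream RPC that sends back no responses.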
return
yield
def stream_unary(self, request_iterator, context):
for request in request_iterator:
self._request = request
with self._condition:
self._peer = context.protocol_context().peer()
self._invocation_metadata = context.invocation_metadata()
context.protocol_context().disable_next_response_compression()
self._serviced = True
self._condition.notify_all()
return _RESPONSE
def stream_stream(self, request_iterator, context):
for request in request_iterator:
with self._condition:
self._peer = context.protocol_context().peer()
context.protocol_context().disable_next_response_compression()
yield _RESPONSE
with self._condition:
self._invocation_metadata = context.invocation_metadata()
self._serviced = True
self._condition.notify_all()
def peer(self):
with self._condition:
return self._peer
def block_until_serviced(self):
with self._condition:
while not self._serviced:
self._condition.wait()
class _BlockingIterator(object):
def __init__(self, upstream):
self._condition = threading.Condition()
self._upstream = upstream
self._allowed = []
def __iter__(self):
return self
def next(self):
with self._condition:
while True:
if self._allowed is None:
raise StopIteration()
elif self._allowed:
return self._allowed.pop(0)
else:
self._condition.wait()
def allow(self):
with self._condition:
try:
self._allowed.append(next(self._upstream))
except StopIteration:
self._allowed = None
self._condition.notify_all()
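# Sketch of how the tests drive _BlockingIterator: the consumer blocks in
# next() until the test calls allow(), which releases one upstream item.
#
#   requests = _BlockingIterator(iter((_REQUEST,)))
#   requests.allow()   # lets the RPC consume _REQUEST
#   requests.allow()   # upstream exhausted -> consumer sees StopIteration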
def _metadata_plugin(context, callback):
callback([(_PER_RPC_CREDENTIALS_METADATA_KEY,
_PER_RPC_CREDENTIALS_METADATA_VALUE)], None)
class BetaFeaturesTest(unittest.TestCase):
def setUp(self):
self._servicer = _Servicer()
method_implementations = {
(_GROUP, _UNARY_UNARY):
utilities.unary_unary_inline(self._servicer.unary_unary),
(_GROUP, _UNARY_STREAM):
utilities.unary_stream_inline(self._servicer.unary_stream),
(_GROUP, _STREAM_UNARY):
utilities.stream_unary_inline(self._servicer.stream_unary),
(_GROUP, _STREAM_STREAM):
utilities.stream_stream_inline(self._servicer.stream_stream),
}
cardinalities = {
_UNARY_UNARY: cardinality.Cardinality.UNARY_UNARY,
_UNARY_STREAM: cardinality.Cardinality.UNARY_STREAM,
_STREAM_UNARY: cardinality.Cardinality.STREAM_UNARY,
_STREAM_STREAM: cardinality.Cardinality.STREAM_STREAM,
}
server_options = implementations.server_options(
thread_pool_size=test_constants.POOL_SIZE)
self._server = implementations.server(
method_implementations, options=server_options)
server_credentials = implementations.ssl_server_credentials(
[(resources.private_key(), resources.certificate_chain(),),])
port = self._server.add_secure_port('[::]:0', server_credentials)
self._server.start()
self._channel_credentials = implementations.ssl_channel_credentials(
resources.test_root_certificates(), None, None)
self._call_credentials = implementations.metadata_call_credentials(
_metadata_plugin)
channel = test_utilities.not_really_secure_channel(
'localhost', port, self._channel_credentials, _SERVER_HOST_OVERRIDE)
stub_options = implementations.stub_options(
thread_pool_size=test_constants.POOL_SIZE)
self._dynamic_stub = implementations.dynamic_stub(
channel, _GROUP, cardinalities, options=stub_options)
def tearDown(self):
self._dynamic_stub = None
self._server.stop(test_constants.SHORT_TIMEOUT).wait()
def test_unary_unary(self):
call_options = interfaces.grpc_call_options(
disable_compression=True, credentials=self._call_credentials)
response = getattr(self._dynamic_stub, _UNARY_UNARY)(
_REQUEST, test_constants.LONG_TIMEOUT, protocol_options=call_options)
self.assertEqual(_RESPONSE, response)
self.assertIsNotNone(self._servicer.peer())
invocation_metadata = [(metadatum.key, metadatum.value) for metadatum in
self._servicer._invocation_metadata]
self.assertIn(
(_PER_RPC_CREDENTIALS_METADATA_KEY,
_PER_RPC_CREDENTIALS_METADATA_VALUE),
invocation_metadata)
def test_unary_stream(self):
call_options = interfaces.grpc_call_options(
disable_compression=True, credentials=self._call_credentials)
response_iterator = getattr(self._dynamic_stub, _UNARY_STREAM)(
_REQUEST, test_constants.LONG_TIMEOUT, protocol_options=call_options)
self._servicer.block_until_serviced()
self.assertIsNotNone(self._servicer.peer())
invocation_metadata = [(metadatum.key, metadatum.value) for metadatum in
self._servicer._invocation_metadata]
self.assertIn(
(_PER_RPC_CREDENTIALS_METADATA_KEY,
_PER_RPC_CREDENTIALS_METADATA_VALUE),
invocation_metadata)
def test_stream_unary(self):
call_options = interfaces.grpc_call_options(
credentials=self._call_credentials)
request_iterator = _BlockingIterator(iter((_REQUEST,)))
response_future = getattr(self._dynamic_stub, _STREAM_UNARY).future(
request_iterator, test_constants.LONG_TIMEOUT,
protocol_options=call_options)
response_future.protocol_context().disable_next_request_compression()
request_iterator.allow()
response_future.protocol_context().disable_next_request_compression()
request_iterator.allow()
self._servicer.block_until_serviced()
self.assertIsNotNone(self._servicer.peer())
self.assertEqual(_RESPONSE, response_future.result())
invocation_metadata = [(metadatum.key, metadatum.value) for metadatum in
self._servicer._invocation_metadata]
self.assertIn(
(_PER_RPC_CREDENTIALS_METADATA_KEY,
_PER_RPC_CREDENTIALS_METADATA_VALUE),
invocation_metadata)
def test_stream_stream(self):
call_options = interfaces.grpc_call_options(
credentials=self._call_credentials)
request_iterator = _BlockingIterator(iter((_REQUEST,)))
response_iterator = getattr(self._dynamic_stub, _STREAM_STREAM)(
request_iterator, test_constants.SHORT_TIMEOUT,
protocol_options=call_options)
response_iterator.protocol_context().disable_next_request_compression()
request_iterator.allow()
response = next(response_iterator)
response_iterator.protocol_context().disable_next_request_compression()
request_iterator.allow()
self._servicer.block_until_serviced()
self.assertIsNotNone(self._servicer.peer())
self.assertEqual(_RESPONSE, response)
invocation_metadata = [(metadatum.key, metadatum.value) for metadatum in
self._servicer._invocation_metadata]
self.assertIn(
(_PER_RPC_CREDENTIALS_METADATA_KEY,
_PER_RPC_CREDENTIALS_METADATA_VALUE),
invocation_metadata)
class ContextManagementAndLifecycleTest(unittest.TestCase):
def setUp(self):
self._servicer = _Servicer()
self._method_implementations = {
(_GROUP, _UNARY_UNARY):
utilities.unary_unary_inline(self._servicer.unary_unary),
(_GROUP, _UNARY_STREAM):
utilities.unary_stream_inline(self._servicer.unary_stream),
(_GROUP, _STREAM_UNARY):
utilities.stream_unary_inline(self._servicer.stream_unary),
(_GROUP, _STREAM_STREAM):
utilities.stream_stream_inline(self._servicer.stream_stream),
}
self._cardinalities = {
_UNARY_UNARY: cardinality.Cardinality.UNARY_UNARY,
_UNARY_STREAM: cardinality.Cardinality.UNARY_STREAM,
_STREAM_UNARY: cardinality.Cardinality.STREAM_UNARY,
_STREAM_STREAM: cardinality.Cardinality.STREAM_STREAM,
}
self._server_options = implementations.server_options(
thread_pool_size=test_constants.POOL_SIZE)
self._server_credentials = implementations.ssl_server_credentials(
[(resources.private_key(), resources.certificate_chain(),),])
self._channel_credentials = implementations.ssl_channel_credentials(
resources.test_root_certificates(), None, None)
self._stub_options = implementations.stub_options(
thread_pool_size=test_constants.POOL_SIZE)
def test_stub_context(self):
server = implementations.server(
self._method_implementations, options=self._server_options)
port = server.add_secure_port('[::]:0', self._server_credentials)
server.start()
channel = test_utilities.not_really_secure_channel(
'localhost', port, self._channel_credentials, _SERVER_HOST_OVERRIDE)
dynamic_stub = implementations.dynamic_stub(
channel, _GROUP, self._cardinalities, options=self._stub_options)
for _ in range(100):
with dynamic_stub:
pass
for _ in range(10):
with dynamic_stub:
call_options = interfaces.grpc_call_options(
disable_compression=True)
response = getattr(dynamic_stub, _UNARY_UNARY)(
_REQUEST, test_constants.LONG_TIMEOUT,
protocol_options=call_options)
self.assertEqual(_RESPONSE, response)
self.assertIsNotNone(self._servicer.peer())
server.stop(test_constants.SHORT_TIMEOUT).wait()
def test_server_lifecycle(self):
for _ in range(100):
server = implementations.server(
self._method_implementations, options=self._server_options)
port = server.add_secure_port('[::]:0', self._server_credentials)
server.start()
server.stop(test_constants.SHORT_TIMEOUT).wait()
for _ in range(100):
server = implementations.server(
self._method_implementations, options=self._server_options)
server.add_secure_port('[::]:0', self._server_credentials)
server.add_insecure_port('[::]:0')
with server:
server.stop(test_constants.SHORT_TIMEOUT)
server.stop(test_constants.SHORT_TIMEOUT)
if __name__ == '__main__':
unittest.main(verbosity=2)
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Running or runtime configuration base classes.
"""
from abc import ABCMeta
from abc import abstractmethod
import functools
import numbers
import logging
import uuid
import six
from ryu.services.protocols.bgp.base import add_bgp_error_metadata
from ryu.services.protocols.bgp.base import BGPSException
from ryu.services.protocols.bgp.base import get_validator
from ryu.services.protocols.bgp.base import RUNTIME_CONF_ERROR_CODE
from ryu.services.protocols.bgp.base import validate
from ryu.services.protocols.bgp.utils import validation
from ryu.services.protocols.bgp.utils.validation import is_valid_asn
LOG = logging.getLogger('bgpspeaker.rtconf.base')
#
# Nested settings.
#
CAP_REFRESH = 'cap_refresh'
CAP_ENHANCED_REFRESH = 'cap_enhanced_refresh'
CAP_FOUR_OCTET_AS_NUMBER = 'cap_four_octet_as_number'
CAP_MBGP_IPV4 = 'cap_mbgp_ipv4'
CAP_MBGP_IPV6 = 'cap_mbgp_ipv6'
CAP_MBGP_VPNV4 = 'cap_mbgp_vpnv4'
CAP_MBGP_VPNV6 = 'cap_mbgp_vpnv6'
CAP_MBGP_EVPN = 'cap_mbgp_evpn'
CAP_MBGP_IPV4FS = 'cap_mbgp_ipv4fs'
CAP_MBGP_VPNV4FS = 'cap_mbgp_vpnv4fs'
CAP_RTC = 'cap_rtc'
RTC_AS = 'rtc_as'
HOLD_TIME = 'hold_time'
# To control how many prefixes can be received from a neighbor.
# 0 value indicates no limit and other related options will be ignored.
# Current behavior is to log that the limit has been reached.
MAX_PREFIXES = 'max_prefixes'
# Has same meaning as: http://www.juniper.net/techpubs/software/junos/junos94
# /swconfig-routing/disabling-suppression-of-route-
# advertisements.html#id-13255463
ADVERTISE_PEER_AS = 'advertise_peer_as'
# MED - MULTI_EXIT_DISC
MULTI_EXIT_DISC = 'multi_exit_disc'
# Extended community attribute route origin.
SITE_OF_ORIGINS = 'site_of_origins'
# Constants related to errors.
CONF_NAME = 'conf_name'
CONF_VALUE = 'conf_value'
# Max. value limits
MAX_NUM_IMPORT_RT = 1000
MAX_NUM_EXPORT_RT = 250
MAX_NUM_SOO = 10
# =============================================================================
# Runtime configuration errors or exceptions.
# =============================================================================
@add_bgp_error_metadata(code=RUNTIME_CONF_ERROR_CODE, sub_code=1,
def_desc='Error with runtime-configuration.')
class RuntimeConfigError(BGPSException):
"""Base class for all runtime configuration errors.
"""
pass
@add_bgp_error_metadata(code=RUNTIME_CONF_ERROR_CODE, sub_code=2,
def_desc='Missing required configuration.')
class MissingRequiredConf(RuntimeConfigError):
"""Exception raised when trying to configure with missing required
settings.
"""
def __init__(self, **kwargs):
conf_name = kwargs.get('conf_name')
if conf_name:
super(MissingRequiredConf, self).__init__(
desc='Missing required configuration: %s' % conf_name)
else:
super(MissingRequiredConf, self).__init__(desc=kwargs.get('desc'))
@add_bgp_error_metadata(code=RUNTIME_CONF_ERROR_CODE, sub_code=3,
def_desc='Incorrect Type for configuration.')
class ConfigTypeError(RuntimeConfigError):
"""Exception raised when configuration value type miss-match happens.
"""
def __init__(self, **kwargs):
conf_name = kwargs.get(CONF_NAME)
conf_value = kwargs.get(CONF_VALUE)
if conf_name and conf_value:
super(ConfigTypeError, self).__init__(
desc='Incorrect Type %s for configuration: %s' %
(conf_value, conf_name))
elif conf_name:
super(ConfigTypeError, self).__init__(
desc='Incorrect Type for configuration: %s' % conf_name)
else:
super(ConfigTypeError, self).__init__(desc=kwargs.get('desc'))
@add_bgp_error_metadata(code=RUNTIME_CONF_ERROR_CODE, sub_code=4,
def_desc='Incorrect Value for configuration.')
class ConfigValueError(RuntimeConfigError):
"""Exception raised when configuration value is of correct type but
incorrect value.
"""
def __init__(self, **kwargs):
conf_name = kwargs.get(CONF_NAME)
conf_value = kwargs.get(CONF_VALUE)
if conf_name and conf_value:
super(ConfigValueError, self).__init__(
desc='Incorrect Value %s for configuration: %s' %
(conf_value, conf_name))
elif conf_name:
super(ConfigValueError, self).__init__(
desc='Incorrect Value for configuration: %s' % conf_name)
else:
super(ConfigValueError, self).__init__(desc=kwargs.get('desc'))
# =============================================================================
# Configuration base classes.
# =============================================================================
@six.add_metaclass(ABCMeta)
class BaseConf(object):
"""Base class for a set of configuration values.
Configurations can be required or optional. Also acts as a container of
configuration change listeners.
"""
def __init__(self, **kwargs):
self._req_settings = self.get_req_settings()
self._opt_settings = self.get_opt_settings()
self._valid_evts = self.get_valid_evts()
self._listeners = {}
self._settings = {}
# validate required and unknown settings
self._validate_req_unknown_settings(**kwargs)
# Initialize configuration settings.
self._init_req_settings(**kwargs)
self._init_opt_settings(**kwargs)
@property
def settings(self):
"""Returns a copy of current settings."""
return self._settings.copy()
@classmethod
def get_valid_evts(cls):
return set()
@classmethod
def get_req_settings(cls):
return set()
@classmethod
def get_opt_settings(cls):
return set()
@abstractmethod
def _init_opt_settings(self, **kwargs):
"""Sub-classes should override this method to initialize optional
settings.
"""
pass
@abstractmethod
def update(self, **kwargs):
# Validate given values
self._validate_req_unknown_settings(**kwargs)
def _validate_req_unknown_settings(self, **kwargs):
"""Checks if required settings are present.
Also checks if unknown requirements are present.
"""
# Validate given configuration.
self._all_attrs = (self._req_settings | self._opt_settings)
if not kwargs and len(self._req_settings) > 0:
raise MissingRequiredConf(desc='Missing all required attributes.')
given_attrs = frozenset(kwargs.keys())
unknown_attrs = given_attrs - self._all_attrs
if unknown_attrs:
raise RuntimeConfigError(desc=(
'Unknown attributes: %s' %
', '.join([str(i) for i in unknown_attrs])))
missing_req_settings = self._req_settings - given_attrs
if missing_req_settings:
raise MissingRequiredConf(conf_name=list(missing_req_settings))
def _init_req_settings(self, **kwargs):
for req_attr in self._req_settings:
req_attr_value = kwargs.get(req_attr)
if req_attr_value is None:
raise MissingRequiredConf(conf_name=req_attr)
# Validate attribute value
req_attr_value = get_validator(req_attr)(req_attr_value)
self._settings[req_attr] = req_attr_value
def add_listener(self, evt, callback):
# if (evt not in self.get_valid_evts()):
# raise RuntimeConfigError(desc=('Unknown event %s' % evt))
listeners = self._listeners.get(evt, None)
if not listeners:
listeners = set()
self._listeners[evt] = listeners
listeners.add(callback)
def remove_listener(self, evt, callback):
if evt in self.get_valid_evts():
listeners = self._listeners.get(evt, None)
if listeners and (callback in listeners):
listeners.remove(callback)
return True
return False
def _notify_listeners(self, evt, value):
listeners = self._listeners.get(evt, [])
for callback in listeners:
callback(ConfEvent(self, evt, value))
def __repr__(self):
return '%s(%r)' % (self.__class__, self._settings)
class ConfWithId(BaseConf):
"""Configuration settings related to identity."""
# Config./resource identifier.
ID = 'id'
# Config./resource name.
NAME = 'name'
# Config./resource description.
DESCRIPTION = 'description'
UPDATE_NAME_EVT = 'update_name_evt'
UPDATE_DESCRIPTION_EVT = 'update_description_evt'
VALID_EVT = frozenset([UPDATE_NAME_EVT, UPDATE_DESCRIPTION_EVT])
OPTIONAL_SETTINGS = frozenset([ID, NAME, DESCRIPTION])
def __init__(self, **kwargs):
super(ConfWithId, self).__init__(**kwargs)
@classmethod
def get_opt_settings(cls):
self_confs = super(ConfWithId, cls).get_opt_settings()
self_confs.update(ConfWithId.OPTIONAL_SETTINGS)
return self_confs
@classmethod
def get_req_settings(cls):
self_confs = super(ConfWithId, cls).get_req_settings()
return self_confs
@classmethod
def get_valid_evts(cls):
self_valid_evts = super(ConfWithId, cls).get_valid_evts()
self_valid_evts.update(ConfWithId.VALID_EVT)
return self_valid_evts
def _init_opt_settings(self, **kwargs):
super(ConfWithId, self)._init_opt_settings(**kwargs)
self._settings[ConfWithId.ID] = \
compute_optional_conf(ConfWithId.ID, str(uuid.uuid4()), **kwargs)
self._settings[ConfWithId.NAME] = \
compute_optional_conf(ConfWithId.NAME, str(self), **kwargs)
self._settings[ConfWithId.DESCRIPTION] = \
compute_optional_conf(ConfWithId.DESCRIPTION, str(self), **kwargs)
@property
def id(self):
return self._settings[ConfWithId.ID]
@property
def name(self):
return self._settings[ConfWithId.NAME]
@name.setter
def name(self, new_name):
old_name = self.name
if not new_name:
new_name = repr(self)
else:
get_validator(ConfWithId.NAME)(new_name)
if old_name != new_name:
self._settings[ConfWithId.NAME] = new_name
self._notify_listeners(ConfWithId.UPDATE_NAME_EVT,
(old_name, self.name))
@property
def description(self):
return self._settings[ConfWithId.DESCRIPTION]
@description.setter
def description(self, new_description):
old_desc = self.description
if not new_description:
new_description = str(self)
else:
get_validator(ConfWithId.DESCRIPTION)(new_description)
if old_desc != new_description:
self._settings[ConfWithId.DESCRIPTION] = new_description
self._notify_listeners(ConfWithId.UPDATE_DESCRIPTION_EVT,
(old_desc, self.description))
def update(self, **kwargs):
# Update inherited configurations
super(ConfWithId, self).update(**kwargs)
self.name = compute_optional_conf(ConfWithId.NAME,
str(self),
**kwargs)
self.description = compute_optional_conf(ConfWithId.DESCRIPTION,
str(self),
**kwargs)
class ConfWithStats(BaseConf):
"""Configuration settings related to statistics collection."""
# Enable or disable statistics logging.
STATS_LOG_ENABLED = 'statistics_log_enabled'
DEFAULT_STATS_LOG_ENABLED = False
# Statistics logging time.
STATS_TIME = 'statistics_interval'
DEFAULT_STATS_TIME = 60
UPDATE_STATS_LOG_ENABLED_EVT = 'update_stats_log_enabled_evt'
UPDATE_STATS_TIME_EVT = 'update_stats_time_evt'
VALID_EVT = frozenset([UPDATE_STATS_LOG_ENABLED_EVT,
UPDATE_STATS_TIME_EVT])
OPTIONAL_SETTINGS = frozenset([STATS_LOG_ENABLED, STATS_TIME])
def __init__(self, **kwargs):
super(ConfWithStats, self).__init__(**kwargs)
def _init_opt_settings(self, **kwargs):
super(ConfWithStats, self)._init_opt_settings(**kwargs)
self._settings[ConfWithStats.STATS_LOG_ENABLED] = \
compute_optional_conf(ConfWithStats.STATS_LOG_ENABLED,
ConfWithStats.DEFAULT_STATS_LOG_ENABLED,
**kwargs)
self._settings[ConfWithStats.STATS_TIME] = \
compute_optional_conf(ConfWithStats.STATS_TIME,
ConfWithStats.DEFAULT_STATS_TIME,
**kwargs)
@property
def stats_log_enabled(self):
return self._settings[ConfWithStats.STATS_LOG_ENABLED]
@stats_log_enabled.setter
def stats_log_enabled(self, enabled):
get_validator(ConfWithStats.STATS_LOG_ENABLED)(enabled)
if enabled != self.stats_log_enabled:
self._settings[ConfWithStats.STATS_LOG_ENABLED] = enabled
self._notify_listeners(ConfWithStats.UPDATE_STATS_LOG_ENABLED_EVT,
enabled)
@property
def stats_time(self):
return self._settings[ConfWithStats.STATS_TIME]
@stats_time.setter
def stats_time(self, stats_time):
get_validator(ConfWithStats.STATS_TIME)(stats_time)
if stats_time != self.stats_time:
self._settings[ConfWithStats.STATS_TIME] = stats_time
self._notify_listeners(ConfWithStats.UPDATE_STATS_TIME_EVT,
stats_time)
@classmethod
def get_opt_settings(cls):
confs = super(ConfWithStats, cls).get_opt_settings()
confs.update(ConfWithStats.OPTIONAL_SETTINGS)
return confs
@classmethod
def get_valid_evts(cls):
valid_evts = super(ConfWithStats, cls).get_valid_evts()
valid_evts.update(ConfWithStats.VALID_EVT)
return valid_evts
def update(self, **kwargs):
# Update inherited configurations
super(ConfWithStats, self).update(**kwargs)
self.stats_log_enabled = \
compute_optional_conf(ConfWithStats.STATS_LOG_ENABLED,
ConfWithStats.DEFAULT_STATS_LOG_ENABLED,
**kwargs)
self.stats_time = \
compute_optional_conf(ConfWithStats.STATS_TIME,
ConfWithStats.DEFAULT_STATS_TIME,
**kwargs)
@six.add_metaclass(ABCMeta)
class BaseConfListener(object):
"""Base class of all configuration listeners."""
def __init__(self, base_conf):
pass
# TODO(PH): re-visit later and check if we need this check
# if not isinstance(base_conf, BaseConf):
# raise TypeError('Currently we only support listening to '
# 'instances of BaseConf')
class ConfWithIdListener(BaseConfListener):
def __init__(self, conf_with_id):
assert conf_with_id
super(ConfWithIdListener, self).__init__(conf_with_id)
conf_with_id.add_listener(ConfWithId.UPDATE_NAME_EVT,
self.on_chg_name_conf_with_id)
conf_with_id.add_listener(ConfWithId.UPDATE_DESCRIPTION_EVT,
self.on_chg_desc_conf_with_id)
def on_chg_name_conf_with_id(self, conf_evt):
# Note: not made abstract because this is not an important event.
raise NotImplementedError()
def on_chg_desc_conf_with_id(self, conf_evt):
# Note: not made abstract because this is not an important event.
raise NotImplementedError()
class ConfWithStatsListener(BaseConfListener):
def __init__(self, conf_with_stats):
assert conf_with_stats
super(ConfWithStatsListener, self).__init__(conf_with_stats)
conf_with_stats.add_listener(
ConfWithStats.UPDATE_STATS_LOG_ENABLED_EVT,
self.on_chg_stats_enabled_conf_with_stats)
conf_with_stats.add_listener(ConfWithStats.UPDATE_STATS_TIME_EVT,
self.on_chg_stats_time_conf_with_stats)
@abstractmethod
def on_chg_stats_time_conf_with_stats(self, conf_evt):
raise NotImplementedError()
@abstractmethod
def on_chg_stats_enabled_conf_with_stats(self, conf_evt):
raise NotImplementedError()
@functools.total_ordering
class ConfEvent(object):
"""Encapsulates configuration settings change/update event."""
def __init__(self, evt_src, evt_name, evt_value):
"""Creates an instance using given parameters.
Parameters:
-`evt_src`: (BaseConf) source of the event
-`evt_name`: (str) name of event, has to be one of the valid
event of `evt_src`
- `evt_value`: (tuple) event context that helps event handler
"""
if evt_name not in evt_src.get_valid_evts():
raise ValueError('Event %s is not a valid event for type %s.' %
(evt_name, type(evt_src)))
self._src = evt_src
self._name = evt_name
self._value = evt_value
@property
def src(self):
return self._src
@property
def name(self):
return self._name
@property
def value(self):
return self._value
def __repr__(self):
return '<ConfEvent(%s, %s, %s)>' % (self.src, self.name, self.value)
def __str__(self):
return ('ConfEvent(src=%s, name=%s, value=%s)' %
(self.src, self.name, self.value))
def __lt__(self, other):
return ((self.src, self.name, self.value) <
(other.src, other.name, other.value))
def __eq__(self, other):
return ((self.src, self.name, self.value) ==
(other.src, other.name, other.value))
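# Sketch of the listener flow these classes implement: a callback registered
# via add_listener() receives a ConfEvent whose value carries the change,
# e.g. the name setter notifies with an (old_name, new_name) tuple.
#
#   def on_name_change(evt):
#       old_name, new_name = evt.value
#
#   conf.add_listener(ConfWithId.UPDATE_NAME_EVT, on_name_change)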
# =============================================================================
# Runtime configuration setting validators and their registry.
# =============================================================================
@validate(name=ConfWithId.ID)
def validate_conf_id(identifier):
if not isinstance(identifier, str):
raise ConfigTypeError(conf_name=ConfWithId.ID, conf_value=identifier)
if len(identifier) > 128:
raise ConfigValueError(conf_name=ConfWithId.ID, conf_value=identifier)
return identifier
@validate(name=ConfWithId.NAME)
def validate_conf_name(name):
if not isinstance(name, str):
raise ConfigTypeError(conf_name=ConfWithId.NAME, conf_value=name)
if len(name) > 128:
raise ConfigValueError(conf_name=ConfWithId.NAME, conf_value=name)
return name
@validate(name=ConfWithId.DESCRIPTION)
def validate_conf_desc(description):
if not isinstance(description, str):
raise ConfigTypeError(conf_name=ConfWithId.DESCRIPTION,
conf_value=description)
return description
@validate(name=ConfWithStats.STATS_LOG_ENABLED)
def validate_stats_log_enabled(stats_log_enabled):
if not isinstance(stats_log_enabled, bool):
raise ConfigTypeError(desc='Statistics log enabled settings can only'
' be boolean type.')
return stats_log_enabled
@validate(name=ConfWithStats.STATS_TIME)
def validate_stats_time(stats_time):
if not isinstance(stats_time, numbers.Integral):
raise ConfigTypeError(desc='Statistics log timer value has to be of '
'integral type but got: %r' % stats_time)
if stats_time < 10:
raise ConfigValueError(desc='Statistics log timer cannot be set to '
'less than 10 sec, given timer value %s.' %
stats_time)
return stats_time
@validate(name=CAP_REFRESH)
def validate_cap_refresh(crefresh):
if not isinstance(crefresh, bool):
raise ConfigTypeError(desc='Invalid Refresh capability settings: %s. '
'Boolean value expected' % crefresh)
return crefresh
@validate(name=CAP_ENHANCED_REFRESH)
def validate_cap_enhanced_refresh(cer):
if not isinstance(cer, bool):
raise ConfigTypeError(desc='Invalid Enhanced Refresh capability '
'settings: %s. Boolean value expected' % cer)
return cer
@validate(name=CAP_FOUR_OCTET_AS_NUMBER)
def validate_cap_four_octet_as_number(cfoan):
if not isinstance(cfoan, bool):
raise ConfigTypeError(desc='Invalid Four-Octet AS Number capability '
'settings: %s. Boolean value expected' % cfoan)
return cfoan
@validate(name=CAP_MBGP_IPV4)
def validate_cap_mbgp_ipv4(cmv4):
if not isinstance(cmv4, bool):
raise ConfigTypeError(desc='Invalid MP-BGP IPv4 capability '
'settings: %s. Boolean value expected' % cmv4)
return cmv4
@validate(name=CAP_MBGP_IPV6)
def validate_cap_mbgp_ipv6(cmv6):
if not isinstance(cmv6, bool):
raise ConfigTypeError(desc='Invalid MP-BGP IPv6 capability '
'settings: %s. Boolean value expected' % cmv6)
return cmv6
@validate(name=CAP_MBGP_VPNV4)
def validate_cap_mbgp_vpnv4(cmv4):
if not isinstance(cmv4, bool):
raise ConfigTypeError(desc='Invalid MP-BGP VPNv4 capability '
'settings: %s. Boolean value expected' % cmv4)
return cmv4
@validate(name=CAP_MBGP_VPNV6)
def validate_cap_mbgp_vpnv6(cmv6):
if not isinstance(cmv6, bool):
raise ConfigTypeError(desc='Invalid MP-BGP VPNv6 capability '
'settings: %s. Boolean value expected' % cmv6)
return cmv6
@validate(name=CAP_MBGP_EVPN)
def validate_cap_mbgp_evpn(cmevpn):
if not isinstance(cmevpn, bool):
raise ConfigTypeError(desc='Invalid Ethernet VPN capability '
'settings: %s. Boolean value expected' % cmevpn)
return cmevpn
@validate(name=CAP_MBGP_IPV4FS)
def validate_cap_mbgp_ipv4fs(cmv4fs):
if not isinstance(cmv4fs, bool):
raise ConfigTypeError(desc='Invalid MP-BGP '
'IPv4 Flow Specification capability '
'settings: %s. Boolean value expected' % cmv4fs)
return cmv4fs
@validate(name=CAP_MBGP_VPNV4FS)
def validate_cap_mbgp_vpnv4fs(cmv4fs):
if not isinstance(cmv4fs, bool):
raise ConfigTypeError(desc='Invalid MP-BGP '
'VPNv4 Flow Specification capability '
'settings: %s. Boolean value expected' % cmv4fs)
return cmv4fs
@validate(name=CAP_RTC)
def validate_cap_rtc(cap_rtc):
if not isinstance(cap_rtc, bool):
raise ConfigTypeError(desc='Invalid type for specifying RTC '
'capability. Expected boolean got: %s' %
type(cap_rtc))
return cap_rtc
@validate(name=RTC_AS)
def validate_cap_rtc_as(rtc_as):
if not is_valid_asn(rtc_as):
raise ConfigValueError(desc='Invalid RTC AS configuration value: %s'
% rtc_as)
return rtc_as
@validate(name=HOLD_TIME)
def validate_hold_time(hold_time):
if ((hold_time is None) or (not isinstance(hold_time, int)) or
hold_time < 10):
raise ConfigValueError(desc='Invalid hold_time configuration value %s'
% hold_time)
return hold_time
@validate(name=MULTI_EXIT_DISC)
def validate_med(med):
if med is not None and not validation.is_valid_med(med):
raise ConfigValueError(desc='Invalid multi-exit-discriminator (MED)'
' value: %s.' % med)
return med
@validate(name=SITE_OF_ORIGINS)
def validate_soo_list(soo_list):
if not isinstance(soo_list, list):
raise ConfigTypeError(conf_name=SITE_OF_ORIGINS, conf_value=soo_list)
if len(soo_list) > MAX_NUM_SOO:
raise ConfigValueError(desc='Max. SOO is limited to %s' %
MAX_NUM_SOO)
if not all(validation.is_valid_ext_comm_attr(attr) for attr in soo_list):
raise ConfigValueError(conf_name=SITE_OF_ORIGINS,
conf_value=soo_list)
# Check if we have duplicates
unique_soos = set(soo_list)
if len(unique_soos) != len(soo_list):
raise ConfigValueError(desc='Duplicate value provided in %s' %
soo_list)
return soo_list
@validate(name=MAX_PREFIXES)
def validate_max_prefixes(max_prefixes):
if not isinstance(max_prefixes, six.integer_types):
raise ConfigTypeError(desc='Max. prefixes value should be of type '
'int or long but found %s' % type(max_prefixes))
if max_prefixes < 0:
raise ConfigValueError(desc='Invalid max. prefixes value: %s' %
max_prefixes)
return max_prefixes
@validate(name=ADVERTISE_PEER_AS)
def validate_advertise_peer_as(advertise_peer_as):
if not isinstance(advertise_peer_as, bool):
raise ConfigTypeError(desc='Invalid type for advertise-peer-as, '
'expected bool got %s' %
type(advertise_peer_as))
return advertise_peer_as
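# Validators are registered by setting name and looked up via get_validator(),
# e.g. (sketch):
#   get_validator(HOLD_TIME)(30)   # -> 30
#   get_validator(HOLD_TIME)(5)    # raises ConfigValueError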
# =============================================================================
# Other utils.
# =============================================================================
def compute_optional_conf(conf_name, default_value, **all_config):
"""Returns *conf_name* settings if provided in *all_config*, else returns
*default_value*.
Validates *conf_name* value if provided.
"""
conf_value = all_config.get(conf_name)
if conf_value is not None:
# Validate configuration value.
conf_value = get_validator(conf_name)(conf_value)
else:
conf_value = default_value
return conf_value
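# Minimal sketch of the fall-back behavior (hypothetical call sites):
#   compute_optional_conf(HOLD_TIME, 240, hold_time=30)  # -> 30, after validation
#   compute_optional_conf(HOLD_TIME, 240)                # -> 240, the default (not validated)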
# -*- coding: utf-8 -*-
"""
pygments.lexers.agile
~~~~~~~~~~~~~~~~~~~~~
Lexers for agile languages.
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, ExtendedRegexLexer, \
LexerContext, include, combined, do_insertions, bygroups, using
from pygments.token import Error, Text, Other, \
Comment, Operator, Keyword, Name, String, Number, Generic, Punctuation
from pygments.util import get_bool_opt, get_list_opt, shebang_matches
from pygments import unistring as uni
__all__ = ['PythonLexer', 'PythonConsoleLexer', 'PythonTracebackLexer',
'Python3Lexer', 'Python3TracebackLexer', 'RubyLexer',
'RubyConsoleLexer', 'PerlLexer', 'LuaLexer', 'MoonScriptLexer',
'CrocLexer', 'MiniDLexer', 'IoLexer', 'TclLexer', 'FactorLexer', 'FancyLexer']
# b/w compatibility
from pygments.lexers.functional import SchemeLexer
from pygments.lexers.jvm import IokeLexer, ClojureLexer
line_re = re.compile('.*?\n')
class PythonLexer(RegexLexer):
"""
For `Python <http://www.python.org>`_ source code.
"""
name = 'Python'
aliases = ['python', 'py']
filenames = ['*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript', '*.tac']
mimetypes = ['text/x-python', 'application/x-python']
tokens = {
'root': [
(r'\n', Text),
(r'^(\s*)([rRuU]{,2}"""(?:.|\n)*?""")', bygroups(Text, String.Doc)),
(r"^(\s*)([rRuU]{,2}'''(?:.|\n)*?''')", bygroups(Text, String.Doc)),
(r'[^\S\n]+', Text),
(r'#.*$', Comment),
(r'[]{}:(),;[]', Punctuation),
(r'\\\n', Text),
(r'\\', Text),
(r'(in|is|and|or|not)\b', Operator.Word),
(r'!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator),
include('keywords'),
(r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'),
(r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'),
(r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
'fromimport'),
(r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
'import'),
include('builtins'),
include('backtick'),
('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'),
("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'),
('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'),
("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'),
('[uU]?"""', String, combined('stringescape', 'tdqs')),
("[uU]?'''", String, combined('stringescape', 'tsqs')),
('[uU]?"', String, combined('stringescape', 'dqs')),
("[uU]?'", String, combined('stringescape', 'sqs')),
include('name'),
include('numbers'),
],
'keywords': [
(r'(assert|break|continue|del|elif|else|except|exec|'
r'finally|for|global|if|lambda|pass|print|raise|'
r'return|try|while|yield|as|with)\b', Keyword),
],
'builtins': [
(r'(?<!\.)(__import__|abs|all|any|apply|basestring|bin|bool|buffer|'
r'bytearray|bytes|callable|chr|classmethod|cmp|coerce|compile|'
r'complex|delattr|dict|dir|divmod|enumerate|eval|execfile|exit|'
r'file|filter|float|frozenset|getattr|globals|hasattr|hash|hex|id|'
r'input|int|intern|isinstance|issubclass|iter|len|list|locals|'
r'long|map|max|min|next|object|oct|open|ord|pow|property|range|'
r'raw_input|reduce|reload|repr|reversed|round|set|setattr|slice|'
r'sorted|staticmethod|str|sum|super|tuple|type|unichr|unicode|'
r'vars|xrange|zip)\b', Name.Builtin),
(r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True'
r')\b', Name.Builtin.Pseudo),
(r'(?<!\.)(ArithmeticError|AssertionError|AttributeError|'
r'BaseException|DeprecationWarning|EOFError|EnvironmentError|'
r'Exception|FloatingPointError|FutureWarning|GeneratorExit|IOError|'
r'ImportError|ImportWarning|IndentationError|IndexError|KeyError|'
r'KeyboardInterrupt|LookupError|MemoryError|NameError|'
r'NotImplemented|NotImplementedError|OSError|OverflowError|'
r'OverflowWarning|PendingDeprecationWarning|ReferenceError|'
r'RuntimeError|RuntimeWarning|StandardError|StopIteration|'
r'SyntaxError|SyntaxWarning|SystemError|SystemExit|TabError|'
r'TypeError|UnboundLocalError|UnicodeDecodeError|'
r'UnicodeEncodeError|UnicodeError|UnicodeTranslateError|'
r'UnicodeWarning|UserWarning|ValueError|VMSError|Warning|'
r'WindowsError|ZeroDivisionError)\b', Name.Exception),
],
'numbers': [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
(r'\d+[eE][+-]?[0-9]+j?', Number.Float),
(r'0[0-7]+j?', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+j?', Number.Integer)
],
'backtick': [
('`.*?`', String.Backtick),
],
'name': [
(r'@[a-zA-Z0-9_.]+', Name.Decorator),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
],
'funcname': [
('[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop')
],
'classname': [
('[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
],
'import': [
(r'(?:[ \t]|\\\n)+', Text),
(r'as\b', Keyword.Namespace),
(r',', Operator),
(r'[a-zA-Z_][a-zA-Z0-9_.]*', Name.Namespace),
(r'', Text, '#pop') # all else: go back
],
'fromimport': [
(r'(?:[ \t]|\\\n)+', Text),
(r'import\b', Keyword.Namespace, '#pop'),
(r'[a-zA-Z_.][a-zA-Z0-9_.]*', Name.Namespace),
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N{.*?}|u[a-fA-F0-9]{4}|'
r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'strings': [
(r'%(\([a-zA-Z0-9_]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
'[hlL]?[diouxXeEfFgGcrs%]', String.Interpol),
(r'[^\\\'"%\n]+', String),
# quotes, percents and backslashes must be parsed one at a time
(r'[\'"\\]', String),
# unhandled string formatting sign
(r'%', String)
# newlines are an error (use "nl" state)
],
'nl': [
(r'\n', String)
],
'dqs': [
(r'"', String, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
(r"\\\\|\\'|\\\n", String.Escape), # included here for raw strings
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
include('strings'),
include('nl')
],
'tsqs': [
(r"'''", String, '#pop'),
include('strings'),
include('nl')
],
}
def analyse_text(text):
return shebang_matches(text, r'pythonw?(2(\.\d)?)?')
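# Quick usage sketch (not part of the original module): render a snippet
# with this lexer.
#   from pygments import highlight
#   from pygments.formatters import TerminalFormatter
#   print highlight("def f():\n    return 1\n", PythonLexer(), TerminalFormatter())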
class Python3Lexer(RegexLexer):
"""
For `Python <http://www.python.org>`_ source code (version 3.0).
*New in Pygments 0.10.*
"""
name = 'Python 3'
aliases = ['python3', 'py3']
filenames = [] # Nothing until Python 3 gets widespread
mimetypes = ['text/x-python3', 'application/x-python3']
flags = re.MULTILINE | re.UNICODE
uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue)
tokens = PythonLexer.tokens.copy()
tokens['keywords'] = [
(r'(assert|break|continue|del|elif|else|except|'
r'finally|for|global|if|lambda|pass|raise|nonlocal|'
r'return|try|while|yield|as|with|True|False|None)\b', Keyword),
]
tokens['builtins'] = [
(r'(?<!\.)(__import__|abs|all|any|bin|bool|bytearray|bytes|'
r'chr|classmethod|cmp|compile|complex|delattr|dict|dir|'
r'divmod|enumerate|eval|filter|float|format|frozenset|getattr|'
r'globals|hasattr|hash|hex|id|input|int|isinstance|issubclass|'
r'iter|len|list|locals|map|max|memoryview|min|next|object|oct|'
r'open|ord|pow|print|property|range|repr|reversed|round|'
r'set|setattr|slice|sorted|staticmethod|str|sum|super|tuple|type|'
r'vars|zip)\b', Name.Builtin),
(r'(?<!\.)(self|Ellipsis|NotImplemented)\b', Name.Builtin.Pseudo),
(r'(?<!\.)(ArithmeticError|AssertionError|AttributeError|'
r'BaseException|BufferError|BytesWarning|DeprecationWarning|'
r'EOFError|EnvironmentError|Exception|FloatingPointError|'
r'FutureWarning|GeneratorExit|IOError|ImportError|'
r'ImportWarning|IndentationError|IndexError|KeyError|'
r'KeyboardInterrupt|LookupError|MemoryError|NameError|'
r'NotImplementedError|OSError|OverflowError|'
r'PendingDeprecationWarning|ReferenceError|'
r'RuntimeError|RuntimeWarning|StopIteration|'
r'SyntaxError|SyntaxWarning|SystemError|SystemExit|TabError|'
r'TypeError|UnboundLocalError|UnicodeDecodeError|'
r'UnicodeEncodeError|UnicodeError|UnicodeTranslateError|'
r'UnicodeWarning|UserWarning|ValueError|VMSError|Warning|'
r'WindowsError|ZeroDivisionError)\b', Name.Exception),
]
tokens['numbers'] = [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'0[oO][0-7]+', Number.Oct),
(r'0[bB][01]+', Number.Bin),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+', Number.Integer)
]
tokens['backtick'] = []
tokens['name'] = [
(r'@[a-zA-Z0-9_]+', Name.Decorator),
(uni_name, Name),
]
tokens['funcname'] = [
(uni_name, Name.Function, '#pop')
]
tokens['classname'] = [
(uni_name, Name.Class, '#pop')
]
tokens['import'] = [
(r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)),
(r'\.', Name.Namespace),
(uni_name, Name.Namespace),
(r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
(r'', Text, '#pop') # all else: go back
]
tokens['fromimport'] = [
(r'(\s+)(import)\b', bygroups(Text, Keyword), '#pop'),
(r'\.', Name.Namespace),
(uni_name, Name.Namespace),
]
# don't highlight "%s" substitutions
tokens['strings'] = [
(r'[^\\\'"%\n]+', String),
# quotes, percents and backslashes must be parsed one at a time
(r'[\'"\\]', String),
# unhandled string formatting sign
(r'%', String)
# newlines are an error (use "nl" state)
]
def analyse_text(text):
return shebang_matches(text, r'pythonw?3(\.\d)?')
class PythonConsoleLexer(Lexer):
"""
For Python console output or doctests, such as:
.. sourcecode:: pycon
>>> a = 'foo'
>>> print a
foo
>>> 1 / 0
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ZeroDivisionError: integer division or modulo by zero
Additional options:
`python3`
Use Python 3 lexer for code. Default is ``False``.
*New in Pygments 1.0.*
"""
name = 'Python console session'
aliases = ['pycon']
mimetypes = ['text/x-python-doctest']
def __init__(self, **options):
self.python3 = get_bool_opt(options, 'python3', False)
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
if self.python3:
pylexer = Python3Lexer(**self.options)
tblexer = Python3TracebackLexer(**self.options)
else:
pylexer = PythonLexer(**self.options)
tblexer = PythonTracebackLexer(**self.options)
curcode = ''
insertions = []
curtb = ''
tbindex = 0
tb = 0
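        # curcode/insertions accumulate prompt-prefixed source until a
        # non-prompt line arrives; curtb/tbindex buffer traceback lines so
        # the whole block can be handed to the traceback lexer at once,
        # and tb flags whether we are currently inside such a block.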
for match in line_re.finditer(text):
line = match.group()
if line.startswith(u'>>> ') or line.startswith(u'... '):
tb = 0
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:4])]))
curcode += line[4:]
elif line.rstrip() == u'...' and not tb:
# only a new >>> prompt can end an exception block
# otherwise an ellipsis in place of the traceback frames
# will be mishandled
insertions.append((len(curcode),
[(0, Generic.Prompt, u'...')]))
curcode += line[3:]
else:
if curcode:
for item in do_insertions(insertions,
pylexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
if (line.startswith(u'Traceback (most recent call last):') or
re.match(ur' File "[^"]+", line \d+\n$', line)):
tb = 1
curtb = line
tbindex = match.start()
elif line == 'KeyboardInterrupt\n':
yield match.start(), Name.Class, line
elif tb:
curtb += line
if not (line.startswith(' ') or line.strip() == u'...'):
tb = 0
for i, t, v in tblexer.get_tokens_unprocessed(curtb):
yield tbindex+i, t, v
else:
yield match.start(), Generic.Output, line
if curcode:
for item in do_insertions(insertions,
pylexer.get_tokens_unprocessed(curcode)):
yield item
class PythonTracebackLexer(RegexLexer):
"""
For Python tracebacks.
*New in Pygments 0.7.*
"""
name = 'Python Traceback'
aliases = ['pytb']
filenames = ['*.pytb']
mimetypes = ['text/x-python-traceback']
tokens = {
'root': [
(r'^Traceback \(most recent call last\):\n',
Generic.Traceback, 'intb'),
# SyntaxError starts with this.
(r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'),
(r'^.*\n', Other),
],
'intb': [
(r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
bygroups(Text, Name.Builtin, Text, Number, Text, Name, Text)),
(r'^( File )("[^"]+")(, line )(\d+)(\n)',
bygroups(Text, Name.Builtin, Text, Number, Text)),
(r'^( )(.+)(\n)',
bygroups(Text, using(PythonLexer), Text)),
(r'^([ \t]*)(\.\.\.)(\n)',
bygroups(Text, Comment, Text)), # for doctests...
(r'^(.+)(: )(.+)(\n)',
bygroups(Generic.Error, Text, Name, Text), '#pop'),
(r'^([a-zA-Z_][a-zA-Z0-9_]*)(:?\n)',
bygroups(Generic.Error, Text), '#pop')
],
}
class Python3TracebackLexer(RegexLexer):
"""
For Python 3.0 tracebacks, with support for chained exceptions.
*New in Pygments 1.0.*
"""
name = 'Python 3.0 Traceback'
aliases = ['py3tb']
filenames = ['*.py3tb']
mimetypes = ['text/x-python3-traceback']
tokens = {
'root': [
(r'\n', Text),
(r'^Traceback \(most recent call last\):\n', Generic.Traceback, 'intb'),
(r'^During handling of the above exception, another '
r'exception occurred:\n\n', Generic.Traceback),
(r'^The above exception was the direct cause of the '
r'following exception:\n\n', Generic.Traceback),
],
'intb': [
(r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
bygroups(Text, Name.Builtin, Text, Number, Text, Name, Text)),
(r'^( )(.+)(\n)',
bygroups(Text, using(Python3Lexer), Text)),
(r'^([ \t]*)(\.\.\.)(\n)',
bygroups(Text, Comment, Text)), # for doctests...
(r'^(.+)(: )(.+)(\n)',
bygroups(Generic.Error, Text, Name, Text), '#pop'),
(r'^([a-zA-Z_][a-zA-Z0-9_]*)(:?\n)',
bygroups(Generic.Error, Text), '#pop')
],
}
class RubyLexer(ExtendedRegexLexer):
"""
For `Ruby <http://www.ruby-lang.org>`_ source code.
"""
name = 'Ruby'
aliases = ['rb', 'ruby', 'duby']
filenames = ['*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec',
'*.rbx', '*.duby']
mimetypes = ['text/x-ruby', 'application/x-ruby']
flags = re.DOTALL | re.MULTILINE
def heredoc_callback(self, match, ctx):
# okay, this is the hardest part of parsing Ruby...
# match: 1 = <<-?, 2 = quote? 3 = name 4 = quote? 5 = rest of line
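        # e.g. for a line like   foo = <<-EOS + bar
        # group 1 is "<<-", group 3 is "EOS", and group 5 is the rest of
        # the line, which is lexed first and may open further heredocs.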
start = match.start(1)
yield start, Operator, match.group(1) # <<-?
yield match.start(2), String.Heredoc, match.group(2) # quote ", ', `
yield match.start(3), Name.Constant, match.group(3) # heredoc name
yield match.start(4), String.Heredoc, match.group(4) # quote again
heredocstack = ctx.__dict__.setdefault('heredocstack', [])
outermost = not bool(heredocstack)
heredocstack.append((match.group(1) == '<<-', match.group(3)))
ctx.pos = match.start(5)
ctx.end = match.end(5)
# this may find other heredocs
for i, t, v in self.get_tokens_unprocessed(context=ctx):
yield i, t, v
ctx.pos = match.end()
if outermost:
# this is the outer heredoc again, now we can process them all
for tolerant, hdname in heredocstack:
lines = []
for match in line_re.finditer(ctx.text, ctx.pos):
if tolerant:
check = match.group().strip()
else:
check = match.group().rstrip()
if check == hdname:
for amatch in lines:
yield amatch.start(), String.Heredoc, amatch.group()
yield match.start(), Name.Constant, match.group()
ctx.pos = match.end()
break
else:
lines.append(match)
else:
# end of heredoc not found -- error!
for amatch in lines:
yield amatch.start(), Error, amatch.group()
ctx.end = len(ctx.text)
del heredocstack[:]
def gen_rubystrings_rules():
def intp_regex_callback(self, match, ctx):
yield match.start(1), String.Regex, match.group(1) # begin
nctx = LexerContext(match.group(3), 0, ['interpolated-regex'])
for i, t, v in self.get_tokens_unprocessed(context=nctx):
yield match.start(3)+i, t, v
yield match.start(4), String.Regex, match.group(4) # end[mixounse]*
ctx.pos = match.end()
def intp_string_callback(self, match, ctx):
yield match.start(1), String.Other, match.group(1)
nctx = LexerContext(match.group(3), 0, ['interpolated-string'])
for i, t, v in self.get_tokens_unprocessed(context=nctx):
yield match.start(3)+i, t, v
yield match.start(4), String.Other, match.group(4) # end
ctx.pos = match.end()
states = {}
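        # The states generated below handle Ruby's percent literals with
        # bracketing delimiters, e.g. %q{...}, %Q[...], %w(...) and
        # %r<...>i, pushing on each opener so nesting balances correctly.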
states['strings'] = [
# easy ones
(r'\:@{0,2}([a-zA-Z_]\w*[\!\?]?|\*\*?|[-+]@?|'
r'[/%&|^`~]|\[\]=?|<<|>>|<=?>|>=?|===?)', String.Symbol),
(r":'(\\\\|\\'|[^'])*'", String.Symbol),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r':"', String.Symbol, 'simple-sym'),
(r'"', String.Double, 'simple-string'),
(r'(?<!\.)`', String.Backtick, 'simple-backtick'),
]
# double-quoted string and symbol
for name, ttype, end in ('string', String.Double, '"'), \
('sym', String.Symbol, '"'), \
('backtick', String.Backtick, '`'):
states['simple-'+name] = [
include('string-intp-escaped'),
(r'[^\\%s#]+' % end, ttype),
(r'[\\#]', ttype),
(end, ttype, '#pop'),
]
# braced quoted strings
for lbrace, rbrace, name in ('\\{', '\\}', 'cb'), \
('\\[', '\\]', 'sb'), \
('\\(', '\\)', 'pa'), \
('<', '>', 'ab'):
states[name+'-intp-string'] = [
(r'\\[\\' + lbrace + rbrace + ']', String.Other),
(r'(?<!\\)' + lbrace, String.Other, '#push'),
(r'(?<!\\)' + rbrace, String.Other, '#pop'),
include('string-intp-escaped'),
(r'[\\#' + lbrace + rbrace + ']', String.Other),
(r'[^\\#' + lbrace + rbrace + ']+', String.Other),
]
states['strings'].append((r'%[QWx]?' + lbrace, String.Other,
name+'-intp-string'))
states[name+'-string'] = [
(r'\\[\\' + lbrace + rbrace + ']', String.Other),
(r'(?<!\\)' + lbrace, String.Other, '#push'),
(r'(?<!\\)' + rbrace, String.Other, '#pop'),
(r'[\\#' + lbrace + rbrace + ']', String.Other),
(r'[^\\#' + lbrace + rbrace + ']+', String.Other),
]
states['strings'].append((r'%[qsw]' + lbrace, String.Other,
name+'-string'))
states[name+'-regex'] = [
(r'\\[\\' + lbrace + rbrace + ']', String.Regex),
(r'(?<!\\)' + lbrace, String.Regex, '#push'),
(r'(?<!\\)' + rbrace + '[mixounse]*', String.Regex, '#pop'),
include('string-intp'),
(r'[\\#' + lbrace + rbrace + ']', String.Regex),
(r'[^\\#' + lbrace + rbrace + ']+', String.Regex),
]
states['strings'].append((r'%r' + lbrace, String.Regex,
name+'-regex'))
# these must come after %<brace>!
states['strings'] += [
# %r regex
(r'(%r([^a-zA-Z0-9]))((?:\\\2|(?!\2).)*)(\2[mixounse]*)',
intp_regex_callback),
# regular fancy strings with qsw
(r'%[qsw]([^a-zA-Z0-9])((?:\\\1|(?!\1).)*)\1', String.Other),
(r'(%[QWx]([^a-zA-Z0-9]))((?:\\\2|(?!\2).)*)(\2)',
intp_string_callback),
# special forms of fancy strings after operators or
# in method calls with braces
(r'(?<=[-+/*%=<>&!^|~,(])(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)',
bygroups(Text, String.Other, None)),
            # and because lookbehinds must be fixed-width, the whole thing
            # is repeated a second time for matches at the start of a line...
(r'^(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)',
bygroups(Text, String.Other, None)),
# all regular fancy strings without qsw
(r'(%([^a-zA-Z0-9\s]))((?:\\\2|(?!\2).)*)(\2)',
intp_string_callback),
]
return states
tokens = {
'root': [
(r'#.*?$', Comment.Single),
(r'=begin\s.*?\n=end.*?$', Comment.Multiline),
# keywords
(r'(BEGIN|END|alias|begin|break|case|defined\?|'
r'do|else|elsif|end|ensure|for|if|in|next|redo|'
r'rescue|raise|retry|return|super|then|undef|unless|until|when|'
r'while|yield)\b', Keyword),
# start of function, class and module names
(r'(module)(\s+)([a-zA-Z_][a-zA-Z0-9_]*(::[a-zA-Z_][a-zA-Z0-9_]*)*)',
bygroups(Keyword, Text, Name.Namespace)),
(r'(def)(\s+)', bygroups(Keyword, Text), 'funcname'),
(r'def(?=[*%&^`~+-/\[<>=])', Keyword, 'funcname'),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
# special methods
(r'(initialize|new|loop|include|extend|raise|attr_reader|'
r'attr_writer|attr_accessor|attr|catch|throw|private|'
r'module_function|public|protected|true|false|nil)\b',
Keyword.Pseudo),
(r'(not|and|or)\b', Operator.Word),
(r'(autoload|block_given|const_defined|eql|equal|frozen|include|'
r'instance_of|is_a|iterator|kind_of|method_defined|nil|'
r'private_method_defined|protected_method_defined|'
r'public_method_defined|respond_to|tainted)\?', Name.Builtin),
(r'(chomp|chop|exit|gsub|sub)!', Name.Builtin),
(r'(?<!\.)(Array|Float|Integer|String|__id__|__send__|abort|'
r'ancestors|at_exit|autoload|binding|callcc|caller|'
r'catch|chomp|chop|class_eval|class_variables|'
r'clone|const_defined\?|const_get|const_missing|const_set|'
r'constants|display|dup|eval|exec|exit|extend|fail|fork|'
r'format|freeze|getc|gets|global_variables|gsub|'
r'hash|id|included_modules|inspect|instance_eval|'
r'instance_method|instance_methods|'
r'instance_variable_get|instance_variable_set|instance_variables|'
r'lambda|load|local_variables|loop|'
r'method|method_missing|methods|module_eval|name|'
r'object_id|open|p|print|printf|private_class_method|'
r'private_instance_methods|'
r'private_methods|proc|protected_instance_methods|'
r'protected_methods|public_class_method|'
r'public_instance_methods|public_methods|'
r'putc|puts|raise|rand|readline|readlines|require|'
r'scan|select|self|send|set_trace_func|singleton_methods|sleep|'
r'split|sprintf|srand|sub|syscall|system|taint|'
r'test|throw|to_a|to_s|trace_var|trap|untaint|untrace_var|'
r'warn)\b', Name.Builtin),
(r'__(FILE|LINE)__\b', Name.Builtin.Pseudo),
# normal heredocs
(r'(?<!\w)(<<-?)(["`\']?)([a-zA-Z_]\w*)(\2)(.*?\n)',
heredoc_callback),
# empty string heredocs
(r'(<<-?)("|\')()(\2)(.*?\n)', heredoc_callback),
(r'__END__', Comment.Preproc, 'end-part'),
# multiline regex (after keywords or assignments)
(r'(?:^|(?<=[=<>~!])|'
r'(?<=(?:\s|;)when\s)|'
r'(?<=(?:\s|;)or\s)|'
r'(?<=(?:\s|;)and\s)|'
r'(?<=(?:\s|;|\.)index\s)|'
r'(?<=(?:\s|;|\.)scan\s)|'
r'(?<=(?:\s|;|\.)sub\s)|'
r'(?<=(?:\s|;|\.)sub!\s)|'
r'(?<=(?:\s|;|\.)gsub\s)|'
r'(?<=(?:\s|;|\.)gsub!\s)|'
r'(?<=(?:\s|;|\.)match\s)|'
r'(?<=(?:\s|;)if\s)|'
r'(?<=(?:\s|;)elsif\s)|'
r'(?<=^when\s)|'
r'(?<=^index\s)|'
r'(?<=^scan\s)|'
r'(?<=^sub\s)|'
r'(?<=^gsub\s)|'
r'(?<=^sub!\s)|'
r'(?<=^gsub!\s)|'
r'(?<=^match\s)|'
r'(?<=^if\s)|'
r'(?<=^elsif\s)'
r')(\s*)(/)', bygroups(Text, String.Regex), 'multiline-regex'),
# multiline regex (in method calls or subscripts)
(r'(?<=\(|,|\[)/', String.Regex, 'multiline-regex'),
# multiline regex (this time the funny no whitespace rule)
(r'(\s+)(/)(?![\s=])', bygroups(Text, String.Regex),
'multiline-regex'),
# lex numbers and ignore following regular expressions which
# are division operators in fact (grrrr. i hate that. any
# better ideas?)
# since pygments 0.7 we also eat a "?" operator after numbers
# so that the char operator does not work. Chars are not allowed
# there so that you can use the ternary operator.
# stupid example:
# x>=0?n[x]:""
(r'(0_?[0-7]+(?:_[0-7]+)*)(\s*)([/?])?',
bygroups(Number.Oct, Text, Operator)),
(r'(0x[0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*)(\s*)([/?])?',
bygroups(Number.Hex, Text, Operator)),
(r'(0b[01]+(?:_[01]+)*)(\s*)([/?])?',
bygroups(Number.Bin, Text, Operator)),
(r'([\d]+(?:_\d+)*)(\s*)([/?])?',
bygroups(Number.Integer, Text, Operator)),
# Names
(r'@@[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable.Class),
(r'@[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable.Instance),
(r'\$[a-zA-Z0-9_]+', Name.Variable.Global),
(r'\$[!@&`\'+~=/\\,;.<>_*$?:"]', Name.Variable.Global),
(r'\$-[0adFiIlpvw]', Name.Variable.Global),
(r'::', Operator),
include('strings'),
# chars
(r'\?(\\[MC]-)*' # modifiers
r'(\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})|\S)'
r'(?!\w)',
String.Char),
(r'[A-Z][a-zA-Z0-9_]+', Name.Constant),
# this is needed because ruby attributes can look
# like keywords (class) or like this: ` ?!?
(r'(\.|::)([a-zA-Z_]\w*[\!\?]?|[*%&^`~+-/\[<>=])',
bygroups(Operator, Name)),
(r'[a-zA-Z_]\w*[\!\?]?', Name),
(r'(\[|\]|\*\*|<<?|>>?|>=|<=|<=>|=~|={3}|'
r'!~|&&?|\|\||\.{1,3})', Operator),
(r'[-+/*%=<>&!^|~]=?', Operator),
(r'[(){};,/?:\\]', Punctuation),
(r'\s+', Text)
],
'funcname': [
(r'\(', Punctuation, 'defexpr'),
(r'(?:([a-zA-Z_][a-zA-Z0-9_]*)(\.))?'
r'([a-zA-Z_]\w*[\!\?]?|\*\*?|[-+]@?|'
r'[/%&|^`~]|\[\]=?|<<|>>|<=?>|>=?|===?)',
bygroups(Name.Class, Operator, Name.Function), '#pop'),
(r'', Text, '#pop')
],
'classname': [
(r'\(', Punctuation, 'defexpr'),
(r'<<', Operator, '#pop'),
(r'[A-Z_]\w*', Name.Class, '#pop'),
(r'', Text, '#pop')
],
'defexpr': [
(r'(\))(\.|::)?', bygroups(Punctuation, Operator), '#pop'),
(r'\(', Operator, '#push'),
include('root')
],
'in-intp': [
('}', String.Interpol, '#pop'),
include('root'),
],
'string-intp': [
(r'#{', String.Interpol, 'in-intp'),
(r'#@@?[a-zA-Z_][a-zA-Z0-9_]*', String.Interpol),
(r'#\$[a-zA-Z_][a-zA-Z0-9_]*', String.Interpol)
],
'string-intp-escaped': [
include('string-intp'),
(r'\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})',
String.Escape)
],
'interpolated-regex': [
include('string-intp'),
(r'[\\#]', String.Regex),
(r'[^\\#]+', String.Regex),
],
'interpolated-string': [
include('string-intp'),
(r'[\\#]', String.Other),
(r'[^\\#]+', String.Other),
],
'multiline-regex': [
include('string-intp'),
(r'\\\\', String.Regex),
(r'\\/', String.Regex),
(r'[\\#]', String.Regex),
(r'[^\\/#]+', String.Regex),
(r'/[mixounse]*', String.Regex, '#pop'),
],
'end-part': [
(r'.+', Comment.Preproc, '#pop')
]
}
tokens.update(gen_rubystrings_rules())
def analyse_text(text):
return shebang_matches(text, r'ruby(1\.\d)?')
class RubyConsoleLexer(Lexer):
"""
For Ruby interactive console (**irb**) output like:
.. sourcecode:: rbcon
irb(main):001:0> a = 1
=> 1
irb(main):002:0> puts a
1
=> nil
"""
name = 'Ruby irb session'
aliases = ['rbcon', 'irb']
mimetypes = ['text/x-ruby-shellsession']
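    # matches the default irb prompt (e.g. "irb(main):001:0> ") as well
    # as the bare ">> " and "?> " prompts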
_prompt_re = re.compile('irb\([a-zA-Z_][a-zA-Z0-9_]*\):\d{3}:\d+[>*"\'] '
'|>> |\?> ')
def get_tokens_unprocessed(self, text):
rblexer = RubyLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
m = self._prompt_re.match(line)
if m is not None:
end = m.end()
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:end])]))
curcode += line[end:]
else:
if curcode:
for item in do_insertions(insertions,
rblexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
yield match.start(), Generic.Output, line
if curcode:
for item in do_insertions(insertions,
rblexer.get_tokens_unprocessed(curcode)):
yield item
class PerlLexer(RegexLexer):
"""
For `Perl <http://www.perl.org>`_ source code.
"""
name = 'Perl'
aliases = ['perl', 'pl']
filenames = ['*.pl', '*.pm']
mimetypes = ['text/x-perl', 'application/x-perl']
flags = re.DOTALL | re.MULTILINE
# TODO: give this to a perl guy who knows how to parse perl...
tokens = {
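        # Regexes whose delimiter was chosen by an "m" or "s" prefix in
        # 'root'; one rule per supported delimiter, each consuming the
        # body plus any trailing [egimosx] modifiers before popping back.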
'balanced-regex': [
(r'/(\\\\|\\[^\\]|[^\\/])*/[egimosx]*', String.Regex, '#pop'),
(r'!(\\\\|\\[^\\]|[^\\!])*![egimosx]*', String.Regex, '#pop'),
(r'\\(\\\\|[^\\])*\\[egimosx]*', String.Regex, '#pop'),
(r'{(\\\\|\\[^\\]|[^\\}])*}[egimosx]*', String.Regex, '#pop'),
(r'<(\\\\|\\[^\\]|[^\\>])*>[egimosx]*', String.Regex, '#pop'),
(r'\[(\\\\|\\[^\\]|[^\\\]])*\][egimosx]*', String.Regex, '#pop'),
(r'\((\\\\|\\[^\\]|[^\\\)])*\)[egimosx]*', String.Regex, '#pop'),
(r'@(\\\\|\\[^\\]|[^\\\@])*@[egimosx]*', String.Regex, '#pop'),
(r'%(\\\\|\\[^\\]|[^\\\%])*%[egimosx]*', String.Regex, '#pop'),
(r'\$(\\\\|\\[^\\]|[^\\\$])*\$[egimosx]*', String.Regex, '#pop'),
],
'root': [
(r'\#.*?$', Comment.Single),
(r'^=[a-zA-Z0-9]+\s+.*?\n=cut', Comment.Multiline),
(r'(case|continue|do|else|elsif|for|foreach|if|last|my|'
r'next|our|redo|reset|then|unless|until|while|use|'
r'print|new|BEGIN|CHECK|INIT|END|return)\b', Keyword),
(r'(format)(\s+)([a-zA-Z0-9_]+)(\s*)(=)(\s*\n)',
bygroups(Keyword, Text, Name, Text, Punctuation, Text), 'format'),
(r'(eq|lt|gt|le|ge|ne|not|and|or|cmp)\b', Operator.Word),
# common delimiters
(r's/(\\\\|\\[^\\]|[^\\/])*/(\\\\|\\[^\\]|[^\\/])*/[egimosx]*',
String.Regex),
(r's!(\\\\|\\!|[^!])*!(\\\\|\\!|[^!])*![egimosx]*', String.Regex),
(r's\\(\\\\|[^\\])*\\(\\\\|[^\\])*\\[egimosx]*', String.Regex),
(r's@(\\\\|\\[^\\]|[^\\@])*@(\\\\|\\[^\\]|[^\\@])*@[egimosx]*',
String.Regex),
(r's%(\\\\|\\[^\\]|[^\\%])*%(\\\\|\\[^\\]|[^\\%])*%[egimosx]*',
String.Regex),
# balanced delimiters
(r's{(\\\\|\\[^\\]|[^\\}])*}\s*', String.Regex, 'balanced-regex'),
(r's<(\\\\|\\[^\\]|[^\\>])*>\s*', String.Regex, 'balanced-regex'),
(r's\[(\\\\|\\[^\\]|[^\\\]])*\]\s*', String.Regex,
'balanced-regex'),
(r's\((\\\\|\\[^\\]|[^\\\)])*\)\s*', String.Regex,
'balanced-regex'),
(r'm?/(\\\\|\\[^\\]|[^\\/\n])*/[gcimosx]*', String.Regex),
(r'm(?=[/!\\{<\[\(@%\$])', String.Regex, 'balanced-regex'),
(r'((?<==~)|(?<=\())\s*/(\\\\|\\[^\\]|[^\\/])*/[gcimosx]*',
String.Regex),
(r'\s+', Text),
(r'(abs|accept|alarm|atan2|bind|binmode|bless|caller|chdir|'
r'chmod|chomp|chop|chown|chr|chroot|close|closedir|connect|'
r'continue|cos|crypt|dbmclose|dbmopen|defined|delete|die|'
r'dump|each|endgrent|endhostent|endnetent|endprotoent|'
r'endpwent|endservent|eof|eval|exec|exists|exit|exp|fcntl|'
r'fileno|flock|fork|format|formline|getc|getgrent|getgrgid|'
r'getgrnam|gethostbyaddr|gethostbyname|gethostent|getlogin|'
r'getnetbyaddr|getnetbyname|getnetent|getpeername|getpgrp|'
r'getppid|getpriority|getprotobyname|getprotobynumber|'
r'getprotoent|getpwent|getpwnam|getpwuid|getservbyname|'
r'getservbyport|getservent|getsockname|getsockopt|glob|gmtime|'
r'goto|grep|hex|import|index|int|ioctl|join|keys|kill|last|'
r'lc|lcfirst|length|link|listen|local|localtime|log|lstat|'
r'map|mkdir|msgctl|msgget|msgrcv|msgsnd|my|next|no|oct|open|'
r'opendir|ord|our|pack|package|pipe|pop|pos|printf|'
r'prototype|push|quotemeta|rand|read|readdir|'
r'readline|readlink|readpipe|recv|redo|ref|rename|require|'
r'reverse|rewinddir|rindex|rmdir|scalar|seek|seekdir|'
r'select|semctl|semget|semop|send|setgrent|sethostent|setnetent|'
r'setpgrp|setpriority|setprotoent|setpwent|setservent|'
r'setsockopt|shift|shmctl|shmget|shmread|shmwrite|shutdown|'
r'sin|sleep|socket|socketpair|sort|splice|split|sprintf|sqrt|'
r'srand|stat|study|substr|symlink|syscall|sysopen|sysread|'
r'sysseek|system|syswrite|tell|telldir|tie|tied|time|times|tr|'
r'truncate|uc|ucfirst|umask|undef|unlink|unpack|unshift|untie|'
r'utime|values|vec|wait|waitpid|wantarray|warn|write'
r')\b', Name.Builtin),
(r'((__(DATA|DIE|WARN)__)|(STD(IN|OUT|ERR)))\b', Name.Builtin.Pseudo),
(r'<<([\'"]?)([a-zA-Z_][a-zA-Z0-9_]*)\1;?\n.*?\n\2\n', String),
(r'__END__', Comment.Preproc, 'end-part'),
(r'\$\^[ADEFHILMOPSTWX]', Name.Variable.Global),
(r"\$[\\\"\[\]'&`+*.,;=%~?@$!<>(^|/-](?!\w)", Name.Variable.Global),
(r'[$@%#]+', Name.Variable, 'varname'),
(r'0_?[0-7]+(_[0-7]+)*', Number.Oct),
(r'0x[0-9A-Fa-f]+(_[0-9A-Fa-f]+)*', Number.Hex),
(r'0b[01]+(_[01]+)*', Number.Bin),
(r'(?i)(\d*(_\d*)*\.\d+(_\d*)*|\d+(_\d*)*\.\d+(_\d*)*)(e[+-]?\d+)?',
Number.Float),
(r'(?i)\d+(_\d*)*e[+-]?\d+(_\d*)*', Number.Float),
(r'\d+(_\d+)*', Number.Integer),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
(r'`(\\\\|\\[^\\]|[^`\\])*`', String.Backtick),
(r'<([^\s>]+)>', String.Regex),
(r'(q|qq|qw|qr|qx)\{', String.Other, 'cb-string'),
(r'(q|qq|qw|qr|qx)\(', String.Other, 'rb-string'),
(r'(q|qq|qw|qr|qx)\[', String.Other, 'sb-string'),
(r'(q|qq|qw|qr|qx)\<', String.Other, 'lt-string'),
(r'(q|qq|qw|qr|qx)([^a-zA-Z0-9])(.|\n)*?\2', String.Other),
(r'package\s+', Keyword, 'modulename'),
(r'sub\s+', Keyword, 'funcname'),
(r'(\[\]|\*\*|::|<<|>>|>=|<=>|<=|={3}|!=|=~|'
r'!~|&&?|\|\||\.{1,3})', Operator),
(r'[-+/*%=<>&^|!\\~]=?', Operator),
(r'[\(\)\[\]:;,<>/\?\{\}]', Punctuation), # yes, there's no shortage
# of punctuation in Perl!
(r'(?=\w)', Name, 'name'),
],
'format': [
(r'\.\n', String.Interpol, '#pop'),
(r'[^\n]*\n', String.Interpol),
],
'varname': [
(r'\s+', Text),
(r'\{', Punctuation, '#pop'), # hash syntax?
(r'\)|,', Punctuation, '#pop'), # argument specifier
(r'[a-zA-Z0-9_]+::', Name.Namespace),
(r'[a-zA-Z0-9_:]+', Name.Variable, '#pop'),
],
'name': [
(r'[a-zA-Z0-9_]+::', Name.Namespace),
(r'[a-zA-Z0-9_:]+', Name, '#pop'),
(r'[A-Z_]+(?=[^a-zA-Z0-9_])', Name.Constant, '#pop'),
(r'(?=[^a-zA-Z0-9_])', Text, '#pop'),
],
'modulename': [
(r'[a-zA-Z_]\w*', Name.Namespace, '#pop')
],
'funcname': [
(r'[a-zA-Z_]\w*[\!\?]?', Name.Function),
(r'\s+', Text),
# argument declaration
(r'(\([$@%]*\))(\s*)', bygroups(Punctuation, Text)),
(r'.*?{', Punctuation, '#pop'),
(r';', Punctuation, '#pop'),
],
'cb-string': [
(r'\\[\{\}\\]', String.Other),
(r'\\', String.Other),
(r'\{', String.Other, 'cb-string'),
(r'\}', String.Other, '#pop'),
(r'[^\{\}\\]+', String.Other)
],
'rb-string': [
(r'\\[\(\)\\]', String.Other),
(r'\\', String.Other),
(r'\(', String.Other, 'rb-string'),
(r'\)', String.Other, '#pop'),
(r'[^\(\)]+', String.Other)
],
'sb-string': [
(r'\\[\[\]\\]', String.Other),
(r'\\', String.Other),
(r'\[', String.Other, 'sb-string'),
(r'\]', String.Other, '#pop'),
(r'[^\[\]]+', String.Other)
],
'lt-string': [
(r'\\[\<\>\\]', String.Other),
(r'\\', String.Other),
(r'\<', String.Other, 'lt-string'),
(r'\>', String.Other, '#pop'),
(r'[^\<\>]+', String.Other)
],
'end-part': [
(r'.+', Comment.Preproc, '#pop')
]
}
def analyse_text(text):
if shebang_matches(text, r'perl'):
return True
if 'my $' in text:
return 0.9
return 0.1 # who knows, might still be perl!
class LuaLexer(RegexLexer):
"""
For `Lua <http://www.lua.org>`_ source code.
Additional options accepted:
`func_name_highlighting`
If given and ``True``, highlight builtin function names
(default: ``True``).
`disabled_modules`
If given, must be a list of module names whose function names
should not be highlighted. By default all modules are highlighted.
To get a list of allowed modules have a look into the
`_luabuiltins` module:
.. sourcecode:: pycon
>>> from pygments.lexers._luabuiltins import MODULES
>>> MODULES.keys()
['string', 'coroutine', 'modules', 'io', 'basic', ...]
"""
name = 'Lua'
aliases = ['lua']
filenames = ['*.lua', '*.wlua']
mimetypes = ['text/x-lua', 'application/x-lua']
tokens = {
'root': [
# lua allows a file to start with a shebang
(r'#!(.*?)$', Comment.Preproc),
(r'', Text, 'base'),
],
'base': [
(r'(?s)--\[(=*)\[.*?\]\1\]', Comment.Multiline),
('--.*$', Comment.Single),
(r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number.Float),
(r'(?i)\d+e[+-]?\d+', Number.Float),
('(?i)0x[0-9a-f]*', Number.Hex),
(r'\d+', Number.Integer),
(r'\n', Text),
(r'[^\S\n]', Text),
# multiline strings
(r'(?s)\[(=*)\[.*?\]\1\]', String),
(r'(==|~=|<=|>=|\.\.\.|\.\.|[=+\-*/%^<>#])', Operator),
(r'[\[\]\{\}\(\)\.,:;]', Punctuation),
(r'(and|or|not)\b', Operator.Word),
('(break|do|else|elseif|end|for|if|in|repeat|return|then|until|'
r'while)\b', Keyword),
(r'(local)\b', Keyword.Declaration),
(r'(true|false|nil)\b', Keyword.Constant),
(r'(function)\b', Keyword, 'funcname'),
(r'[A-Za-z_][A-Za-z0-9_]*(\.[A-Za-z_][A-Za-z0-9_]*)?', Name),
("'", String.Single, combined('stringescape', 'sqs')),
('"', String.Double, combined('stringescape', 'dqs'))
],
'funcname': [
(r'\s+', Text),
('(?:([A-Za-z_][A-Za-z0-9_]*)(\.))?([A-Za-z_][A-Za-z0-9_]*)',
bygroups(Name.Class, Punctuation, Name.Function), '#pop'),
# inline function
('\(', Punctuation, '#pop'),
],
# if I understand correctly, every character is valid in a lua string,
# so this state is only for later corrections
'string': [
('.', String)
],
'stringescape': [
(r'''\\([abfnrtv\\"']|\d{1,3})''', String.Escape)
],
'sqs': [
("'", String, '#pop'),
include('string')
],
'dqs': [
('"', String, '#pop'),
include('string')
]
}
def __init__(self, **options):
self.func_name_highlighting = get_bool_opt(
options, 'func_name_highlighting', True)
self.disabled_modules = get_list_opt(options, 'disabled_modules', [])
self._functions = set()
if self.func_name_highlighting:
from pygments.lexers._luabuiltins import MODULES
for mod, func in MODULES.iteritems():
if mod not in self.disabled_modules:
self._functions.update(func)
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
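            # Post-process plain names: known builtins (e.g. "string.format")
            # are re-tagged as Name.Builtin; other dotted names are split
            # so the dot is emitted as Punctuation.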
if token is Name:
if value in self._functions:
yield index, Name.Builtin, value
continue
elif '.' in value:
a, b = value.split('.')
yield index, Name, a
yield index + len(a), Punctuation, u'.'
yield index + len(a) + 1, Name, b
continue
yield index, token, value
class MoonScriptLexer(LuaLexer):
"""
    For `MoonScript <http://moonscript.org>`_ source code.
*New in Pygments 1.5.*
"""
name = "MoonScript"
aliases = ["moon", "moonscript"]
filenames = ["*.moon"]
mimetypes = ['text/x-moonscript', 'application/x-moonscript']
tokens = {
'root': [
(r'#!(.*?)$', Comment.Preproc),
(r'', Text, 'base'),
],
'base': [
('--.*$', Comment.Single),
(r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number.Float),
(r'(?i)\d+e[+-]?\d+', Number.Float),
(r'(?i)0x[0-9a-f]*', Number.Hex),
(r'\d+', Number.Integer),
(r'\n', Text),
(r'[^\S\n]+', Text),
(r'(?s)\[(=*)\[.*?\]\1\]', String),
(r'(->|=>)', Name.Function),
(r':[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable),
(r'(==|!=|~=|<=|>=|\.\.\.|\.\.|[=+\-*/%^<>#!.\\:])', Operator),
(r'[;,]', Punctuation),
(r'[\[\]\{\}\(\)]', Keyword.Type),
(r'[a-zA-Z_][a-zA-Z0-9_]*:', Name.Variable),
(r"(class|extends|if|then|super|do|with|import|export|"
r"while|elseif|return|for|in|from|when|using|else|"
r"and|or|not|switch|break)\b", Keyword),
(r'(true|false|nil)\b', Keyword.Constant),
(r'(and|or|not)\b', Operator.Word),
(r'(self)\b', Name.Builtin.Pseudo),
(r'@@?([a-zA-Z_][a-zA-Z0-9_]*)?', Name.Variable.Class),
(r'[A-Z]\w*', Name.Class), # proper name
(r'[A-Za-z_][A-Za-z0-9_]*(\.[A-Za-z_][A-Za-z0-9_]*)?', Name),
("'", String.Single, combined('stringescape', 'sqs')),
('"', String.Double, combined('stringescape', 'dqs'))
],
'stringescape': [
(r'''\\([abfnrtv\\"']|\d{1,3})''', String.Escape)
],
'sqs': [
("'", String.Single, '#pop'),
(".", String)
],
'dqs': [
('"', String.Double, '#pop'),
(".", String)
]
}
def get_tokens_unprocessed(self, text):
# set . as Operator instead of Punctuation
for index, token, value in \
LuaLexer.get_tokens_unprocessed(self, text):
if token == Punctuation and value == ".":
token = Operator
yield index, token, value
class CrocLexer(RegexLexer):
"""
For `Croc <http://jfbillingsley.com/croc>`_ source.
"""
name = 'Croc'
filenames = ['*.croc']
aliases = ['croc']
mimetypes = ['text/x-crocsrc']
tokens = {
'root': [
(r'\n', Text),
(r'\s+', Text),
# Comments
(r'//(.*?)\n', Comment.Single),
(r'/\*', Comment.Multiline, 'nestedcomment'),
# Keywords
(r'(as|assert|break|case|catch|class|continue|default'
r'|do|else|finally|for|foreach|function|global|namespace'
r'|if|import|in|is|local|module|return|scope|super|switch'
r'|this|throw|try|vararg|while|with|yield)\b', Keyword),
(r'(false|true|null)\b', Keyword.Constant),
# FloatLiteral
(r'([0-9][0-9_]*)(?=[.eE])(\.[0-9][0-9_]*)?([eE][+\-]?[0-9_]+)?', Number.Float),
# IntegerLiteral
# -- Binary
(r'0[bB][01][01_]*', Number),
# -- Hexadecimal
(r'0[xX][0-9a-fA-F][0-9a-fA-F_]*', Number.Hex),
# -- Decimal
(r'([0-9][0-9_]*)(?![.eE])', Number.Integer),
# CharacterLiteral
(r"""'(\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\[0-9]{1,3}"""
r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|.)'""",
String.Char
),
# StringLiteral
# -- WysiwygString
(r'@"(""|[^"])*"', String),
(r'@`(``|[^`])*`', String),
(r"@'(''|[^'])*'", String),
# -- DoubleQuotedString
(r'"(\\\\|\\"|[^"])*"', String),
# Tokens
(
r'(~=|\^=|%=|\*=|==|!=|>>>=|>>>|>>=|>>|>=|<=>|\?=|-\>'
r'|<<=|<<|<=|\+\+|\+=|--|-=|\|\||\|=|&&|&=|\.\.|/=)'
r'|[-/.&$@|\+<>!()\[\]{}?,;:=*%^~#\\]', Punctuation
),
# Identifier
(r'[a-zA-Z_]\w*', Name),
],
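        # Block comments nest: each "/*" pushes this state again and the
        # matching "*/" pops one level, so "/* a /* b */ c */" is all
        # one comment.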
'nestedcomment': [
(r'[^*/]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline),
],
}
class MiniDLexer(CrocLexer):
"""
For MiniD source. MiniD is now known as Croc.
"""
name = 'MiniD'
filenames = ['*.md']
aliases = ['minid']
mimetypes = ['text/x-minidsrc']
class IoLexer(RegexLexer):
"""
For `Io <http://iolanguage.com/>`_ (a small, prototype-based
programming language) source.
*New in Pygments 0.10.*
"""
name = 'Io'
filenames = ['*.io']
aliases = ['io']
mimetypes = ['text/x-iosrc']
tokens = {
'root': [
(r'\n', Text),
(r'\s+', Text),
# Comments
(r'//(.*?)\n', Comment.Single),
(r'#(.*?)\n', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'/\+', Comment.Multiline, 'nestedcomment'),
# DoubleQuotedString
(r'"(\\\\|\\"|[^"])*"', String),
# Operators
(r'::=|:=|=|\(|\)|;|,|\*|-|\+|>|<|@|!|/|\||\^|\.|%|&|\[|\]|\{|\}',
Operator),
# keywords
(r'(clone|do|doFile|doString|method|for|if|else|elseif|then)\b',
Keyword),
# constants
(r'(nil|false|true)\b', Name.Constant),
# names
(r'(Object|list|List|Map|args|Sequence|Coroutine|File)\b',
Name.Builtin),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
# numbers
(r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+', Number.Integer)
],
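        # "/+ ... +/" comments nest: each "/+" pushes this state again
        # and "+/" pops one level.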
'nestedcomment': [
(r'[^+/]+', Comment.Multiline),
(r'/\+', Comment.Multiline, '#push'),
(r'\+/', Comment.Multiline, '#pop'),
(r'[+/]', Comment.Multiline),
]
}
class TclLexer(RegexLexer):
"""
For Tcl source code.
*New in Pygments 0.10.*
"""
keyword_cmds_re = (
r'\b(after|apply|array|break|catch|continue|elseif|else|error|'
r'eval|expr|for|foreach|global|if|namespace|proc|rename|return|'
r'set|switch|then|trace|unset|update|uplevel|upvar|variable|'
r'vwait|while)\b'
)
builtin_cmds_re = (
r'\b(append|bgerror|binary|cd|chan|clock|close|concat|dde|dict|'
r'encoding|eof|exec|exit|fblocked|fconfigure|fcopy|file|'
r'fileevent|flush|format|gets|glob|history|http|incr|info|interp|'
r'join|lappend|lassign|lindex|linsert|list|llength|load|loadTk|'
r'lrange|lrepeat|lreplace|lreverse|lsearch|lset|lsort|mathfunc|'
r'mathop|memory|msgcat|open|package|pid|pkg::create|pkg_mkIndex|'
r'platform|platform::shell|puts|pwd|re_syntax|read|refchan|'
r'regexp|registry|regsub|scan|seek|socket|source|split|string|'
r'subst|tell|time|tm|unknown|unload)\b'
)
name = 'Tcl'
aliases = ['tcl']
filenames = ['*.tcl']
mimetypes = ['text/x-tcl', 'text/x-script.tcl', 'application/x-tcl']
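    # Helper that builds the command-matching rules once per bracketing
    # context; the context suffix ("", "-in-brace", "-in-bracket",
    # "-in-paren") selects which 'params*' state a command pushes, so the
    # matching closer can pop back out correctly.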
def _gen_command_rules(keyword_cmds_re, builtin_cmds_re, context=""):
return [
(keyword_cmds_re, Keyword, 'params' + context),
(builtin_cmds_re, Name.Builtin, 'params' + context),
(r'([\w\.\-]+)', Name.Variable, 'params' + context),
(r'#', Comment, 'comment'),
]
tokens = {
'root': [
include('command'),
include('basic'),
include('data'),
(r'}', Keyword), # HACK: somehow we miscounted our braces
],
'command': _gen_command_rules(keyword_cmds_re, builtin_cmds_re),
'command-in-brace': _gen_command_rules(keyword_cmds_re,
builtin_cmds_re,
"-in-brace"),
'command-in-bracket': _gen_command_rules(keyword_cmds_re,
builtin_cmds_re,
"-in-bracket"),
'command-in-paren': _gen_command_rules(keyword_cmds_re,
builtin_cmds_re,
"-in-paren"),
'basic': [
(r'\(', Keyword, 'paren'),
(r'\[', Keyword, 'bracket'),
(r'\{', Keyword, 'brace'),
(r'"', String.Double, 'string'),
(r'(eq|ne|in|ni)\b', Operator.Word),
(r'!=|==|<<|>>|<=|>=|&&|\|\||\*\*|[-+~!*/%<>&^|?:]', Operator),
],
'data': [
(r'\s+', Text),
(r'0x[a-fA-F0-9]+', Number.Hex),
(r'0[0-7]+', Number.Oct),
(r'\d+\.\d+', Number.Float),
(r'\d+', Number.Integer),
(r'\$([\w\.\-\:]+)', Name.Variable),
(r'([\w\.\-\:]+)', Text),
],
'params': [
(r';', Keyword, '#pop'),
(r'\n', Text, '#pop'),
(r'(else|elseif|then)\b', Keyword),
include('basic'),
include('data'),
],
'params-in-brace': [
(r'}', Keyword, ('#pop', '#pop')),
include('params')
],
'params-in-paren': [
(r'\)', Keyword, ('#pop', '#pop')),
include('params')
],
'params-in-bracket': [
(r'\]', Keyword, ('#pop', '#pop')),
include('params')
],
'string': [
(r'\[', String.Double, 'string-square'),
(r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\])', String.Double),
(r'"', String.Double, '#pop')
],
'string-square': [
(r'\[', String.Double, 'string-square'),
(r'(?s)(\\\\|\\[0-7]+|\\.|\\\n|[^\]\\])', String.Double),
(r'\]', String.Double, '#pop')
],
'brace': [
(r'}', Keyword, '#pop'),
include('command-in-brace'),
include('basic'),
include('data'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('command-in-paren'),
include('basic'),
include('data'),
],
'bracket': [
(r'\]', Keyword, '#pop'),
include('command-in-bracket'),
include('basic'),
include('data'),
],
'comment': [
(r'.*[^\\]\n', Comment, '#pop'),
(r'.*\\\n', Comment),
],
}
def analyse_text(text):
return shebang_matches(text, r'(tcl)')
class FactorLexer(RegexLexer):
"""
Lexer for the `Factor <http://factorcode.org>`_ language.
*New in Pygments 1.4.*
"""
name = 'Factor'
aliases = ['factor']
filenames = ['*.factor']
mimetypes = ['text/x-factor']
flags = re.MULTILINE | re.UNICODE
builtin_kernel = (
r'(?:or|2bi|2tri|while|wrapper|nip|4dip|wrapper\\?|bi\\*|'
r'callstack>array|both\\?|hashcode|die|dupd|callstack|'
r'callstack\\?|3dup|tri@|pick|curry|build|\\?execute|3bi|'
r'prepose|>boolean|\\?if|clone|eq\\?|tri\\*|\\?|=|swapd|'
r'2over|2keep|3keep|clear|2dup|when|not|tuple\\?|dup|2bi\\*|'
r'2tri\\*|call|tri-curry|object|bi@|do|unless\\*|if\\*|loop|'
r'bi-curry\\*|drop|when\\*|assert=|retainstack|assert\\?|-rot|'
r'execute|2bi@|2tri@|boa|with|either\\?|3drop|bi|curry\\?|'
r'datastack|until|3dip|over|3curry|tri-curry\\*|tri-curry@|swap|'
r'and|2nip|throw|bi-curry|\\(clone\\)|hashcode\\*|compose|2dip|if|3tri|'
r'unless|compose\\?|tuple|keep|2curry|equal\\?|assert|tri|2drop|'
r'most|<wrapper>|boolean\\?|identity-hashcode|identity-tuple\\?|'
r'null|new|dip|bi-curry@|rot|xor|identity-tuple|boolean)\s'
)
builtin_assocs = (
r'(?:\\?at|assoc\\?|assoc-clone-like|assoc=|delete-at\\*|'
r'assoc-partition|extract-keys|new-assoc|value\\?|assoc-size|'
r'map>assoc|push-at|assoc-like|key\\?|assoc-intersect|'
r'assoc-refine|update|assoc-union|assoc-combine|at\\*|'
r'assoc-empty\\?|at\\+|set-at|assoc-all\\?|assoc-subset\\?|'
r'assoc-hashcode|change-at|assoc-each|assoc-diff|zip|values|'
r'value-at|rename-at|inc-at|enum\\?|at|cache|assoc>map|<enum>|'
r'assoc|assoc-map|enum|value-at\\*|assoc-map-as|>alist|'
r'assoc-filter-as|clear-assoc|assoc-stack|maybe-set-at|'
r'substitute|assoc-filter|2cache|delete-at|assoc-find|keys|'
r'assoc-any\\?|unzip)\s'
)
builtin_combinators = (
r'(?:case|execute-effect|no-cond|no-case\\?|3cleave>quot|2cleave|'
r'cond>quot|wrong-values\\?|no-cond\\?|cleave>quot|no-case|'
r'case>quot|3cleave|wrong-values|to-fixed-point|alist>quot|'
r'case-find|cond|cleave|call-effect|2cleave>quot|recursive-hashcode|'
r'linear-case-quot|spread|spread>quot)\s'
)
builtin_math = (
r'(?:number=|if-zero|next-power-of-2|each-integer|\\?1\\+|'
r'fp-special\\?|imaginary-part|unless-zero|float>bits|number\\?|'
r'fp-infinity\\?|bignum\\?|fp-snan\\?|denominator|fp-bitwise=|\\*|'
r'\\+|power-of-2\\?|-|u>=|/|>=|bitand|log2-expects-positive|<|'
r'log2|>|integer\\?|number|bits>double|2/|zero\\?|(find-integer)|'
r'bits>float|float\\?|shift|ratio\\?|even\\?|ratio|fp-sign|bitnot|'
r'>fixnum|complex\\?|/i|/f|byte-array>bignum|when-zero|sgn|>bignum|'
r'next-float|u<|u>|mod|recip|rational|find-last-integer|>float|'
r'(all-integers\\?)|2^|times|integer|fixnum\\?|neg|fixnum|sq|'
r'bignum|(each-integer)|bit\\?|fp-qnan\\?|find-integer|complex|'
r'<fp-nan>|real|double>bits|bitor|rem|fp-nan-payload|all-integers\\?|'
r'real-part|log2-expects-positive\\?|prev-float|align|unordered\\?|'
r'float|fp-nan\\?|abs|bitxor|u<=|odd\\?|<=|/mod|rational\\?|>integer|'
r'real\\?|numerator)\s'
)
builtin_sequences = (
r'(?:member-eq\\?|append|assert-sequence=|find-last-from|trim-head-slice|'
r'clone-like|3sequence|assert-sequence\\?|map-as|last-index-from|'
r'reversed|index-from|cut\\*|pad-tail|remove-eq!|concat-as|'
r'but-last|snip|trim-tail|nths|nth|2selector|sequence|slice\\?|'
r'<slice>|partition|remove-nth|tail-slice|empty\\?|tail\\*|'
r'if-empty|find-from|virtual-sequence\\?|member\\?|set-length|'
r'drop-prefix|unclip|unclip-last-slice|iota|map-sum|'
r'bounds-error\\?|sequence-hashcode-step|selector-for|'
r'accumulate-as|map|start|midpoint@|\\(accumulate\\)|rest-slice|'
r'prepend|fourth|sift|accumulate!|new-sequence|follow|map!|'
r'like|first4|1sequence|reverse|slice|unless-empty|padding|'
r'virtual@|repetition\\?|set-last|index|4sequence|max-length|'
r'set-second|immutable-sequence|first2|first3|replicate-as|'
r'reduce-index|unclip-slice|supremum|suffix!|insert-nth|'
r'trim-tail-slice|tail|3append|short|count|suffix|concat|'
r'flip|filter|sum|immutable\\?|reverse!|2sequence|map-integers|'
r'delete-all|start\\*|indices|snip-slice|check-slice|sequence\\?|'
r'head|map-find|filter!|append-as|reduce|sequence=|halves|'
r'collapse-slice|interleave|2map|filter-as|binary-reduce|'
r'slice-error\\?|product|bounds-check\\?|bounds-check|harvest|'
r'immutable|virtual-exemplar|find|produce|remove|pad-head|last|'
r'replicate|set-fourth|remove-eq|shorten|reversed\\?|'
r'map-find-last|3map-as|2unclip-slice|shorter\\?|3map|find-last|'
r'head-slice|pop\\*|2map-as|tail-slice\\*|but-last-slice|'
r'2map-reduce|iota\\?|collector-for|accumulate|each|selector|'
r'append!|new-resizable|cut-slice|each-index|head-slice\\*|'
r'2reverse-each|sequence-hashcode|pop|set-nth|\\?nth|'
r'<flat-slice>|second|join|when-empty|collector|'
r'immutable-sequence\\?|<reversed>|all\\?|3append-as|'
r'virtual-sequence|subseq\\?|remove-nth!|push-either|new-like|'
r'length|last-index|push-if|2all\\?|lengthen|assert-sequence|'
r'copy|map-reduce|move|third|first|3each|tail\\?|set-first|'
r'prefix|bounds-error|any\\?|<repetition>|trim-slice|exchange|'
r'surround|2reduce|cut|change-nth|min-length|set-third|produce-as|'
r'push-all|head\\?|delete-slice|rest|sum-lengths|2each|head\\*|'
r'infimum|remove!|glue|slice-error|subseq|trim|replace-slice|'
r'push|repetition|map-index|trim-head|unclip-last|mismatch)\s'
)
builtin_namespaces = (
r'(?:global|\\+@|change|set-namestack|change-global|init-namespaces|'
r'on|off|set-global|namespace|set|with-scope|bind|with-variable|'
r'inc|dec|counter|initialize|namestack|get|get-global|make-assoc)\s'
)
builtin_arrays = (
r'(?:<array>|2array|3array|pair|>array|1array|4array|pair\\?|'
r'array|resize-array|array\\?)\s'
)
builtin_io = (
r'(?:\\+character\\+|bad-seek-type\\?|readln|each-morsel|stream-seek|'
r'read|print|with-output-stream|contents|write1|stream-write1|'
r'stream-copy|stream-element-type|with-input-stream|'
r'stream-print|stream-read|stream-contents|stream-tell|'
r'tell-output|bl|seek-output|bad-seek-type|nl|stream-nl|write|'
r'flush|stream-lines|\\+byte\\+|stream-flush|read1|'
r'seek-absolute\\?|stream-read1|lines|stream-readln|'
r'stream-read-until|each-line|seek-end|with-output-stream\\*|'
r'seek-absolute|with-streams|seek-input|seek-relative\\?|'
r'input-stream|stream-write|read-partial|seek-end\\?|'
r'seek-relative|error-stream|read-until|with-input-stream\\*|'
r'with-streams\\*|tell-input|each-block|output-stream|'
r'stream-read-partial|each-stream-block|each-stream-line)\s'
)
builtin_strings = (
r'(?:resize-string|>string|<string>|1string|string|string\\?)\s'
)
builtin_vectors = (
r'(?:vector\\?|<vector>|\\?push|vector|>vector|1vector)\s'
)
builtin_continuations = (
r'(?:with-return|restarts|return-continuation|with-datastack|'
r'recover|rethrow-restarts|<restart>|ifcc|set-catchstack|'
r'>continuation<|cleanup|ignore-errors|restart\\?|'
r'compute-restarts|attempt-all-error|error-thread|continue|'
r'<continuation>|attempt-all-error\\?|condition\\?|'
r'<condition>|throw-restarts|error|catchstack|continue-with|'
r'thread-error-hook|continuation|rethrow|callcc1|'
r'error-continuation|callcc0|attempt-all|condition|'
r'continuation\\?|restart|return)\s'
)
tokens = {
'root': [
# TODO: (( inputs -- outputs ))
# TODO: << ... >>
# defining words
(r'(\s*)(:|::|MACRO:|MEMO:)(\s+)(\S+)',
bygroups(Text, Keyword, Text, Name.Function)),
(r'(\s*)(M:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Text, Keyword, Text, Name.Class, Text, Name.Function)),
(r'(\s*)(GENERIC:)(\s+)(\S+)',
bygroups(Text, Keyword, Text, Name.Function)),
(r'(\s*)(HOOK:|GENERIC#)(\s+)(\S+)(\s+)(\S+)',
bygroups(Text, Keyword, Text, Name.Function, Text, Name.Function)),
(r'(\()(\s+)', bygroups(Name.Function, Text), 'stackeffect'),
(r'\;\s', Keyword),
# imports and namespaces
(r'(USING:)((?:\s|\\\s)+)',
bygroups(Keyword.Namespace, Text), 'import'),
(r'(USE:)(\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Namespace)),
(r'(UNUSE:)(\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Namespace)),
(r'(QUALIFIED:)(\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Namespace)),
(r'(QUALIFIED-WITH:)(\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Namespace)),
(r'(FROM:|EXCLUDE:)(\s+)(\S+)(\s+)(=>)',
bygroups(Keyword.Namespace, Text, Name.Namespace, Text, Text)),
(r'(IN:)(\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Namespace)),
(r'(?:ALIAS|DEFER|FORGET|POSTPONE):', Keyword.Namespace),
# tuples and classes
(r'(TUPLE:)(\s+)(\S+)(\s+<\s+)(\S+)',
bygroups(Keyword, Text, Name.Class, Text, Name.Class), 'slots'),
(r'(TUPLE:)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Class), 'slots'),
(r'(UNION:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Class)),
(r'(INTERSECTION:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Class)),
(r'(PREDICATE:)(\s+)(\S+)(\s+<\s+)(\S+)',
bygroups(Keyword, Text, Name.Class, Text, Name.Class)),
(r'(C:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Function, Text, Name.Class)),
(r'INSTANCE:', Keyword),
(r'SLOT:', Keyword),
(r'MIXIN:', Keyword),
(r'(?:SINGLETON|SINGLETONS):', Keyword),
# other syntax
(r'CONSTANT:', Keyword),
(r'(?:SYMBOL|SYMBOLS):', Keyword),
(r'ERROR:', Keyword),
(r'SYNTAX:', Keyword),
(r'(HELP:)(\s+)(\S+)', bygroups(Keyword, Text, Name.Function)),
(r'(MAIN:)(\s+)(\S+)',
bygroups(Keyword.Namespace, Text, Name.Function)),
(r'(?:ALIEN|TYPEDEF|FUNCTION|STRUCT):', Keyword),
# vocab.private
# TODO: words inside vocab.private should have red names?
(r'(?:<PRIVATE|PRIVATE>)', Keyword.Namespace),
# strings
(r'"""\s+(?:.|\n)*?\s+"""', String),
(r'"(?:\\\\|\\"|[^"])*"', String),
(r'CHAR:\s+(\\[\\abfnrstv]*|\S)\s', String.Char),
# comments
(r'\!\s+.*$', Comment),
(r'#\!\s+.*$', Comment),
# boolean constants
(r'(t|f)\s', Name.Constant),
# numbers
(r'-?\d+\.\d+\s', Number.Float),
(r'-?\d+\s', Number.Integer),
(r'HEX:\s+[a-fA-F\d]+\s', Number.Hex),
(r'BIN:\s+[01]+\s', Number.Integer),
(r'OCT:\s+[0-7]+\s', Number.Oct),
# operators
(r'[-+/*=<>^]\s', Operator),
# keywords
(r'(?:deprecated|final|foldable|flushable|inline|recursive)\s',
Keyword),
# builtins
(builtin_kernel, Name.Builtin),
(builtin_assocs, Name.Builtin),
(builtin_combinators, Name.Builtin),
(builtin_math, Name.Builtin),
(builtin_sequences, Name.Builtin),
(builtin_namespaces, Name.Builtin),
(builtin_arrays, Name.Builtin),
(builtin_io, Name.Builtin),
(builtin_strings, Name.Builtin),
(builtin_vectors, Name.Builtin),
(builtin_continuations, Name.Builtin),
# whitespaces - usually not relevant
(r'\s+', Text),
# everything else is text
(r'\S+', Text),
],
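        # stack effect declarations, e.g. "( seq quot -- newseq )";
        # nested parentheses push this state again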
'stackeffect': [
(r'\s*\(', Name.Function, 'stackeffect'),
(r'\)', Name.Function, '#pop'),
(r'\-\-', Name.Function),
(r'\s+', Text),
(r'\S+', Name.Variable),
],
'slots': [
(r'\s+', Text),
(r';\s', Keyword, '#pop'),
(r'\S+', Name.Variable),
],
'import': [
(r';', Keyword, '#pop'),
(r'\S+', Name.Namespace),
(r'\s+', Text),
],
}
class FancyLexer(RegexLexer):
"""
    Pygments lexer for `Fancy <http://www.fancy-lang.org/>`_.
Fancy is a self-hosted, pure object-oriented, dynamic,
class-based, concurrent general-purpose programming language
running on Rubinius, the Ruby VM.
*New in Pygments 1.5.*
"""
name = 'Fancy'
filenames = ['*.fy', '*.fancypack']
aliases = ['fancy', 'fy']
mimetypes = ['text/x-fancysrc']
tokens = {
# copied from PerlLexer:
'balanced-regex': [
(r'/(\\\\|\\/|[^/])*/[egimosx]*', String.Regex, '#pop'),
(r'!(\\\\|\\!|[^!])*![egimosx]*', String.Regex, '#pop'),
(r'\\(\\\\|[^\\])*\\[egimosx]*', String.Regex, '#pop'),
(r'{(\\\\|\\}|[^}])*}[egimosx]*', String.Regex, '#pop'),
(r'<(\\\\|\\>|[^>])*>[egimosx]*', String.Regex, '#pop'),
(r'\[(\\\\|\\\]|[^\]])*\][egimosx]*', String.Regex, '#pop'),
(r'\((\\\\|\\\)|[^\)])*\)[egimosx]*', String.Regex, '#pop'),
(r'@(\\\\|\\\@|[^\@])*@[egimosx]*', String.Regex, '#pop'),
(r'%(\\\\|\\\%|[^\%])*%[egimosx]*', String.Regex, '#pop'),
(r'\$(\\\\|\\\$|[^\$])*\$[egimosx]*', String.Regex, '#pop'),
],
'root': [
(r'\s+', Text),
# balanced delimiters (copied from PerlLexer):
(r's{(\\\\|\\}|[^}])*}\s*', String.Regex, 'balanced-regex'),
(r's<(\\\\|\\>|[^>])*>\s*', String.Regex, 'balanced-regex'),
(r's\[(\\\\|\\\]|[^\]])*\]\s*', String.Regex, 'balanced-regex'),
(r's\((\\\\|\\\)|[^\)])*\)\s*', String.Regex, 'balanced-regex'),
(r'm?/(\\\\|\\/|[^/\n])*/[gcimosx]*', String.Regex),
(r'm(?=[/!\\{<\[\(@%\$])', String.Regex, 'balanced-regex'),
# Comments
(r'#(.*?)\n', Comment.Single),
# Symbols
(r'\'([^\'\s\[\]\(\)\{\}]+|\[\])', String.Symbol),
# Multi-line DoubleQuotedString
(r'"""(\\\\|\\"|[^"])*"""', String),
# DoubleQuotedString
(r'"(\\\\|\\"|[^"])*"', String),
# keywords
(r'(def|class|try|catch|finally|retry|return|return_local|match|'
r'case|->|=>)\b', Keyword),
# constants
(r'(self|super|nil|false|true)\b', Name.Constant),
(r'[(){};,/?\|:\\]', Punctuation),
# names
(r'(Object|Array|Hash|Directory|File|Class|String|Number|'
r'Enumerable|FancyEnumerable|Block|TrueClass|NilClass|'
r'FalseClass|Tuple|Symbol|Stack|Set|FancySpec|Method|Package|'
r'Range)\b', Name.Builtin),
# functions
(r'[a-zA-Z]([a-zA-Z0-9_]|[-+?!=*/^><%])*:', Name.Function),
# operators, must be below functions
(r'[-+*/~,<>=&!?%^\[\]\.$]+', Operator),
('[A-Z][a-zA-Z0-9_]*', Name.Constant),
('@[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable.Instance),
('@@[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable.Class),
('@@?', Operator),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
# numbers - / checks are necessary to avoid mismarking regexes,
# see comment in RubyLexer
(r'(0[oO]?[0-7]+(?:_[0-7]+)*)(\s*)([/?])?',
bygroups(Number.Oct, Text, Operator)),
(r'(0[xX][0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*)(\s*)([/?])?',
bygroups(Number.Hex, Text, Operator)),
(r'(0[bB][01]+(?:_[01]+)*)(\s*)([/?])?',
bygroups(Number.Bin, Text, Operator)),
(r'([\d]+(?:_\d+)*)(\s*)([/?])?',
bygroups(Number.Integer, Text, Operator)),
(r'\d+([eE][+-]?[0-9]+)|\d+\.\d+([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+', Number.Integer)
]
}
| |
# Copyright (c) 2013 - 2021 Robin Malburn
# See the file license.txt for copying permission.
import sublime # type: ignore
from unittesting import DeferrableTestCase # type: ignore
from os import path
from typing import List, Tuple, Dict, Generator
try:
from lib import settings, entities
except ImportError:
    # If we're running these tests in UnitTesting, then we need to import
    # via the package name - Tab Filter - so let's use importlib and try again.
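    # import_module resolves the leading dot in ".lib.settings" against
    # the package name passed as the second argument.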
from importlib import import_module
settings = import_module(".lib.settings", "Tab Filter")
entities = import_module(".lib.entities", "Tab Filter")
TabSetting = settings.TabSetting
ShowCaptionsTabSetting = settings.ShowCaptionsTabSetting
IncludePathTabSetting = settings.IncludePathTabSetting
ShowGroupCaptionTabSetting = settings.ShowGroupCaptionTabSetting
Tab = entities.Tab
DEFAULT_SETINGS = settings.DEFAULT_SETINGS
class BaseSettingsTestCase(DeferrableTestCase):
"""Base settings test case to set up boiler plate methods."""
settings: sublime.Settings
layout: Dict[str, List]
def setUp(self) -> None:
self.settings = sublime.load_settings("tabfilter.sublime-settings")
self.layout = sublime.active_window().layout()
for setting in DEFAULT_SETINGS:
self.settings.set(setting, DEFAULT_SETINGS[setting])
# Close any existing views so as to avoid polluting the results.
for view in sublime.active_window().views():
view.window().focus_view(view)
view.window().run_command("close_file")
def tearDown(self) -> None:
# Restore the original layout
sublime.active_window().set_layout(self.layout)
for view in sublime.active_window().views():
view.window().focus_view(view)
view.set_scratch(True)
view.window().run_command("close_file")
class DefaultSettingsTestCase(BaseSettingsTestCase):
def test_defaults(self) -> None:
"""Tests that the default settings are honoured."""
scratch_view: sublime.View = sublime.active_window().new_file()
tabs: List[Tab] = [Tab(scratch_view)]
data_set: Tuple[Tuple[TabSetting, bool, str], ...] = (
(
ShowCaptionsTabSetting,
DEFAULT_SETINGS["show_captions"],
"show_captions"
),
(
IncludePathTabSetting,
DEFAULT_SETINGS["include_path"],
"include_path"
),
(
ShowGroupCaptionTabSetting,
DEFAULT_SETINGS["show_group_caption"],
"show_group_caption"
)
)
for (cls, enabled, caption) in data_set:
with self.subTest(cls=cls, enabled=enabled, caption=caption):
inst = cls(
self.settings,
sublime.active_window()
) # type: ignore
self.assertEqual(enabled, inst.is_enabled())
self.assertListEqual(tabs, inst.apply(tabs))
class ShowCaptionsTabSettingTestCase(BaseSettingsTestCase):
"""Tests the Show Captions Tab Settings."""
def test_setting_disabled(self) -> None:
"""Tests with the setting disabled."""
self.settings.set("show_captions", False)
setting: ShowCaptionsTabSetting = ShowCaptionsTabSetting(
self.settings,
sublime.active_window()
)
scratch_view: sublime.View = sublime.active_window().new_file()
tabs: List[Tab] = [Tab(scratch_view)]
self.assertFalse(setting.is_enabled())
self.assertListEqual(tabs, setting.apply(tabs))
self.assertListEqual([], tabs[0].get_captions())
def test_current_file(self) -> Generator[int, None, None]:
"""Tests detecting current file."""
setting: ShowCaptionsTabSetting = ShowCaptionsTabSetting(
self.settings,
sublime.active_window()
)
dir: str = path.dirname(__file__)
foo_fixture: str = path.normpath(
path.join(dir, "./fixtures/foo.txt")
)
bar_fixture: str = path.normpath(
path.join(dir, "./fixtures/bar.txt")
)
foo_view: sublime.View = sublime.active_window().open_file(foo_fixture)
bar_view: sublime.View = sublime.active_window().open_file(bar_fixture)
data_set: Tuple[Tuple[List[Tab], List[List[str]], sublime.View], ...]
data_set = (
([Tab(foo_view), Tab(bar_view)], [['Current File'], []], foo_view),
([Tab(foo_view), Tab(bar_view)], [[], ['Current File']], bar_view),
)
yield 100
for (tabs, captions, view) in data_set:
            with self.subTest(tabs=tabs, captions=captions, view=view):
view.window().focus_view(view)
self.assertTrue(setting.is_enabled())
self.assertListEqual(tabs, setting.apply(tabs))
actual = []
for tab in tabs:
actual.append(tab.get_captions())
self.assertListEqual(captions, actual)
def test_unsaved_file(self) -> None:
"""Tests detecting unsaved files."""
setting: ShowCaptionsTabSetting = ShowCaptionsTabSetting(
self.settings,
sublime.active_window()
)
scratch_view: sublime.View = sublime.active_window().new_file()
tabs: List[Tab] = [Tab(scratch_view)]
self.assertTrue(setting.is_enabled())
self.assertListEqual(tabs, setting.apply(tabs))
self.assertListEqual(
["Current File", "Unsaved File"],
tabs[0].get_captions()
)
def test_unsaved_changes(self) -> Generator[int, None, None]:
"""Tests detecting unsaved changes."""
setting: ShowCaptionsTabSetting = ShowCaptionsTabSetting(
self.settings,
sublime.active_window()
)
dir: str = path.dirname(__file__)
foo_fixture: str = path.normpath(
path.join(dir, "./fixtures/foo.txt")
)
foo_view: sublime.View = sublime.active_window().open_file(foo_fixture)
sublime.set_timeout(
lambda: foo_view.run_command("insert", {"characters": "foo"}),
100
)
yield 100
tabs: List[Tab] = [Tab(foo_view)]
self.assertTrue(setting.is_enabled())
self.assertListEqual(tabs, setting.apply(tabs))
self.assertListEqual(
["Current File", "Unsaved Changes"],
tabs[0].get_captions()
)
def test_read_only(self) -> None:
"""Tests detecting read only views."""
setting: ShowCaptionsTabSetting = ShowCaptionsTabSetting(
self.settings,
sublime.active_window()
)
scratch_view: sublime.View = sublime.active_window().new_file()
scratch_view.set_read_only(True)
tabs: List[Tab] = [Tab(scratch_view)]
self.assertTrue(setting.is_enabled())
self.assertListEqual(tabs, setting.apply(tabs))
self.assertListEqual(
["Current File", "Unsaved File", "Read Only"],
tabs[0].get_captions()
)
class IncludePathTabSettingTestCase(BaseSettingsTestCase):
"""Tests the Include path Tab Settings."""
def test_setting_disabled(self) -> Generator[int, None, None]:
"""Tests with the setting disabled."""
self.settings.set("include_path", False)
setting: IncludePathTabSetting = IncludePathTabSetting(
self.settings,
sublime.active_window()
)
dir: str = path.dirname(__file__)
foo_fixture: str = path.normpath(
path.join(dir, "./fixtures/foo.txt")
)
expected = path.basename(foo_fixture)
foo_view: sublime.View = sublime.active_window().open_file(foo_fixture)
tabs: List[Tab] = [Tab(foo_view)]
yield 100
self.assertFalse(setting.is_enabled())
self.assertListEqual(tabs, setting.apply(tabs))
self.assertEqual(expected, tabs[0].get_title())
self.assertEqual(foo_fixture, tabs[0].get_subtitle())
def test_with_file_view(self) -> Generator[int, None, None]:
"""Tests with the setting disabled."""
self.settings.set("include_path", True)
setting: IncludePathTabSetting = IncludePathTabSetting(
self.settings,
sublime.active_window()
)
dir: str = path.dirname(__file__)
foo_fixture: str = path.normpath(
path.join(dir, "./fixtures/foo.txt")
)
foo_view: sublime.View = sublime.active_window().open_file(foo_fixture)
tabs: List[Tab] = [Tab(foo_view)]
yield 100
self.assertTrue(setting.is_enabled())
self.assertListEqual(tabs, setting.apply(tabs))
self.assertEqual(foo_fixture, tabs[0].get_title())
self.assertEqual(foo_fixture, tabs[0].get_subtitle())
class ShowGroupCaptionsTabSettingTestCase(BaseSettingsTestCase):
"""Tests the Show Group Captions Tab Settings."""
def test_setting_disabled(self) -> None:
"""Tests with the setting disabled."""
self.settings.set("show_group_caption", False)
setting: ShowGroupCaptionTabSetting = ShowGroupCaptionTabSetting(
self.settings,
sublime.active_window()
)
scratch_view: sublime.View = sublime.active_window().new_file()
tabs: List[Tab] = [Tab(scratch_view)]
self.assertFalse(setting.is_enabled())
self.assertListEqual(tabs, setting.apply(tabs))
self.assertListEqual([], tabs[0].get_captions())
def test_single_group(self) -> None:
"""Tests applying to a single group (no caption expected)."""
self.settings.set("show_group_caption", True)
setting: ShowGroupCaptionTabSetting = ShowGroupCaptionTabSetting(
self.settings,
sublime.active_window()
)
scratch_view: sublime.View = sublime.active_window().new_file()
tabs: List[Tab] = [Tab(scratch_view)]
# single column layout
layout: Dict[str, List] = {
"cells": [[0, 0, 1, 1]],
"cols": [0.0, 1.0],
"rows": [0.0, 1.0]
}
sublime.active_window().set_layout(layout)
self.assertFalse(setting.is_enabled())
self.assertListEqual(tabs, setting.apply(tabs))
self.assertListEqual([], tabs[0].get_captions())
def test_multiple_groups(self) -> None:
"""Tests applying to multiple groups."""
self.settings.set("show_group_caption", True)
setting: ShowGroupCaptionTabSetting = ShowGroupCaptionTabSetting(
self.settings,
sublime.active_window()
)
scratch_view: sublime.View = sublime.active_window().new_file()
second_view: sublime.View = sublime.active_window().new_file()
# 2 column layout
layout: Dict[str, List] = {
"cells": [[0, 0, 1, 1], [1, 0, 2, 1]],
"cols": [0.0, 0.5, 1.0],
"rows": [0.0, 1.0]
}
sublime.active_window().set_layout(layout)
sublime.active_window().set_view_index(scratch_view, group=0, idx=0)
sublime.active_window().set_view_index(second_view, group=1, idx=0)
tabs: List[Tab] = [Tab(scratch_view), Tab(second_view)]
self.assertTrue(setting.is_enabled())
self.assertListEqual(tabs, setting.apply(tabs))
captions: List[List[str]] = [tab.get_captions() for tab in tabs]
self.assertListEqual([["Group: 1"], ["Group: 2"]], captions)
| |
# pylint: disable=no-self-use,unused-variable,expression-not-assigned
from unittest.mock import Mock, patch
import log
import pytest
from expecter import expect
from gitman import cli
from gitman.common import _Config
from gitman.exceptions import ScriptFailure, UncommittedChanges
class TestMain:
"""Unit tests for the top-level arguments."""
def test_main(self):
"""Verify the top-level command can be run."""
mock_function = Mock(return_value=True)
cli.main([], mock_function)
mock_function.assert_called_once_with()
def test_main_fail(self):
"""Verify error in commands are detected."""
with pytest.raises(SystemExit):
cli.main([], Mock(return_value=False))
def test_main_help(self):
"""Verify the help text can be displayed."""
with pytest.raises(SystemExit):
cli.main(['--help'])
def test_main_none(self):
"""Verify it's an error to specify no command."""
with pytest.raises(SystemExit):
cli.main([])
def test_main_interrupt(self):
"""Verify a command can be interrupted."""
with pytest.raises(SystemExit):
cli.main([], Mock(side_effect=KeyboardInterrupt))
def test_main_error(self):
"""Verify runtime errors are handled."""
with pytest.raises(SystemExit):
cli.main([], Mock(side_effect=UncommittedChanges))
with pytest.raises(SystemExit):
cli.main([], Mock(side_effect=ScriptFailure))
class TestInit:
"""Unit tests for the `init` command."""
@patch('gitman.commands.init')
def test_init(self, mock_init):
"""Verify the 'init' command can be run."""
cli.main(['init'])
mock_init.assert_called_once_with(force=False)
class TestInstall:
"""Unit tests for the `install` command."""
@patch('gitman.commands.install')
def test_install(self, mock_install):
"""Verify the 'install' command can be run."""
cli.main(['install'])
mock_install.assert_called_once_with(
root=None,
depth=5,
force=False,
force_interactive=False,
fetch=False,
clean=False,
skip_changes=False,
skip_default_group=False,
)
@patch('gitman.commands.install')
def test_install_root(self, mock_install):
"""Verify the project's root can be specified."""
cli.main(['install', '--root', 'mock/path/to/root'])
mock_install.assert_called_once_with(
root='mock/path/to/root',
depth=5,
force=False,
force_interactive=False,
fetch=False,
clean=False,
skip_changes=False,
skip_default_group=False,
)
@patch('gitman.commands.install')
def test_install_force(self, mock_install):
"""Verify dependencies can be force-installed."""
cli.main(['install', '--force'])
mock_install.assert_called_once_with(
root=None,
depth=5,
force=True,
force_interactive=False,
fetch=False,
clean=False,
skip_changes=False,
skip_default_group=False,
)
@patch('gitman.commands.install')
def test_install_fetch(self, mock_install):
"""Verify fetching can be enabled."""
cli.main(['install', '--fetch'])
mock_install.assert_called_once_with(
root=None,
depth=5,
force=False,
force_interactive=False,
fetch=True,
clean=False,
skip_changes=False,
skip_default_group=False,
)
@patch('gitman.commands.install')
def test_install_clean(self, mock_install):
"""Verify dependency cleaning can be enabled."""
cli.main(['install', '--clean'])
mock_install.assert_called_once_with(
root=None,
depth=5,
force=False,
force_interactive=False,
fetch=False,
clean=True,
skip_changes=False,
skip_default_group=False,
)
@patch('gitman.commands.install')
def test_install_specific_sources(self, mock_install):
"""Verify individual dependencies can be installed."""
cli.main(['install', 'foo', 'bar'])
mock_install.assert_called_once_with(
'foo',
'bar',
root=None,
depth=5,
force=False,
force_interactive=False,
fetch=False,
clean=False,
skip_changes=False,
skip_default_group=False,
)
@patch('gitman.commands.install')
def test_install_with_depth(self, mock_install):
"""Verify the 'install' command can be limited by depth."""
cli.main(['install', '--depth', '10'])
mock_install.assert_called_once_with(
root=None,
depth=10,
force=False,
force_interactive=False,
fetch=False,
clean=False,
skip_changes=False,
skip_default_group=False,
)
@patch('gitman.commands.install', Mock())
def test_install_with_depth_invalid(self):
"""Verify depths below 1 are rejected."""
with pytest.raises(SystemExit):
cli.main(['install', '--depth', '0'])
with pytest.raises(SystemExit):
cli.main(['install', '--depth', '-1'])
class TestUpdate:
"""Unit tests for the `update` command."""
@patch('gitman.commands.update')
def test_update(self, mock_update):
"""Verify the 'update' command can be run."""
cli.main(['update'])
mock_update.assert_called_once_with(
root=None,
depth=5,
force=False,
force_interactive=False,
clean=False,
recurse=False,
lock=None,
skip_changes=False,
)
@patch('gitman.commands.update')
def test_update_recursive(self, mock_update):
"""Verify the 'update' command can be run recursively."""
cli.main(['update', '--all'])
mock_update.assert_called_once_with(
root=None,
depth=5,
force=False,
force_interactive=False,
clean=False,
recurse=True,
lock=None,
skip_changes=False,
)
@patch('gitman.commands.update')
def test_update_no_lock(self, mock_update):
"""Verify the 'update' command can disable locking."""
cli.main(['update', '--skip-lock'])
mock_update.assert_called_once_with(
root=None,
depth=5,
force=False,
force_interactive=False,
clean=False,
recurse=False,
lock=False,
skip_changes=False,
)
@patch('gitman.commands.update')
def test_update_skip_changes(self, mock_update):
"""Verify the 'update' command with skip changes option."""
cli.main(['update', '--skip-changes'])
mock_update.assert_called_once_with(
root=None,
depth=5,
force=False,
force_interactive=False,
clean=False,
recurse=False,
lock=None,
skip_changes=True,
)
@patch('gitman.commands.update')
def test_update_force(self, mock_update):
"""Verify the 'update' command with force option."""
cli.main(['update', '--force'])
mock_update.assert_called_once_with(
root=None,
depth=5,
force=True,
force_interactive=False,
clean=False,
recurse=False,
lock=None,
skip_changes=False,
)
@patch('gitman.commands.update')
def test_update_force_interactive(self, mock_update):
"""Verify the 'update' command with force-interactive option."""
cli.main(['update', '--force-interactive'])
mock_update.assert_called_once_with(
root=None,
depth=5,
force=False,
force_interactive=True,
clean=False,
recurse=False,
lock=None,
skip_changes=False,
)
@patch('gitman.commands.update')
def test_update_specific_sources(self, mock_update):
"""Verify individual dependencies can be updated."""
cli.main(['update', 'foo', 'bar'])
mock_update.assert_called_once_with(
'foo',
'bar',
root=None,
depth=5,
force=False,
force_interactive=False,
clean=False,
recurse=False,
lock=None,
skip_changes=False,
)
@patch('gitman.commands.update')
def test_update_with_depth(self, mock_update):
"""Verify the 'update' command can be limited by depth."""
cli.main(['update', '--depth', '10'])
mock_update.assert_called_once_with(
root=None,
depth=10,
force=False,
force_interactive=False,
clean=False,
recurse=False,
lock=None,
skip_changes=False,
)
class TestList:
"""Unit tests for the `list` command."""
@patch('gitman.commands.display')
def test_list(self, mock_display):
"""Verify the 'list' command can be run."""
cli.main(['list'])
mock_display.assert_called_once_with(root=None, depth=5, allow_dirty=True)
@patch('gitman.commands.display')
def test_list_root(self, mock_display):
"""Verify the project's root can be specified."""
cli.main(['list', '--root', 'mock/path/to/root'])
mock_display.assert_called_once_with(
root='mock/path/to/root', depth=5, allow_dirty=True
)
@patch('gitman.commands.display')
def test_list_no_dirty(self, mock_display):
"""Verify the 'list' command can be set to fail when dirty."""
cli.main(['list', '--fail-if-dirty'])
mock_display.assert_called_once_with(root=None, depth=5, allow_dirty=False)
@patch('gitman.commands.display')
def test_list_with_depth(self, mock_display):
"""Verify the 'list' command can be limited by depth."""
cli.main(['list', '--depth', '10'])
mock_display.assert_called_once_with(root=None, depth=10, allow_dirty=True)
def describe_lock():
@patch('gitman.commands.lock')
def with_no_arguments(lock):
cli.main(['lock'])
lock.assert_called_once_with(root=None)
@patch('gitman.commands.lock')
def with_dependencies(lock):
cli.main(['lock', 'foo', 'bar'])
lock.assert_called_once_with('foo', 'bar', root=None)
class TestUninstall:
"""Unit tests for the `uninstall` command."""
@patch('gitman.commands.delete')
def test_uninstall(self, mock_uninstall):
"""Verify the 'uninstall' command can be run."""
cli.main(['uninstall'])
mock_uninstall.assert_called_once_with(
root=None, force=False, keep_location=False
)
@patch('gitman.commands.delete')
def test_uninstall_root(self, mock_uninstall):
"""Verify the project's root can be specified."""
cli.main(['uninstall', '--root', 'mock/path/to/root'])
mock_uninstall.assert_called_once_with(
root='mock/path/to/root', force=False, keep_location=False
)
@patch('gitman.commands.delete')
def test_uninstall_force(self, mock_uninstall):
"""Verify the 'uninstall' command can be forced."""
cli.main(['uninstall', '--force'])
mock_uninstall.assert_called_once_with(
root=None, force=True, keep_location=False
)
@patch('gitman.commands.delete')
def test_uninstall_keep_location(self, mock_uninstall):
"""Verify the 'uninstall' command can be run with keep_location."""
cli.main(['uninstall', '--keep-location'])
mock_uninstall.assert_called_once_with(
root=None, force=False, keep_location=True
)
def describe_show():
@patch('gitman.commands.show')
def with_no_arguments(show):
cli.main(['show'])
show.assert_called_once_with(root=None)
@patch('gitman.commands.show')
def with_root(show):
cli.main(['show', '--root', "mock/root"])
show.assert_called_once_with(root="mock/root")
@patch('gitman.commands.show')
def with_names(show):
cli.main(['show', 'foo', 'bar'])
show.assert_called_once_with('foo', 'bar', root=None)
@patch('gitman.commands.show')
def with_config(show):
cli.main(['show', '--config'])
show.assert_called_once_with('__config__', root=None)
@patch('gitman.commands.show')
def with_log(show):
cli.main(['show', '--log'])
show.assert_called_once_with('__log__', root=None)
def describe_edit():
@patch('gitman.commands.edit')
def with_no_arguments(edit):
cli.main(['edit'])
edit.assert_called_once_with(root=None)
@patch('gitman.commands.edit')
def with_root(edit):
cli.main(['edit', '--root', "mock/root"])
edit.assert_called_once_with(root="mock/root")
def describe_logging():
argument_verbosity = [
(None, 0),
('-v', 1),
('-vv', 2),
('-vvv', 3),
('-vvvv', 4),
('-vvvvv', 4),
('-q', -1),
]
@pytest.mark.parametrize("argument,verbosity", argument_verbosity)
def at_each_level(argument, verbosity):
def function(*args, **kwargs):
log.debug(args)
log.debug(kwargs)
log.warning("warning")
log.error("error")
return True
cli.main([argument] if argument else [], function)
expect(_Config.verbosity) == verbosity
| |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Integration test for App Engine.
Primarily tests the deploy operation and upsert load balancer pipeline stage,
which are relatively complex and not well covered by unit tests.
Sample Usage:
Assuming you have created $PASSPHRASE_FILE (which you should chmod 400)
and $CITEST_ROOT points to the root directory of the citest library.
The passphrase file can be omitted if you run ssh-agent and add
.ssh/compute_google_engine.
PYTHONPATH=$CITEST_ROOT \
python spinnaker/testing/citest/tests/appengine_smoke_test.py \
--gce_ssh_passphrase_file=$PASSPHRASE_FILE \
--gce_project=$PROJECT \
--gce_zone=$ZONE \
--gce_instance=$INSTANCE
or
PYTHONPATH=$CITEST_ROOT \
python spinnaker/testing/citest/tests/appengine_smoke_test.py \
--native_hostname=host-running-smoke-test
"""
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import citest.gcp_testing as gcp
import citest.json_contract as jc
import citest.json_predicate as jp
import citest.service_testing as st
ov_factory = jc.ObservationPredicateFactory()
import spinnaker_testing as sk
import spinnaker_testing.gate as gate
import spinnaker_testing.frigga as frigga
import citest.base
class AppengineSmokeTestScenario(sk.SpinnakerTestScenario):
"""Defines the scenario for the integration test.
We're going to:
Create a Spinnaker Application
Create a Spinnaker Server Group (implicitly creates a Load Balancer)
Create a Pipeline with the following stages
- Deploy
- Upsert Load Balancer
Delete Load Balancer (implicitly destroys the Server Groups
created within this test)
Delete Application
"""
@classmethod
def new_agent(cls, bindings):
return gate.new_agent(bindings)
@classmethod
def initArgumentParser(cls, parser, defaults=None):
"""Initialize command line argument parser."""
super(AppengineSmokeTestScenario, cls).initArgumentParser(
parser, defaults=defaults)
parser.add_argument(
'--test_gcs_bucket', default=None,
help='URL to use for testing appengine deployment from a bucket.'
' The test will write into this bucket'
' then deploy what it writes.')
parser.add_argument('--git_repo_url', default=None,
help='URL of a GIT source code repository used by Spinnaker to deploy to App Engine.')
parser.add_argument('--branch', default='master',
help='Git branch to be used when deploying from source code repository.')
parser.add_argument('--app_directory_root', default=None,
help='Path from the root of source code repository to the application directory.')
def __init__(self, bindings, agent=None):
super(AppengineSmokeTestScenario, self).__init__(bindings, agent)
if not bindings['GIT_REPO_URL']:
raise ValueError('Must supply value for --git_repo_url')
if not bindings['APP_DIRECTORY_ROOT']:
raise ValueError('Must supply value for --app_directory_root')
self.TEST_APP = bindings['TEST_APP']
self.TEST_STACK = bindings['TEST_STACK']
self.__path = 'applications/%s/tasks' % self.TEST_APP
self.__gcp_project = bindings['APPENGINE_PRIMARY_MANAGED_PROJECT_ID']
self.__cluster_name = frigga.Naming.cluster(self.TEST_APP, self.TEST_STACK)
self.__server_group_name = frigga.Naming.server_group(self.TEST_APP, self.TEST_STACK)
self.__lb_name = self.__cluster_name
# Python is clearly hard-coded as the runtime here, but we're just asking App Engine to be a static file server.
self.__app_yaml = ('\n'.join(['runtime: python27',
'api_version: 1',
'threadsafe: true',
'service: {service}',
'handlers:',
' - url: /.*',
' static_dir: .']).format(service=self.__lb_name))
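# For reference, with a load balancer named 'myapp-teststack' the string
# above renders as:
#
#   runtime: python27
#   api_version: 1
#   threadsafe: true
#   service: myapp-teststack
#   handlers:
#     - url: /.*
#       static_dir: .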
self.__app_directory_root = bindings['APP_DIRECTORY_ROOT']
self.__branch = bindings['BRANCH']
self.pipeline_id = None
test_bucket = bindings['TEST_GCS_BUCKET']
if test_bucket:
self.__prepare_bucket(test_bucket)
self.__test_repository_url = 'gs://' + test_bucket
else:
self.__test_repository_url = bindings['GIT_REPO_URL']
def __prepare_bucket(self, bucket):
root = self.bindings['APP_DIRECTORY_ROOT']
temp = tempfile.mkdtemp()
local_path = os.path.join(temp, root)
branch = self.bindings['BRANCH']
git_repo = self.bindings['GIT_REPO_URL']
gcs_path = 'gs://{bucket}/{root}'.format(
bucket=self.bindings['TEST_GCS_BUCKET'], root=root)
try:
command = 'git clone {repo} -b {branch} {dir}'.format(
repo=git_repo, branch=branch, dir=temp)
logging.info('Fetching %s', git_repo)
subprocess.Popen(command, stderr=sys.stderr, shell=True).wait()
command = 'gsutil -m rsync {local} {gcs}'.format(
local=local_path, gcs=gcs_path)
logging.info('Preparing %s', gcs_path)
subprocess.Popen(command, stderr=sys.stderr, shell=True).wait()
finally:
shutil.rmtree(temp)  # remove the whole clone, not just the app directory
def create_app(self):
# Not testing create_app, since the operation is well tested elsewhere.
contract = jc.Contract()
return st.OperationContract(
self.agent.make_create_app_operation(
bindings=self.bindings,
application=self.TEST_APP,
account_name=self.bindings['SPINNAKER_APPENGINE_ACCOUNT']),
contract=contract)
def delete_app(self):
# Not testing delete_app, since the operation is well tested elsewhere.
contract = jc.Contract()
return st.OperationContract(
self.agent.make_delete_app_operation(
application=self.TEST_APP,
account_name=self.bindings['SPINNAKER_APPENGINE_ACCOUNT']),
contract=contract)
def create_server_group(self):
group_name = frigga.Naming.server_group(
app=self.TEST_APP,
stack=self.bindings['TEST_STACK'],
version='v000')
job_spec = {
'application': self.TEST_APP,
'stack': self.TEST_STACK,
'credentials': self.bindings['SPINNAKER_APPENGINE_ACCOUNT'],
'repositoryUrl': self.__test_repository_url,
'applicationDirectoryRoot': self.__app_directory_root,
'configFiles': [self.__app_yaml],
'type': 'createServerGroup',
'cloudProvider': 'appengine',
'region': 'us-central'
}
if not self.__test_repository_url.startswith('gs://'):
job_spec.update({
'gitCredentialType': 'NONE',
'branch': self.__branch
})
payload = self.agent.make_json_payload_from_kwargs(job=[job_spec],
description='Create Server Group in ' + group_name,
application=self.TEST_APP)
builder = gcp.GcpContractBuilder(self.appengine_observer)
(builder.new_clause_builder('Version Added', retryable_for_secs=30)
.inspect_resource('apps.services.versions',
group_name,
appsId=self.__gcp_project,
servicesId=self.__lb_name)
.EXPECT(ov_factory.value_list_path_contains(
'servingStatus', jp.STR_EQ('SERVING'))))
return st.OperationContract(
self.new_post_operation(
title='create_server_group', data=payload, path='tasks'),
contract=builder.build())
def make_deploy_stage(self):
cluster_spec = {
'account': self.bindings['SPINNAKER_APPENGINE_ACCOUNT'],
'applicationDirectoryRoot': self.__app_directory_root,
'configFiles': [self.__app_yaml],
'application': self.TEST_APP,
'cloudProvider': 'appengine',
'provider': 'appengine',
'region': 'us-central',
'repositoryUrl': self.__test_repository_url,
'stack': self.TEST_STACK
}
if not self.__test_repository_url.startswith('gs://'):
cluster_spec.update({
'gitCredentialType': 'NONE',
'branch': self.__branch
})
result = {
'clusters': [cluster_spec],
'name': 'Deploy',
'refId': '1',
'requisiteStageRefIds': [],
'type': 'deploy'
}
return result
def make_upsert_load_balancer_stage(self):
result = {
'cloudProvider': 'appengine',
'loadBalancers': [
{
'cloudProvider': 'appengine',
'credentials': self.bindings['SPINNAKER_APPENGINE_ACCOUNT'],
'loadBalancerName': self.__lb_name,
'migrateTraffic': False,
'name': self.__lb_name,
'region': 'us-central',
'splitDescription': {
'allocationDescriptions': [
{
'allocation': 0.1,
'cluster': self.__cluster_name,
'locatorType': 'targetCoordinate',
'target': 'current_asg_dynamic'
},
{
'allocation': 0.9,
'cluster': self.__cluster_name,
'locatorType': 'targetCoordinate',
'target': 'ancestor_asg_dynamic'
}
],
'shardBy': 'IP'
}
}
],
'name': 'Edit Load Balancer',
'refId': '2',
'requisiteStageRefIds': ['1'],
'type': 'upsertAppEngineLoadBalancers'
}
return result
def create_deploy_upsert_load_balancer_pipeline(self):
name = 'promoteServerGroupPipeline'
self.pipeline_id = name
deploy_stage = self.make_deploy_stage()
upsert_load_balancer_stage = self.make_upsert_load_balancer_stage()
pipeline_spec = dict(
name=name,
stages=[deploy_stage, upsert_load_balancer_stage],
triggers=[],
application=self.TEST_APP,
stageCounter=2,
parallel=True,
limitConcurrent=True,
appConfig={},
index=0
)
payload = self.agent.make_json_payload_from_kwargs(**pipeline_spec)
builder = st.HttpContractBuilder(self.agent)
(builder.new_clause_builder('Has Pipeline',
retryable_for_secs=5)
.get_url_path('applications/{0}/pipelineConfigs'.format(self.TEST_APP))
.contains_path_value(None, pipeline_spec))
return st.OperationContract(
self.new_post_operation(
title='create_deploy_upsert_load_balancer_pipeline', data=payload, path='pipelines',
status_class=st.SynchronousHttpOperationStatus),
contract=builder.build())
def run_deploy_upsert_load_balancer_pipeline(self):
url_path = 'pipelines/{0}/{1}'.format(self.TEST_APP, self.pipeline_id)
previous_group_name = frigga.Naming.server_group(
app=self.TEST_APP,
stack=self.TEST_STACK,
version='v000')
deployed_group_name = frigga.Naming.server_group(
app=self.TEST_APP,
stack=self.TEST_STACK,
version='v001')
payload = self.agent.make_json_payload_from_kwargs(
type='manual',
user='[anonymous]')
builder = gcp.GcpContractBuilder(self.appengine_observer)
(builder.new_clause_builder('Service Modified', retryable_for_secs=30)
.inspect_resource('apps.services',
self.__lb_name,
appsId=self.__gcp_project)
.EXPECT(
ov_factory.value_list_path_contains(
jp.build_path('split', 'allocations'),
jp.DICT_MATCHES({previous_group_name: jp.NUM_EQ(0.9),
deployed_group_name: jp.NUM_EQ(0.1)}))))
return st.OperationContract(
self.new_post_operation(
title='run_deploy_upsert_load_balancer_pipeline',
data=payload, path=url_path),
builder.build())
def delete_load_balancer(self):
bindings = self.bindings
payload = self.agent.make_json_payload_from_kwargs(
job=[{
'type': 'deleteLoadBalancer',
'cloudProvider': 'appengine',
'loadBalancerName': self.__lb_name,
'account': bindings['SPINNAKER_APPENGINE_ACCOUNT'],
'credentials': bindings['SPINNAKER_APPENGINE_ACCOUNT'],
'user': '[anonymous]'
}],
description='Delete Load Balancer: {0} in {1}'.format(
self.__lb_name,
bindings['SPINNAKER_APPENGINE_ACCOUNT']),
application=self.TEST_APP)
builder = gcp.GcpContractBuilder(self.appengine_observer)
(builder.new_clause_builder('Service Deleted', retryable_for_secs=30)
.inspect_resource('apps.services',
self.__lb_name,
appsId=self.__gcp_project)
.EXPECT(
ov_factory.error_list_contains(gcp.HttpErrorPredicate(http_code=404))))
return st.OperationContract(
self.new_post_operation(
title='delete_load_balancer', data=payload, path='tasks'),
contract=builder.build())
class AppengineSmokeTest(st.AgentTestCase):
@property
def scenario(self):
return citest.base.TestRunner.global_runner().get_shared_data(AppengineSmokeTestScenario)
def test_a_create_app(self):
self.run_test_case(self.scenario.create_app())
def test_b_create_server_group(self):
self.run_test_case(self.scenario.create_server_group())
def test_c_create_pipeline(self):
self.run_test_case(self.scenario.create_deploy_upsert_load_balancer_pipeline())
def test_d_run_pipeline(self):
self.run_test_case(self.scenario.run_deploy_upsert_load_balancer_pipeline())
def test_y_delete_load_balancer(self):
self.run_test_case(self.scenario.delete_load_balancer(),
retry_interval_secs=8, max_retries=8)
def test_z_delete_app(self):
self.run_test_case(self.scenario.delete_app(),
retry_interval_secs=8, max_retries=8)
def main():
defaults = {
'TEST_STACK': AppengineSmokeTestScenario.DEFAULT_TEST_ID,
'TEST_APP': 'gaesmoketest' + AppengineSmokeTestScenario.DEFAULT_TEST_ID,
}
return citest.base.TestRunner.main(
parser_inits=[AppengineSmokeTestScenario.initArgumentParser],
default_binding_overrides=defaults,
test_case_list=[AppengineSmokeTest])
if __name__ == '__main__':
sys.exit(main())
| |
# Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import re
from neutron.i18n import _, _LE, _LW
from neutron.openstack.common import log as logging
from neutron.plugins.sriovnicagent.common import exceptions as exc
from neutron.plugins.sriovnicagent import pci_lib
LOG = logging.getLogger(__name__)
class PciOsWrapper(object):
"""OS wrapper for checking virtual functions"""
DEVICE_PATH = "/sys/class/net/%s/device"
PCI_PATH = "/sys/class/net/%s/device/virtfn%s/net"
VIRTFN_FORMAT = r"^virtfn(?P<vf_index>\d+)"
VIRTFN_REG_EX = re.compile(VIRTFN_FORMAT)
MAC_VTAP_PREFIX = "upper_macvtap*"
@classmethod
def scan_vf_devices(cls, dev_name):
"""Scan os directories to get VF devices
@param dev_name: pf network device name
@return: list of virtual functions
"""
vf_list = []
dev_path = cls.DEVICE_PATH % dev_name
if not os.path.isdir(dev_path):
LOG.error(_LE("Failed to get devices for %s"), dev_name)
raise exc.InvalidDeviceError(dev_name=dev_name,
reason=_("Device not found"))
file_list = os.listdir(dev_path)
for file_name in file_list:
pattern_match = cls.VIRTFN_REG_EX.match(file_name)
if pattern_match:
vf_index = int(pattern_match.group("vf_index"))
file_path = os.path.join(dev_path, file_name)
if os.path.islink(file_path):
file_link = os.readlink(file_path)
pci_slot = os.path.basename(file_link)
vf_list.append((pci_slot, vf_index))
if not vf_list:
raise exc.InvalidDeviceError(
dev_name=dev_name,
reason=_("Device has no virtual functions"))
return vf_list
@classmethod
def is_assigned_vf(cls, dev_name, vf_index):
"""Check if VF is assigned.
Checks if a given vf index of a given device name is assigned
by checking the relevant path in the system:
VF is assigned if:
Direct VF: PCI_PATH does not exist.
Macvtap VF: upper_macvtap path exists.
@param dev_name: pf network device name
@param vf_index: vf index
"""
path = cls.PCI_PATH % (dev_name, vf_index)
if not os.path.isdir(path):
return True
upper_macvtap_path = os.path.join(path, "*", cls.MAC_VTAP_PREFIX)
return bool(glob.glob(upper_macvtap_path))
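# For example (hypothetical device name): is_assigned_vf('eth3', 0) returns
# True when /sys/class/net/eth3/device/virtfn0/net is missing (the VF was
# directly assigned to a guest) or when an upper_macvtap* entry exists
# beneath it (macvtap assignment).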
class EmbSwitch(object):
"""Class to manage logical embedded switch entity.
Embedded Switch object is logical entity representing all VFs
connected to same physical network
Each physical network is mapped to PF network device interface,
meaning all its VF, excluding the devices in exclude_device list.
@ivar pci_slot_map: dictionary for mapping each pci slot to vf index
@ivar pci_dev_wrapper: pci device wrapper
"""
def __init__(self, phys_net, dev_name, exclude_devices):
"""Constructor
@param phys_net: physical network
@param dev_name: network device name
@param exclude_devices: list of pci slots to exclude
"""
self.phys_net = phys_net
self.dev_name = dev_name
self.pci_slot_map = {}
self.pci_dev_wrapper = pci_lib.PciDeviceIPWrapper(dev_name)
self._load_devices(exclude_devices)
def _load_devices(self, exclude_devices):
"""Load devices from driver and filter if needed.
@param exclude_devices: excluded devices mapping device_name: pci slots
"""
scanned_pci_list = PciOsWrapper.scan_vf_devices(self.dev_name)
for pci_slot, vf_index in scanned_pci_list:
if pci_slot not in exclude_devices:
self.pci_slot_map[pci_slot] = vf_index
def get_pci_slot_list(self):
"""Get list of VF addresses."""
return self.pci_slot_map.keys()
def get_assigned_devices(self):
"""Get assigned Virtual Functions.
@return: list of VF mac addresses
"""
vf_list = []
assigned_macs = []
for vf_index in self.pci_slot_map.itervalues():
if not PciOsWrapper.is_assigned_vf(self.dev_name, vf_index):
continue
vf_list.append(vf_index)
if vf_list:
assigned_macs = self.pci_dev_wrapper.get_assigned_macs(vf_list)
return assigned_macs
def get_device_state(self, pci_slot):
"""Get device state.
@param pci_slot: Virtual Function address
"""
vf_index = self.pci_slot_map.get(pci_slot)
if vf_index is None:
LOG.warning(_LW("Cannot find vf index for pci slot %s"),
pci_slot)
raise exc.InvalidPciSlotError(pci_slot=pci_slot)
return self.pci_dev_wrapper.get_vf_state(vf_index)
def set_device_state(self, pci_slot, state):
"""Set device state.
@param pci_slot: Virtual Function address
@param state: link state
"""
vf_index = self.pci_slot_map.get(pci_slot)
if vf_index is None:
LOG.warning(_LW("Cannot find vf index for pci slot %s"),
pci_slot)
raise exc.InvalidPciSlotError(pci_slot=pci_slot)
return self.pci_dev_wrapper.set_vf_state(vf_index, state)
def get_pci_device(self, pci_slot):
"""Get mac address for given Virtual Function address
@param pci_slot: pci slot
@return: MAC address of virtual function
"""
vf_index = self.pci_slot_map.get(pci_slot)
mac = None
if vf_index is not None:
if PciOsWrapper.is_assigned_vf(self.dev_name, vf_index):
macs = self.pci_dev_wrapper.get_assigned_macs([vf_index])
if macs:
mac = macs[0]
return mac
class ESwitchManager(object):
"""Manages logical Embedded Switch entities for physical network."""
def __init__(self, device_mappings, exclude_devices):
"""Constructor.
Create Embedded Switch logical entities for all given device mappings,
using exclude devices.
"""
self.emb_switches_map = {}
self.pci_slot_map = {}
self._discover_devices(device_mappings, exclude_devices)
def device_exists(self, device_mac, pci_slot):
"""Verify if device exists.
Check if a device mac exists and matches the given VF pci slot
@param device_mac: device mac
@param pci_slot: VF address
"""
embedded_switch = self._get_emb_eswitch(device_mac, pci_slot)
if embedded_switch:
return True
return False
def get_assigned_devices(self, phys_net=None):
"""Get all assigned devices.
Get all assigned devices belonging to the given embedded switch
@param phys_net: physical network, if none get all assigned devices
@return: set of assigned VFs mac addresses
"""
if phys_net:
embedded_switch = self.emb_switches_map.get(phys_net, None)
if not embedded_switch:
return set()
eswitch_objects = [embedded_switch]
else:
eswitch_objects = self.emb_switches_map.values()
assigned_devices = set()
for embedded_switch in eswitch_objects:
for device_mac in embedded_switch.get_assigned_devices():
assigned_devices.add(device_mac)
return assigned_devices
def get_device_state(self, device_mac, pci_slot):
"""Get device state.
Get the device state (up/True or down/False)
@param device_mac: device mac
@param pci_slot: VF pci slot
@return: device state (True/False); False if lookup failed
"""
embedded_switch = self._get_emb_eswitch(device_mac, pci_slot)
if embedded_switch:
return embedded_switch.get_device_state(pci_slot)
return False
def set_device_state(self, device_mac, pci_slot, admin_state_up):
"""Set device state
Sets the device state (up or down)
@param device_mac: device mac
@param pci_slot: pci slot
@param admin_state_up: device admin state True/False
"""
embedded_switch = self._get_emb_eswitch(device_mac, pci_slot)
if embedded_switch:
embedded_switch.set_device_state(pci_slot,
admin_state_up)
def _discover_devices(self, device_mappings, exclude_devices):
"""Discover which Virtual functions to manage.
Discover devices, and create embedded switch object for network device
@param device_mappings: device mapping physical_network:device_name
@param exclude_devices: excluded devices mapping device_name: pci slots
"""
if exclude_devices is None:
exclude_devices = {}
for phys_net, dev_name in device_mappings.iteritems():
self._create_emb_switch(phys_net, dev_name,
exclude_devices.get(dev_name, set()))
def _create_emb_switch(self, phys_net, dev_name, exclude_devices):
embedded_switch = EmbSwitch(phys_net, dev_name, exclude_devices)
self.emb_switches_map[phys_net] = embedded_switch
for pci_slot in embedded_switch.get_pci_slot_list():
self.pci_slot_map[pci_slot] = embedded_switch
def _get_emb_eswitch(self, device_mac, pci_slot):
"""Get embedded switch.
Get embedded switch by pci slot and validate pci has device mac
@param device_mac: device mac
@param pci_slot: pci slot
"""
embedded_switch = self.pci_slot_map.get(pci_slot)
if embedded_switch:
used_device_mac = embedded_switch.get_pci_device(pci_slot)
if used_device_mac != device_mac:
LOG.warning(_LW("device pci mismatch: %(device_mac)s "
"- %(pci_slot)s"),
{"device_mac": device_mac, "pci_slot": pci_slot})
embedded_switch = None
return embedded_switch
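# Illustrative wiring (hypothetical device names and PCI slots):
#
#     device_mappings = {'physnet1': 'eth3'}
#     exclude_devices = {'eth3': set(['0000:04:00.3'])}
#     eswitch_mgr = ESwitchManager(device_mappings, exclude_devices)
#     assigned_macs = eswitch_mgr.get_assigned_devices('physnet1')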
| |
# Copyright 2020 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import datetime
import math
import os
import uuid
from dataclasses import dataclass, field
from typing import List, Any, Optional, Callable, Dict, Union
import numpy as np
import cirq
import cirq_google as cg
from cirq import work, study, circuits, ops
from cirq_google.engine.engine_job import TERMINAL_STATES
def _get_program_id(program: Any):
"""Get a program id from program.program_id.
This is not an actual attribute of cirq.Circuit, but thanks to the magic
of python, it can be! If your circuit does not have a program_id,
this function will return a uuid4().
Program ids can only contain alphanumeric and -_ so we replace
"/" and ":" which are common in our data collection idioms.
Program ids must be unique and sometimes you need to re-try a particular
experiment, so we append a random component.
Program ids can only be 64 characters. The following
compression is performed: the program id is split according to `/`
and the middle part of each resulting string is omitted to get the
length below 64. The parts are joined back with _ since `/` is not
allowed. If your program id is *really* long, we give up and return
a uuid4().
"""
if not hasattr(program, 'program_id'):
return str(uuid.uuid4())
program_id: str = program.program_id
program_id = program_id.replace(':', '')
parts = program_id.split('/')
parts.append(str(uuid.uuid4()))
chars_per_part = math.floor(64 / len(parts)) - 1
if chars_per_part < 3:
print("Program id too long!")
return str(uuid.uuid4())
parts = [p if len(p) <= chars_per_part
else p[:chars_per_part // 2] + p[-chars_per_part // 2:]
for p in parts]
return '_'.join(parts)
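# Illustrative behavior (hypothetical id; the random uuid4 suffix varies):
#
#     class _P:
#         program_id = '2020-03/exp1/trial:2'
#     _get_program_id(_P())  # -> '2020-03_exp1_trial2_<truncated uuid4>'
#
# With four parts the per-part budget is floor(64 / 4) - 1 = 15 characters;
# longer parts keep roughly half of their characters from each end.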
class EngineSampler(work.Sampler):
"""Temporary shim; to be replaced with QuantumEngineSampler.
Missing features from QuantumEngineSampler:
- Gateset by string name and project_id by environment variable.
See https://github.com/quantumlib/Cirq/pull/2767.
- Extracts program_id from an optional attribute on Circuit.
Potentially to be fixed by using the "tags" feature and
adding this as an optional attribute to Circuit.
See https://github.com/quantumlib/Cirq/issues/2816
- Asynchronous execution
- No maximum number of requests for results.
See https://github.com/quantumlib/Cirq/issues/2817
"""
def __init__(self, processor_id: str, gateset: str):
project_id = os.environ['GOOGLE_CLOUD_PROJECT']
engine = cg.Engine(project_id=project_id,
proto_version=cg.ProtoVersion.V2)
self.engine = engine
self.processor_id = processor_id
if gateset == 'sycamore':
self.gate_set = cg.SYC_GATESET
elif gateset == 'sqrt-iswap':
self.gate_set = cg.SQRT_ISWAP_GATESET
else:
raise ValueError("Unknown gateset {}".format(gateset))
def run(
self,
program: 'cirq.Circuit',
param_resolver: 'cirq.ParamResolverOrSimilarType' = None,
repetitions: int = 1,
) -> 'cirq.Result':
if param_resolver is None:
param_resolver = study.ParamResolver({})
return self.engine.run(
program=program,
program_id=_get_program_id(program),
param_resolver=param_resolver,
repetitions=repetitions,
processor_ids=[self.processor_id],
gate_set=self.gate_set,
)
def run_sweep(
self,
program: 'cirq.Circuit',
params: 'cirq.Sweepable',
repetitions: int = 1,
) -> List['cirq.Result']:
return self.engine.run_sweep(
program=program,
params=params,
program_id=_get_program_id(program),
repetitions=repetitions,
processor_ids=[self.processor_id],
gate_set=self.gate_set,
).results()
async def run_async(self, program: 'cirq.Circuit',
*, repetitions: int) -> 'cirq.Result':
program_id = _get_program_id(program)
engine_job = self.engine.run_sweep(
program=program,
program_id=program_id,
repetitions=repetitions,
processor_ids=[self.processor_id],
gate_set=self.gate_set,
)
job = engine_job._refresh_job()
while True:
if job.execution_status.state in TERMINAL_STATES:
break
await asyncio.sleep(1.0)
job = engine_job._refresh_job()
print(f"Done: {program_id}")
engine_job._raise_on_failure(job)
response = engine_job.context.client.get_job_results(
engine_job.project_id, engine_job.program_id, engine_job.job_id)
result = response.result
v2_parsed_result = cg.api.v2.result_pb2.Result()
v2_parsed_result.ParseFromString(result.value)
return engine_job._get_job_results_v2(v2_parsed_result)[0]
class ZerosSampler(work.Sampler):
"""Shim for an object that should be in Cirq.
See https://github.com/quantumlib/Cirq/issues/2818.
"""
def run_sweep(
self,
program: 'cirq.Circuit',
params: 'cirq.Sweepable',
repetitions: int = 1,
) -> List['cirq.Result']:
assert isinstance(program, circuits.Circuit)
meas = list(program.findall_operations_with_gate_type(
ops.MeasurementGate))
if len(meas) == 0:
raise ValueError("Circuit has no measurement gates to sample.")
elif len(meas) > 1:
for _, m, _ in meas:
assert len(m.qubits) == 1
results = [
study.Result(
params=p,
measurements={gate.key: np.zeros(
(repetitions, 1), dtype=int)
for _, _, gate in meas})
for p in study.to_resolvers(params)
]
else:
assert len(meas) == 1
i, op, gate = meas[0]
n_qubits = len(op.qubits)
k = gate.key
results = [
study.Result(
params=p,
measurements={k: np.zeros(
(repetitions, n_qubits), dtype=int)})
for p in study.to_resolvers(params)
]
return results
async def run_async(self, program: 'cirq.Circuit',
*, repetitions: int) -> 'cirq.Result':
program_id = _get_program_id(program)
await asyncio.sleep(0.1)
results = self.run_sweep(program, study.UnitSweep, repetitions)
print(f"Done: {program_id}")
return results[0]
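# Minimal dry-run sketch using only the public cirq APIs imported above:
#
#     qubits = cirq.LineQubit.range(2)
#     circuit = cirq.Circuit(cirq.measure(*qubits, key='m'))
#     result = ZerosSampler().run_sweep(circuit, study.UnitSweep,
#                                       repetitions=10)[0]
#     assert not result.measurements['m'].any()  # zeros, shape (10, 2)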
@dataclass(frozen=True)
class QuantumProcessor:
"""Grouping of relevant info
https://github.com/quantumlib/Cirq/issues/2820
"""
name: str
device_obj: cirq.Device
processor_id: Optional[str]
is_simulator: bool
_cached_samplers: Dict[Union[None, str], cirq.Sampler] \
= field(default_factory=dict)
_get_sampler_func: Optional[Callable[['QuantumProcessor', str], cirq.Sampler]] = None
def get_sampler(self, gateset: str = None):
"""Why must gateset be supplied?
https://github.com/quantumlib/Cirq/issues/2819
"""
if gateset not in self._cached_samplers:
sampler = self._get_sampler_func(self, gateset)
self._cached_samplers[gateset] = sampler
return self._cached_samplers[gateset]
class EngineQuantumProcessor:
def __init__(self, processor_id: str):
self.name = processor_id
self.processor_id = processor_id
self.is_simulator = False
self._engine = None
@property
def engine(self):
if self._engine is None:
project_id = os.environ['GOOGLE_CLOUD_PROJECT']
engine = cg.Engine(project_id=project_id,
proto_version=cg.ProtoVersion.V2)
self._engine = engine
return self._engine
def get_sampler(self, gateset: str = None):
if gateset == 'sycamore':
gateset = cg.SYC_GATESET
elif gateset == 'sqrt-iswap':
gateset = cg.SQRT_ISWAP_GATESET
else:
raise ValueError("Unknown gateset {}".format(gateset))
return self.engine.sampler(processor_id=self.processor_id, gate_set=gateset)
@property
def device_obj(self):
dspec = self.engine.get_processor(self.processor_id).get_device_specification()
device = cg.SerializableDevice.from_proto(proto=dspec, gate_sets=[])
return device
QUANTUM_PROCESSORS = {
'Sycamore23': QuantumProcessor(
name='Sycamore23',
device_obj=cg.Sycamore23,
processor_id='rainbow',
is_simulator=False,
_get_sampler_func=lambda x, gs: EngineSampler(
processor_id=x.processor_id, gateset=gs),
),
'Syc23-noiseless': QuantumProcessor(
name='Syc23-noiseless',
device_obj=cg.Sycamore23,
processor_id=None,
is_simulator=True,
_get_sampler_func=lambda x, gs: cirq.Simulator(),
),
'Syc23-simulator': QuantumProcessor(
name='Syc23-simulator',
device_obj=cg.Sycamore23,
processor_id=None,
is_simulator=True,
_get_sampler_func=lambda x, gs: cirq.DensityMatrixSimulator(
noise=cirq.ConstantQubitNoiseModel(
qubit_noise_gate=cirq.DepolarizingChannel(0.005)
))
),
'Syc23-simulator-tester': QuantumProcessor(
# This simulator has a constant seed for consistent testing
name='Syc23-simulator-tester',
device_obj=cg.Sycamore23,
processor_id=None,
is_simulator=True,
_get_sampler_func=lambda x, gs: cirq.DensityMatrixSimulator(
noise=cirq.ConstantQubitNoiseModel(
qubit_noise_gate=cirq.DepolarizingChannel(0.005)
), seed=1234)
),
'Syc23-zeros': QuantumProcessor(
name='Syc23-zeros',
device_obj=cg.Sycamore23,
processor_id=None,
is_simulator=True,
_get_sampler_func=lambda x, gs: ZerosSampler()
),
'Syc54-noiseless': QuantumProcessor(
name='Syc54-noiseless',
device_obj=cg.Sycamore,
processor_id=None,
is_simulator=True,
_get_sampler_func=lambda x, gs: cirq.Simulator(),
),
'Syc54-simulator': QuantumProcessor(
name='Syc54-simulator',
device_obj=cg.Sycamore,
processor_id=None,
is_simulator=True,
_get_sampler_func=lambda x, gs: cirq.DensityMatrixSimulator(
noise=cirq.ConstantQubitNoiseModel(
qubit_noise_gate=cirq.DepolarizingChannel(0.005)
))
),
'Syc54-zeros': QuantumProcessor(
name='Syc54-zeros',
device_obj=cg.Sycamore,
processor_id=None,
is_simulator=True,
_get_sampler_func=lambda x, gs: ZerosSampler()
)
}
def get_device_obj_by_name(device_name: str):
return QUANTUM_PROCESSORS[device_name].device_obj
def get_processor_id_by_device_name(device_name: str):
return QUANTUM_PROCESSORS[device_name].processor_id
def get_sampler_by_name(device_name: str, *,
gateset='sycamore'):
return QUANTUM_PROCESSORS[device_name].get_sampler(gateset)
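# Example lookups (the simulator-backed entries need no cloud credentials;
# the engine-backed entries read GOOGLE_CLOUD_PROJECT from the environment):
#
#     device = get_device_obj_by_name('Syc23-simulator')   # -> cg.Sycamore23
#     sampler = get_sampler_by_name('Syc23-simulator', gateset='sycamore')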
async def execute_in_queue(func, tasks, num_workers: int):
"""Maintain a respectful queue of work
Args:
func: This function will be called on each param
tasks: Call func on each of these
num_workers: The number of async workers. This corresponds roughly
to the maintained queue depth.
"""
queue = asyncio.Queue()
async def worker():
while True:
task = await queue.get()
print(f"Processing {task.fn}. Current queue size: {queue.qsize()}")
await func(task)
print(f"{task.fn} completed")
queue.task_done()
worker_jobs = [asyncio.create_task(worker()) for _ in range(num_workers)]
for task in tasks:
await queue.put(task)
print("Added everything to the queue. Current queue size: {}"
.format(queue.qsize()))
await queue.join()
for wjob in worker_jobs:
wjob.cancel()
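# Usage sketch (hypothetical task type; ``fn`` mirrors the attribute the
# worker's progress messages reference above):
#
#     from dataclasses import dataclass
#
#     @dataclass
#     class _Task:
#         fn: str
#
#     async def _process(task: _Task):
#         await asyncio.sleep(1)  # stand-in for real async work
#
#     tasks = [_Task(fn='job-{}'.format(i)) for i in range(10)]
#     asyncio.get_event_loop().run_until_complete(
#         execute_in_queue(_process, tasks, num_workers=2))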
def _get_current_time():
return datetime.datetime.now()
| |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a session object to service Bokeh documents in external Python
clients to a Bokeh server.
Use-Cases
~~~~~~~~~
A client session has two primary uses:
* Implementing automated testing infrastructure around Bokeh server
applications.
* Creating and customizing specific sessions of a Bokeh server application
(running *in the Bokeh server*) before passing them on to a specific
viewer.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
from six.moves.urllib.parse import quote_plus
# Bokeh imports
from ..document import Document
from ..resources import _SessionCoordinates, DEFAULT_SERVER_HTTP_URL
from ..util.browser import NEW_PARAM
from ..util.session_id import generate_session_id
from ..util.string import format_docstring
from .util import server_url_for_websocket_url, websocket_url_for_server_url
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
DEFAULT_SESSION_ID = "default"
DEFAULT_SERVER_WEBSOCKET_URL = websocket_url_for_server_url(DEFAULT_SERVER_HTTP_URL)
__all__ = (
'ClientSession',
'pull_session',
'push_session',
'show_session',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def pull_session(session_id=None, url='default', io_loop=None, arguments=None):
''' Create a session by loading the current server-side document.
``session.document`` will be a fresh document loaded from
the server. While the connection to the server is open,
changes made on the server side will be applied to this
document, and changes made on the client side will be
synced to the server.
If you don't plan to modify ``session.document`` you probably
don't need to use this function; instead you can directly
``show_session()`` or ``server_session()`` without downloading
the session's document into your process first. It's much
more efficient to avoid downloading the session if you don't need
to.
In a production scenario, the ``session_id`` should be
unique for each browser tab, which keeps users from
stomping on each other. It's neither scalable nor secure to
use predictable session IDs or to share session IDs across
users.
For a notebook running on a single machine, ``session_id``
could be something human-readable such as ``"default"`` for
convenience.
If you allow ``pull_session()`` to generate a unique
``session_id``, you can obtain the generated ID with the
``id`` property on the returned ``ClientSession``.
Args:
session_id (string, optional) :
The name of the session, None to autogenerate a random one (default: None)
url (str, optional) : The URL to a Bokeh application on a Bokeh server
can also be `"default"` which will connect to the default app URL
io_loop (``tornado.ioloop.IOLoop``, optional) :
The ``IOLoop`` to use for the websocket
arguments (dict[str, str], optional) :
A dictionary of key/values to be passed as HTTP request arguments
to Bokeh application code (default: None)
Note that these should only be provided when pulling new sessions.
If ``session_id`` is not None, or a session with ``session_id``
already exists, these arguments will have no effect.
Returns:
ClientSession :
A new ``ClientSession`` connected to the server
'''
coords = _SessionCoordinates(session_id=session_id, url=url)
session = ClientSession(session_id=session_id, websocket_url=websocket_url_for_server_url(coords.url), io_loop=io_loop, arguments=arguments)
session.pull()
return session
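# A minimal usage sketch (assumes a Bokeh server application is already
# running, e.g. started with ``bokeh serve myapp.py``):
#
#     session = pull_session(url="http://localhost:5006/myapp")
#     # ... inspect or customize session.document here ...
#     session.close()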
def push_session(document, session_id=None, url='default', io_loop=None):
''' Create a session by pushing the given document to the server,
overwriting any existing server-side document.
``session.document`` in the returned session will be your supplied
document. While the connection to the server is open, changes made on the
server side will be applied to this document, and changes made on the
client side will be synced to the server.
In a production scenario, the ``session_id`` should be unique for each
browser tab, which keeps users from stomping on each other. It's neither
scalable nor secure to use predictable session IDs or to share session
IDs across users.
For a notebook running on a single machine, ``session_id`` could be
something human-readable such as ``"default"`` for convenience.
If you allow ``push_session()`` to generate a unique ``session_id``, you
can obtain the generated ID with the ``id`` property on the returned
``ClientSession``.
Args:
document : (bokeh.document.Document)
The document to be pushed and set as session.document
session_id : (string, optional)
The name of the session, None to autogenerate a random one (default: None)
url (str, optional) : The URL to a Bokeh application on a Bokeh server
can also be `"default"` which will connect to the default app URL
io_loop : (tornado.ioloop.IOLoop, optional)
The IOLoop to use for the websocket
Returns:
ClientSession
A new ClientSession connected to the server
'''
coords = _SessionCoordinates(session_id=session_id, url=url)
session = ClientSession(session_id=coords.session_id, websocket_url=websocket_url_for_server_url(coords.url), io_loop=io_loop)
session.push(document)
return session
def show_session(session_id=None, url='default', session=None, browser=None, new="tab", controller=None):
''' Open a browser displaying a session document.
If you have a session from ``pull_session()`` or ``push_session`` you
can ``show_session(session=mysession)``. If you don't need to open a
connection to the server yourself, you can show a new session in a
browser by providing just the ``url``.
Args:
session_id (string, optional) :
The name of the session, None to autogenerate a random one (default: None)
url (str, optional) : The URL to a Bokeh application on a Bokeh server
can also be `"default"` which will connect to the default app URL
session (ClientSession, optional) : session to get session ID and server URL from
If you specify this, you don't need to specify session_id and url
browser (str, optional) : browser to show with (default: None)
For systems that support it, the **browser** argument allows
specifying which browser to display in, e.g. "safari", "firefox",
"opera", "windows-default" (see the ``webbrowser`` module
documentation in the standard lib for more details).
new (str, optional) : new file output mode (default: "tab")
For file-based output, opens or raises the browser window
showing the current output file. If **new** is 'tab', then
opens a new tab. If **new** is 'window', then opens a new window.
'''
if session is not None:
server_url = server_url_for_websocket_url(session._connection.url)
session_id = session.id
else:
coords = _SessionCoordinates(session_id=session_id, url=url)
server_url = coords.url
session_id = coords.session_id
if controller is None:
from bokeh.util.browser import get_browser_controller
controller = get_browser_controller(browser=browser)
controller.open(server_url + "?bokeh-session-id=" + quote_plus(session_id),
new=NEW_PARAM[new])
class ClientSession(object):
''' Represents a websocket connection to a server-side session.
Each server session stores a Document, which is kept in sync with the
corresponding Document for this ``ClientSession`` instance. Updates on
either side of the connection will automatically propagate to the other
side, as long as the connection is open.
ClientSession objects can (and usually should) be used as a context manager
so that the session is properly closed:
.. code-block:: python
with pull_session(url=app_url) as mysession:
# customize session here
script = server_session(session_id=mysession.id, url=app_url)
return render_template("embed.html", script=script, template="Flask")
If you do not use ``ClientSession`` in this way, it is up to you to ensure
that ``mysession.close()`` is called.
'''
def __init__(self, session_id=None, websocket_url=DEFAULT_SERVER_WEBSOCKET_URL, io_loop=None, arguments=None):
''' A connection which attaches to a particular named session on the
server.
Always call either pull() or push() immediately after creating the
session (until these are called ``session.document`` will be ``None``).
The :func:`~bokeh.client.session.push_session` and
:func:`~bokeh.client.session.pull_session()` functions will construct a
``ClientSession`` and push or pull in one step, so they are a good way to
obtain a ``ClientSession``.
Args:
session_id (str) :
The name of the session or None to generate one
websocket_url (str) :
Websocket URL to connect to
io_loop (IOLoop, optional) :
The IOLoop to use for the websocket
arguments (dict[str, str], optional) :
A dictionary of key/values to be passed as HTTP request
arguments to Bokeh application code (default: None)
Note that these should only be provided when pulling new sessions.
If ``session_id`` is not None, or a session with ``session_id``
already exists, these arguments will have no effect.
'''
self._document = None
self._id = self._ensure_session_id(session_id)
from .connection import ClientConnection
self._connection = ClientConnection(session=self, io_loop=io_loop, websocket_url=websocket_url, arguments=arguments)
from ..server.callbacks import _DocumentCallbackGroup
self._callbacks = _DocumentCallbackGroup(self._connection.io_loop)
def __enter__(self):
'''
'''
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
'''
'''
self.close()
# Properties --------------------------------------------------------------
@property
def connected(self):
''' Whether this session is currently connected. '''
return self._connection.connected
@property
def document(self):
''' A :class:`~bokeh.document.Document` that will be kept in sync with
the corresponding Document on the server.
This value is initialized when :func:`pull` or :func:`push` succeeds.
It will be ``None`` until then.
'''
return self._document
@property
def id(self):
''' A unique ID for this session. '''
return self._id
# Public methods ----------------------------------------------------------
def connect(self):
''' Connect to a Bokeh server at the configured URL. '''
self._connection.connect()
def close(self, why="closed"):
''' Close the connection to the server. '''
self._connection.close(why)
def force_roundtrip(self):
''' Force a round-trip request/reply to the server, sometimes needed to
avoid race conditions. Mostly useful for testing.
Outside of test suites, this method hurts performance and should not be
needed.
Returns:
None
'''
self._connection.force_roundtrip()
def loop_until_closed(self, suppress_warning=False):
''' Execute a blocking loop that runs and executes event callbacks
until the connection is closed (e.g. by hitting Ctrl-C).
While this method can be used to run Bokeh application code "outside"
the Bokeh server, this practice is HIGHLY DISCOURAGED for any real
use case. This function is intended to facilitate testing ONLY.
'''
suppress_warning # shut up flake
from bokeh.util.deprecation import deprecated
deprecated("ClientSession.loop_until_closed is deprecated, and will be removed in an eventual 2.0 release. "
"Run Bokeh applications directly on a Bokeh server instead. See:\n\n"
" https//docs.bokeh.org/en/latest/docs/user_guide/server.html\n")
self._connection.loop_until_closed()
def pull(self):
''' Pull the server's state and set it as session.document.
If this is called more than once, session.document will be the same
object instance but its contents will be overwritten.
Automatically calls :func:`connect` before pulling.
'''
self.connect()
if not self.connected:
raise IOError("Cannot pull session document because we failed to connect to the server (to start the server, try the 'bokeh serve' command)")
if self.document is None:
doc = Document()
else:
doc = self.document
self._connection.pull_doc(doc)
if self.document is None:
self._attach_document(doc)
def push(self, document=None):
''' Push the given document to the server and record it as session.document.
If this is called more than once, the Document has to be the same (or None
to mean "session.document").
.. note::
Automatically calls :func:`~connect` before pushing.
Args:
document (:class:`~bokeh.document.Document`, optional) :
The document which will be kept in sync with the server document.
None to use session.document or create a new document.
'''
if self.document is None:
if document is None:
doc = Document()
else:
doc = document
else:
if document is None:
doc = self.document
else:
raise ValueError("Cannot push() a different document from existing session.document")
self.connect()
if not self.connected:
raise IOError("Cannot push session document because we failed to connect to the server (to start the server, try the 'bokeh serve' command)")
self._connection.push_doc(doc)
if self._document is None:
self._attach_document(doc)
def request_server_info(self):
''' Ask for information about the server.
Returns:
A dictionary of server attributes.
'''
return self._connection.request_server_info()
def show(self, obj=None, browser=None, new="tab"):
''' Open a browser displaying this session.
Args:
obj (LayoutDOM object, optional) : a Layout (Row/Column),
Plot or Widget object to display. The object will be added
to the session's document.
browser (str, optional) : browser to show with (default: None)
For systems that support it, the **browser** argument allows
specifying which browser to display in, e.g. "safari", "firefox",
"opera", "windows-default" (see the ``webbrowser`` module
documentation in the standard lib for more details).
new (str, optional) : new file output mode (default: "tab")
For file-based output, opens or raises the browser window
showing the current output file. If **new** is 'tab', then
opens a new tab. If **new** is 'window', then opens a new window.
'''
if obj and obj not in self.document.roots:
self.document.add_root(obj)
show_session(session=self, browser=browser, new=new)
# Internal methods --------------------------------------------------------
def _attach_document(self, document):
self._document = document
self._document.on_change_dispatch_to(self)
self._callbacks.add_session_callbacks(self._document.session_callbacks)
def _document_patched(self, event):
if event.setter is self:
log.debug("Not sending notification back to server for a change it requested")
return
# TODO (havocp): our "change sync" protocol is flawed
# because if both sides change the same attribute at the
# same time, they will each end up with the state of the
# other and their final states will differ.
self._connection._send_patch_document(self._id, event)
@classmethod
def _ensure_session_id(cls, session_id):
if session_id is None:
session_id = generate_session_id()
return session_id
def _handle_patch(self, message):
message.apply_to_document(self.document, self)
def _notify_disconnected(self):
''' Called by the ClientConnection we are using to notify us of disconnect.
'''
if self.document is not None:
self.document.remove_on_change(self)
self._callbacks.remove_all_callbacks()
def _session_callback_added(self, event):
self._callbacks.add_session_callback(event.callback)
def _session_callback_removed(self, event):
self._callbacks.remove_session_callback(event.callback)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
__doc__ = format_docstring(__doc__)
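# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A minimal round trip with the session API above, assuming the bokeh.client
# helper pull_session() of this release line and a locally running
# `bokeh serve` instance; the app path "/myapp" is hypothetical.
def _example_session_round_trip():
    from bokeh.client import pull_session
    # Pull the server's current document for the app into a new session.
    session = pull_session(url="http://localhost:5006/myapp")
    try:
        doc = session.document      # populated by pull()
        return len(doc.roots)       # e.g. inspect the pulled model tree
    finally:
        session.close()             # tear down the websocket connection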
| |
'''py.test fixtures for Pyramid.
http://pyramid.readthedocs.org/en/latest/narr/testing.html
'''
import tempfile
import pkg_resources
import pytest
from pytest import fixture
from snovault.tests.testappfixtures import _app_settings as sno_settings
tempfile.tempdir = '/tmp'
pytest_plugins = [
'encoded.tests.fixtures.shared_fixtures',
'snovault.tests.serverfixtures',
'snovault.tests.testappfixtures',
'snovault.tests.toolfixtures',
'snovault.tests.pyramidfixtures',
'encoded.tests.fixtures.batch_download',
'encoded.tests.fixtures.ontology',
'encoded.tests.fixtures.testapp',
'encoded.tests.fixtures.schemas.access_key',
'encoded.tests.fixtures.schemas.analysis_step_version',
'encoded.tests.fixtures.schemas.analysis_step',
'encoded.tests.fixtures.schemas.analysis',
'encoded.tests.fixtures.schemas.annotation',
'encoded.tests.fixtures.schemas.antibody_characterization',
'encoded.tests.fixtures.schemas.atac_alignment_enrichment_quality_metric',
'encoded.tests.fixtures.schemas.atac_alignment_quality_metric',
'encoded.tests.fixtures.schemas.atac_library_complexity_quality_metric',
'encoded.tests.fixtures.schemas.atac_peak_enrichment_quality_metric',
'encoded.tests.fixtures.schemas.atac_replication_quality_metric',
'encoded.tests.fixtures.schemas.genetic_modification',
'encoded.tests.fixtures.schemas.analysis_step_run',
'encoded.tests.fixtures.schemas.antibody_lot',
'encoded.tests.fixtures.schemas.antibody',
'encoded.tests.fixtures.schemas.award',
'encoded.tests.fixtures.schemas.bismark_quality_metric',
'encoded.tests.fixtures.schemas.biosample_characterization',
'encoded.tests.fixtures.schemas.biosample_type',
'encoded.tests.fixtures.schemas.biosample',
'encoded.tests.fixtures.schemas.cart',
'encoded.tests.fixtures.schemas.characterization',
'encoded.tests.fixtures.schemas.chia_pet_alignment_quality_metric',
'encoded.tests.fixtures.schemas.chia_pet_chr_interactions_quality_metric',
'encoded.tests.fixtures.schemas.chia_pet_peak_enrichment_quality_metric',
'encoded.tests.fixtures.schemas.chip_alignment_enrichment_quality_metric',
'encoded.tests.fixtures.schemas.chip_peak_enrichment_quality_metric',
'encoded.tests.fixtures.schemas.chip_replication_quality_metric',
'encoded.tests.fixtures.schemas.chip_alignment_samstat_quality_metric',
'encoded.tests.fixtures.schemas.chip_library_quality_metric',
'encoded.tests.fixtures.schemas.chipseq_filter_quality_metric',
'encoded.tests.fixtures.schemas.collection_series',
'encoded.tests.fixtures.schemas.computational_model',
'encoded.tests.fixtures.schemas.correlation_quality_metric',
'encoded.tests.fixtures.schemas.cpg_correlation_quality_metric',
'encoded.tests.fixtures.schemas.dataset',
'encoded.tests.fixtures.schemas.differential_accessibility_series',
'encoded.tests.fixtures.schemas.differentiation_series',
'encoded.tests.fixtures.schemas.disease_series',
'encoded.tests.fixtures.schemas.dnase_footprinting_quality_metric',
'encoded.tests.fixtures.schemas.document',
'encoded.tests.fixtures.schemas.duplicates_quality_metric',
'encoded.tests.fixtures.schemas.experiment_series',
'encoded.tests.fixtures.schemas.experiment',
'encoded.tests.fixtures.schemas.file',
'encoded.tests.fixtures.schemas.fly_donor',
'encoded.tests.fixtures.schemas.gembs_alignment_quality_metric',
'encoded.tests.fixtures.schemas.generic_quality_metric',
'encoded.tests.fixtures.schemas.sc_atac_multiplet_quality_metric',
'encoded.tests.fixtures.schemas.gene',
'encoded.tests.fixtures.schemas.gene_silencing_series',
'encoded.tests.fixtures.schemas.functional_characterization_experiment',
'encoded.tests.fixtures.schemas.functional_characterization_series',
'encoded.tests.fixtures.schemas.hic_quality_metric',
'encoded.tests.fixtures.schemas.histone_chipseq_quality_metric',
'encoded.tests.fixtures.schemas.hotspot_quality_metric',
'encoded.tests.fixtures.schemas.human_donor',
'encoded.tests.fixtures.schemas.idr_quality_metric',
'encoded.tests.fixtures.schemas.lab',
'encoded.tests.fixtures.schemas.library',
'encoded.tests.fixtures.schemas.long_read_rna_quantification_quality_metric',
'encoded.tests.fixtures.schemas.long_read_rna_mapping_quality_metric',
'encoded.tests.fixtures.schemas.mad_quality_metric',
'encoded.tests.fixtures.schemas.manatee_donor',
'encoded.tests.fixtures.schemas.matched_set',
'encoded.tests.fixtures.schemas.micro_rna_mapping_quality_metric',
'encoded.tests.fixtures.schemas.micro_rna_quantification_quality_metric',
'encoded.tests.fixtures.schemas.mouse_donor',
'encoded.tests.fixtures.schemas.multiomics_series',
'encoded.tests.fixtures.schemas.organism',
'encoded.tests.fixtures.schemas.page',
'encoded.tests.fixtures.schemas.pipeline',
'encoded.tests.fixtures.schemas.platform',
'encoded.tests.fixtures.schemas.publication_data',
'encoded.tests.fixtures.schemas.publication',
'encoded.tests.fixtures.schemas.pulse_chase_time_series',
'encoded.tests.fixtures.schemas.quality_metric',
'encoded.tests.fixtures.schemas.reference_epigenome',
'encoded.tests.fixtures.schemas.reference',
'encoded.tests.fixtures.schemas.replicate',
'encoded.tests.fixtures.schemas.samtools_flagstats_quality_metric',
'encoded.tests.fixtures.schemas.samtools_stats_quality_metric',
'encoded.tests.fixtures.schemas.sc_atac_alignment_quality_metric',
'encoded.tests.fixtures.schemas.sc_atac_library_complexity_quality_metric',
'encoded.tests.fixtures.schemas.single_cell_rna_series',
'encoded.tests.fixtures.schemas.single_cell_unit',
'encoded.tests.fixtures.schemas.software_version',
'encoded.tests.fixtures.schemas.software',
'encoded.tests.fixtures.schemas.source',
'encoded.tests.fixtures.schemas.star_quality_metric',
'encoded.tests.fixtures.schemas.target',
'encoded.tests.fixtures.schemas.transgenic_enhancer_experiment',
'encoded.tests.fixtures.schemas.treatment_time_series',
'encoded.tests.fixtures.schemas.treatment_concentration_series',
'encoded.tests.fixtures.schemas.treatment',
'encoded.tests.fixtures.schemas.user',
'encoded.tests.fixtures.schemas.worm_donor',
]
@pytest.fixture(autouse=True)
def autouse_external_tx(external_tx):
pass
_app_settings = {
'collection_datastore': 'database',
'item_datastore': 'database',
'multiauth.policies': 'auth0 session remoteuser accesskey',
'multiauth.groupfinder': 'encoded.authorization.groupfinder',
'multiauth.policy.auth0.use': 'encoded.authentication.NamespacedAuthenticationPolicy',
'multiauth.policy.auth0.base': 'encoded.auth0.Auth0AuthenticationPolicy',
'multiauth.policy.auth0.namespace': 'auth0',
'multiauth.policy.session.use': 'encoded.authentication.NamespacedAuthenticationPolicy',
'multiauth.policy.session.base': 'pyramid.authentication.SessionAuthenticationPolicy',
'multiauth.policy.session.namespace': 'mailto',
'multiauth.policy.remoteuser.use': 'encoded.authentication.NamespacedAuthenticationPolicy',
'multiauth.policy.remoteuser.namespace': 'remoteuser',
'multiauth.policy.remoteuser.base': 'pyramid.authentication.RemoteUserAuthenticationPolicy',
'multiauth.policy.accesskey.use': 'encoded.authentication.NamespacedAuthenticationPolicy',
'multiauth.policy.accesskey.namespace': 'accesskey',
'multiauth.policy.accesskey.base': 'encoded.authentication.BasicAuthAuthenticationPolicy',
'multiauth.policy.accesskey.check': 'encoded.authentication.basic_auth_check',
'load_test_only': True,
'testing': True,
'stage_for_followup': 'vis_indexer',
'pyramid.debug_authorization': True,
'postgresql.statement_timeout': 20,
'retry.attempts': 3,
}
_app_settings['local_storage_host'] = sno_settings['local_storage_host']
_app_settings['local_storage_port'] = sno_settings['local_storage_port']
_app_settings['local_storage_redis_index'] = sno_settings['local_storage_redis_index']
_app_settings['local_storage_timeout'] = sno_settings['local_storage_timeout']
_app_settings['local_tz'] = sno_settings['local_tz']
@fixture(scope='session')
def app_settings(request, wsgi_server_host_port, conn, DBSession, redis_server):
from snovault import DBSESSION
settings = _app_settings.copy()
settings['auth0.audiences'] = 'http://%s:%s' % wsgi_server_host_port
settings[DBSESSION] = DBSession
return settings
@fixture(scope='session')
def app(app_settings):
'''WSGI application level functional testing.
'''
from encoded import main
return main({}, **app_settings)
@pytest.mark.fixture_cost(500)
@pytest.yield_fixture(scope='session')
def workbook(conn, app, app_settings):
tx = conn.begin_nested()
try:
from webtest import TestApp
environ = {
'HTTP_ACCEPT': 'application/json',
'REMOTE_USER': 'TEST',
}
testapp = TestApp(app, environ)
from encoded.loadxl import load_all
from pkg_resources import resource_filename
inserts = resource_filename('encoded', 'tests/data/inserts/')
docsdir = [resource_filename('encoded', 'tests/data/documents/')]
load_all(testapp, inserts, docsdir)
yield
finally:
tx.rollback()
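# --- Illustrative usage (editor's sketch) ---
# How the session-scoped fixtures above compose in a test module. The request
# target '/' and status='*' (accept any response code) are assumptions about
# the encoded application's routes, not part of the original suite.
def _example_fixture_usage(app):
    from webtest import TestApp
    testapp = TestApp(app, {'HTTP_ACCEPT': 'application/json',
                            'REMOTE_USER': 'TEST'})
    res = testapp.get('/', status='*')
    return res.status_int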
| |
"""Test for views creation and link to html pages."""
from pyramid import testing
from pyramid_learning_journal.models import (
Entry,
get_tm_session,
)
from pyramid_learning_journal.models.meta import Base
from pyramid_learning_journal.views.default import (
list_view,
create_view,
detail_view,
edit_view,
login,
logout
)
from pyramid.config import Configurator
from pyramid.httpexceptions import HTTPNotFound, HTTPFound
from faker import Faker
import pytest
import datetime
import transaction
import os
FAKE_STUFF = Faker()
FAKE_ENTRIES = [Entry(
title=FAKE_STUFF.text(20),
body=FAKE_STUFF.text(250),
creation_date=datetime.datetime.now()
) for x in range(25)]
@pytest.fixture
def set_creds():
"""Set credentials for user and secret for tests."""
from passlib.apps import custom_app_context as context
os.environ['AUTH_USERNAME'] = 'badman'
os.environ['AUTH_PASSWORD'] = context.hash('thatsnotthejoker')
os.environ['SESSION_SECRET'] = 'sneekysnackbox'
@pytest.fixture
def dummy_request(db_session):
"""Make a fake HTTP request."""
return testing.DummyRequest(dbsession=db_session)
@pytest.fixture
def add_models(dummy_request):
"""Add entries to a dummy request."""
dummy_request.dbsession.add_all(FAKE_ENTRIES)
@pytest.fixture(scope="session")
def configuration(request):
"""Set up a Configurator instance."""
config = testing.setUp(settings={
'sqlalchemy.url': os.environ.get('TEST_DATABASE')
})
config.include('pyramid_learning_journal.models')
config.include('pyramid_learning_journal.routes')
def teardown():
testing.tearDown()
request.addfinalizer(teardown)
return config
@pytest.fixture
def db_session(configuration, request):
"""Create a session for interacting with the test database."""
SessionFactory = configuration.registry['dbsession_factory']
session = SessionFactory()
engine = session.bind
Base.metadata.create_all(engine)
def teardown():
session.transaction.rollback()
request.addfinalizer(teardown)
return session
@pytest.fixture(scope="session")
def testapp(request):
"""Create a test application to use for functional tests."""
from webtest import TestApp
def main(global_config, **settings):
"""Function returns a fake Pyramid WSGI application."""
settings['sqlalchemy.url'] = os.environ.get('TEST_DATABASE')
config = Configurator(settings=settings)
config.include('pyramid_jinja2')
config.include('pyramid_learning_journal.models')
config.include('pyramid_learning_journal.routes')
config.include('pyramid_learning_journal.security')
config.add_static_view(name='static',
path='pyramid_learning_journal:static')
config.scan()
return config.make_wsgi_app()
app = main({})
SessionFactory = app.registry['dbsession_factory']
engine = SessionFactory().bind
Base.metadata.create_all(bind=engine)
def teardown():
Base.metadata.drop_all(bind=engine)
request.addfinalizer(teardown)
return TestApp(app)
@pytest.fixture
def fill_test_db(testapp):
"""Set fake entries to the db for a session."""
SessionFactory = testapp.app.registry['dbsession_factory']
with transaction.manager:
dbsession = get_tm_session(SessionFactory, transaction.manager)
dbsession.add_all(FAKE_ENTRIES)
return dbsession
# ----- Unit Tests ----- #
def test_login_returns_dict(dummy_request):
"""Test request to login returns a dict."""
response = login(dummy_request)
assert type(response) == dict
def test_login_bad_creds_both(dummy_request):
"""Test login with bad credentials returns error message."""
dummy_request.method = 'POST'
dummy_request.POST = {
'username': 'blergflerg',
'password': 'asfdsakfmldsa'
}
assert login(dummy_request) == {'error': 'Bad username or password'}
def test_login_bad_creds_one(dummy_request, set_creds):
"""Test login with bad password returns error message."""
dummy_request.method = 'POST'
dummy_request.POST = {
'username': os.environ.get('AUTH_USERNAME'),
'password': 'asfdsakfmldsa'
}
assert login(dummy_request) == {'error': 'Bad username or password'}
assert type(login(dummy_request)) == dict
def test_login_with_good_creds(dummy_request, set_creds):
"""Test login with good credentials redirects."""
dummy_request.method = 'POST'
dummy_request.POST = {
'username': os.environ.get('AUTH_USERNAME'),
'password': 'thatsnotthejoker'
}
response = login(dummy_request)
assert response.status_code == 302
assert isinstance(response, HTTPFound)
def test_logout_redirect(dummy_request):
"""Test for redirection upon logout."""
response = logout(dummy_request)
assert response.status_code == 302
assert isinstance(response, HTTPFound)
def test_list_view_returns_empty_without_db(dummy_request):
"""Test list view returns a dict when called."""
response = list_view(dummy_request)
assert len(response['posts']) == 0
def test_filling_fake_db(add_models, db_session):
"""Check for entries added to db."""
assert len(db_session.query(Entry).all()) == 25
def test_list_view_returns_dict(dummy_request):
"""Test list view returns a dict when called."""
assert type(list_view(dummy_request)) == dict
def test_detail_view_with_id_raises_except(dummy_request):
"""Test proper error raising with non matching id on detail view."""
dummy_request.matchdict['id'] = '9000'
with pytest.raises(HTTPNotFound):
detail_view(dummy_request)
def test_detail_view_returns_dict_with_db(db_session, dummy_request):
"""Test detail view returns a dict when called."""
fake = Entry(
title=u'Stuff',
body=u'Some thing goes here.',
creation_date=datetime.datetime.now()
)
db_session.add(fake)
fakeid = str(db_session.query(Entry)[0].id)
dummy_request.matchdict['id'] = fakeid
response = detail_view(dummy_request)
assert type(response) == dict
def test_create_view_returns_dict(dummy_request):
"""Test create view returns a dict when called."""
assert type(create_view(dummy_request)) == dict
def test_create_view_with_incomplete_post(dummy_request):
"""Test that create view returns the partial input."""
dummy_request.method = 'POST'
dummy_request.POST = {'title': 'bobs post', 'body': ''}
response = create_view(dummy_request)
assert response['title'] == 'bobs post'
def test_create_view_adds_a_post(dummy_request, db_session):
"""Given a complete post create view adds it to the DB."""
assert len(db_session.query(Entry).all()) == 0
dummy_request.method = 'POST'
dummy_request.POST = {'title': 'bobs post', 'body': 'stuff'}
create_view(dummy_request)
assert len(db_session.query(Entry).all()) == 1
assert db_session.query(Entry).first().title == 'bobs post'
def test_create_view_on_success_redirects(dummy_request):
"""Test that on creation of a new post redirects."""
dummy_request.method = 'POST'
dummy_request.POST = {'title': 'morgans post', 'body': 'cake'}
response = create_view(dummy_request)
assert response.status_code == 302
assert isinstance(response, HTTPFound)
def test_edit_view_returns_dict_with_db(dummy_request, db_session):
"""Test edit view returns a dict when called with a db."""
fake = Entry(
title=u'Stuff',
body=u'Some thing goes here.',
creation_date=datetime.datetime.now()
)
db_session.add(fake)
fakeid = str(db_session.query(Entry)[0].id)
dummy_request.matchdict['id'] = fakeid
response = edit_view(dummy_request)
assert type(response) == dict
def test_db_gets_new_entry_with_content(dummy_request, db_session):
"""Test db gets entry with proper content."""
fake = Entry(
title=u'Stuff',
body=u'Some thing goes here.',
creation_date=datetime.datetime.now()
)
db_session.add(fake)
fakeid = str(db_session.query(Entry)[0].id)
dummy_request.matchdict['id'] = fakeid
response = detail_view(dummy_request)
assert len(db_session.query(Entry).all()) == 1
assert fake.title in response['entry'].title
assert fake.body in response['entry'].body
def test_edit_view_with_id_raises_except(dummy_request):
"""Test proper error raising with non matching id on edit view."""
dummy_request.matchdict['id'] = '9000'
with pytest.raises(HTTPNotFound):
edit_view(dummy_request)
def test_edit_view_with_post_changes_an_entry(dummy_request, db_session):
"""Test that a post request changes an entry."""
fake = Entry(
title=u'Cake Story',
body=u'The best cake ever eaten was chocolate!',
creation_date=datetime.datetime.now()
)
db_session.add(fake)
fakeid = str(db_session.query(Entry)[0].id)
dummy_request.matchdict['id'] = fakeid
dummy_request.method = 'POST'
dummy_request.POST = {'title': 'Pie Story',
'body': 'The pie story is better though!'}
edit_view(dummy_request)
assert db_session.query(Entry)[0].title == 'Pie Story'
assert db_session.query(Entry)[0].body == 'The pie story is better though!'
def test_edit_view_on_success_redirects(dummy_request, db_session):
"""Test that on edit of post redirects."""
fake = Entry(
title=u'Cake Story',
body=u'The best cake ever eaten was chocolate!',
creation_date=datetime.datetime.now()
)
db_session.add(fake)
fakeid = str(db_session.query(Entry)[0].id)
dummy_request.matchdict['id'] = fakeid
dummy_request.method = 'POST'
dummy_request.POST = {'title': 'Pie Story',
'body': 'The pie story is better though!'}
response = edit_view(dummy_request)
assert response.status_code == 302
assert isinstance(response, HTTPFound)
# # ----- Functional Tests ----- #
def test_user_forbidden_create_page(testapp):
"""Test access blocked for non logged users on create page."""
testapp.get('/logout')
response = testapp.get('/journal/new-entry', status=403)
assert response.status_code == 403
def test_user_forbidden_update_page(testapp):
"""Test access blocked for non logged users on update page."""
testapp.get('/logout')
response = testapp.get('/journal/1/edit-entry', status=403)
assert response.status_code == 403
def test_home_route_has_home_contents(testapp, db_session):
"""Test list view is routed to home page."""
response = testapp.get('/')
assert '<h1 class="blog-title">The Pyramid Blog</h1>' in response
def test_home_view_returns_200(testapp, db_session):
"""Test home view with testapp returns 200 OK."""
response = testapp.get('/')
assert response.status_code == 200
def test_home_route_has_list_of_entries(fill_test_db, db_session, testapp):
"""Test if there are the right amount of entries on the home page."""
response = testapp.get('/')
num_posts = response.html.find_all('h2')
assert len(num_posts) == 25
def test_new_entry_view_returns_proper_content(testapp, set_creds):
"""New entry view returns the actual content from the html."""
testapp.post('/login', {
'username': os.environ.get('AUTH_USERNAME'),
'password': 'thatsnotthejoker'
})
response = testapp.get('/journal/new-entry')
assert '<h1 class="blog-title">Create New Entry!</h1>' in response
def test_detail_view_has_single_entry(testapp, db_session):
"""Test that the detail page only brings up one entry."""
testapp.post('/login', {
'username': os.environ.get('AUTH_USERNAME'),
'password': 'thatsnotthejoker'
})
response = testapp.get('/journal/1')
html = response.html
assert html.find()
num_list_items = (len(html.find_all('h2')))
assert num_list_items == 1
def test_detail_view_returns_proper_content(testapp, db_session):
"""Entry view returns a Response object when given a request."""
testapp.post('/login', {
'username': os.environ.get('AUTH_USERNAME'),
'password': 'thatsnotthejoker'
})
response = testapp.get('/journal/1', status=200)
html = response.html
entry = db_session.query(Entry).first()
assert html.find()
expected_text = entry.title
assert expected_text in str(html)
def test_edit_view_has_single_entry(testapp, db_session):
"""Test that the detail page only brings up one entry."""
testapp.post('/login', {
'username': os.environ.get('AUTH_USERNAME'),
'password': 'thatsnotthejoker'
})
response = testapp.get('/journal/1/edit-entry')
html = response.html
entry = db_session.query(Entry).first()
assert html.find()
assert entry.title in str(html)
def test_edit_view_returns_proper_content(testapp, db_session):
"""Entry view returns a Response object when given a request."""
testapp.post('/login', {
'username': os.environ.get('AUTH_USERNAME'),
'password': 'thatsnotthejoker'
})
response = testapp.get('/journal/1/edit-entry')
assert '<h1 class="blog-title">Edit Entry</h1>' in response.text
def test_detail_view_with_bad_id(testapp):
"""Test a bad ID to the detail view returns 404 page."""
response = testapp.get('/journal/9001', status=404)
assert '404 page not found' in response.text
def test_edit_view_with_bad_id(testapp):
"""Test a bad ID to the edit view returns 404 page."""
response = testapp.get('/journal/9001/edit-entry', status=404)
assert '404 page not found' in response.text
def test_detail_entry_has_404(testapp):
"""Check to see if detail view 404s properly."""
response = testapp.get('/journal/100', status=404)
html = response.html
assert html.find()
expected_text = '404 page not found'
assert expected_text in str(html)
def test_edit_entry_has_404(testapp):
"""Check to see if edit view 404s properly."""
response = testapp.get('/journal/100/edit-entry', status=404)
html = response.html
assert html.find()
expected_text = '404 page not found'
assert expected_text in str(html)
def test_create_view_returns_200(testapp, db_session):
"""Look for a 200 in create view."""
testapp.post('/login', {
'username': os.environ.get('AUTH_USERNAME'),
'password': 'thatsnotthejoker'
})
response = testapp.get('/journal/new-entry')
assert response.status_code == 200
def test_edit_view_returns_200(testapp, db_session):
"""Look for a 200 in edit view."""
testapp.post('/login', {
'username': os.environ.get('AUTH_USERNAME'),
'password': 'thatsnotthejoker'
})
response = testapp.get('/journal/1/edit-entry')
assert response.status_code == 200
def test_detail_view_returns_200(testapp, db_session):
"""Look for a 200 in detail view."""
response = testapp.get('/journal/1')
assert response.status_code == 200
# # ----- Security/CSRF Tests ----- #
def test_login_returns_200(testapp):
"""Test that the response code for the login view is 200."""
response = testapp.get('/login')
assert response.status_code == 200
def test_create_view_logged_in_partial_post(testapp):
"""Test that post creation is open when logged and returns partial data."""
response = testapp.get('/journal/new-entry')
token = response.html.find('input', {'type': 'hidden'}).attrs['value']
fake_post = {
'csrf_token': token,
'title': '',
'body': 'sample text'
}
response = testapp.post('/journal/new-entry', fake_post)
assert 'sample text' in response.text
def test_create_view_logged_in_full_post(testapp):
"""Test post creation is open when logged, redirects with a full post."""
response = testapp.get('/journal/new-entry')
token = response.html.find('input', {'type': 'hidden'}).attrs['value']
fake_post = {
'csrf_token': token,
'title': 'yes',
'body': 'sample text'
}
response = testapp.post('/journal/new-entry', fake_post)
assert response.location == 'http://localhost/'
assert fake_post['title'] in testapp.get('/')
assert fake_post['body'] in testapp.get('/')
def test_edit_view_logged_in_edit_post(testapp):
"""Test edit is open when logged, redirects, and has the new content."""
response = testapp.get('/journal/1/edit-entry')
token = response.html.find('input', {'type': 'hidden'}).attrs['value']
response_body = response.html.find('textarea',
{'name': 'body'})
fake_post = {
'csrf_token': token,
'title': 'New title for some stuff!',
'body': response_body
}
response = testapp.post('/journal/1/edit-entry', fake_post)
assert response.location == 'http://localhost/journal/1'
assert fake_post['title'] in testapp.get('/journal/1')
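# --- Editor's note: CSRF pattern used by the security tests above ---
# The tests scrape the hidden CSRF input from a GET of the form page and echo
# it back in the POST. A small helper capturing that pattern (the helper name
# is the editor's, not the original suite's):
def _get_csrf_token(testapp, url):
    """Fetch `url` and return the value of its hidden CSRF input."""
    page = testapp.get(url)
    return page.html.find('input', {'type': 'hidden'}).attrs['value']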
| |
#!/usr/bin/env python
# A part of pdfrw (pdfrw.googlecode.com)
# Copyright (C) 2006-2009 Patrick Maupin, Austin, Texas
# MIT license -- See LICENSE.txt for details
'''
The PdfWriter class writes an entire PDF file out to disk.
The writing process is not at all optimized or organized.
An instance of the PdfWriter class has two methods:
addpage(page)
and
write(fname)
addpage() assumes that the pages are part of a valid
tree/forest of PDF objects.
'''
try:
set
except NameError:
from sets import Set as set
from pdfrw.objects import (PdfName, PdfArray, PdfDict, IndirectPdfDict,
PdfObject, PdfString)
from pdfrw.compress import compress as do_compress
from pdfrw.errors import PdfOutputError, log
import pickle
NullObject = PdfObject('null')
NullObject.indirect = True
NullObject.Type = 'Null object'
def FormatObjects(f, trailer, version='1.3', compress=True, killobj=(),
id=id, isinstance=isinstance, getattr=getattr, len=len,
sum=sum, set=set, str=str, basestring=basestring,
hasattr=hasattr, repr=repr, enumerate=enumerate,
list=list, dict=dict, tuple=tuple,
do_compress=do_compress, PdfArray=PdfArray,
PdfDict=PdfDict, PdfObject=PdfObject,
encode=PdfString.encode):
''' FormatObjects performs the actual formatting and disk write.
Should be a class, was a class, turned into nested functions
for performance (to reduce attribute lookups).
'''
def add(obj):
''' Add an object to our list, if it's an indirect
object. Just format it if not.
'''
# Can't hash dicts, so key on the object's id() instead
objid = id(obj)
# Automatically set stream objects to indirect
if isinstance(obj, PdfDict):
indirect = obj.indirect or (obj.stream is not None)
else:
indirect = getattr(obj, 'indirect', False)
if not indirect:
if objid in visited:
log.warning('Replicating direct %s object, '
'should be indirect for optimal file size' %
type(obj))
obj = type(obj)(obj)
objid = id(obj)
visiting(objid)
result = format_obj(obj)
leaving(objid)
return result
objnum = indirect_dict_get(objid)
# If we haven't seen the object yet, we need to
# add it to the indirect object list.
if objnum is None:
swapped = swapobj(objid)
if swapped is not None:
old_id = objid
obj = swapped
objid = id(obj)
objnum = indirect_dict_get(objid)
if objnum is not None:
indirect_dict[old_id] = objnum
return '%s 0 R' % objnum
objnum = len(objlist) + 1
objlist_append(None)
indirect_dict[objid] = objnum
deferred.append((objnum - 1, obj))
return '%s 0 R' % objnum
def format_array(myarray, formatter):
# Format array data into semi-readable ASCII
if sum([len(x) for x in myarray]) <= 70:
return formatter % space_join(myarray)
return format_big(myarray, formatter)
def format_big(myarray, formatter):
bigarray = []
count = 1000000  # larger than any line length, so the first item always starts a new subarray
for x in myarray:
lenx = len(x) + 1
count += lenx
if count > 71:
subarray = []
bigarray.append(subarray)
count = lenx
subarray.append(x)
return formatter % lf_join([space_join(x) for x in bigarray])
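# Editor's note: format_array keeps short arrays on one line and defers to
# format_big once the joined length would exceed 70 characters, e.g.
#   format_array(['1', '2', '3'], '[%s]')  ->  '[1 2 3]'
# while a long array is split into '\n '-joined runs of at most ~71 chars.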
def format_obj(obj):
''' format PDF object data into semi-readable ASCII.
May mutually recurse with add() -- add() will
return references for indirect objects, and add
the indirect object to the list.
'''
while 1:
if isinstance(obj, (list, dict, tuple)):
if isinstance(obj, PdfArray):
myarray = [add(x) for x in obj]
return format_array(myarray, '[%s]')
elif isinstance(obj, PdfDict):
if compress and obj.stream:
do_compress([obj])
myarray = []
dictkeys = [str(x) for x in obj.keys()]
dictkeys.sort()
for key in dictkeys:
myarray.append(key)
myarray.append(add(obj[key]))
result = format_array(myarray, '<<%s>>')
stream = obj.stream
if stream is not None:
result = ('%s\nstream\n%s\nendstream' %
(result, stream))
return result
# Plain list/dict/tuple: wrap in the matching Pdf container and retry
obj = (PdfArray, PdfDict)[isinstance(obj, dict)](obj)
continue
if not hasattr(obj, 'indirect') and isinstance(obj, basestring):
return encode(obj)
return str(getattr(obj, 'encoded', obj))
def format_deferred():
while deferred:
index, obj = deferred.pop()
objlist[index] = format_obj(obj)
indirect_dict = {}
indirect_dict_get = indirect_dict.get
objlist = []
objlist_append = objlist.append
visited = set()
visiting = visited.add
leaving = visited.remove
space_join = ' '.join
lf_join = '\n '.join
f_write = f.write
deferred = []
# Don't reference old catalog or pages objects --
# swap references to new ones.
swapobj = {PdfName.Catalog: trailer.Root,
PdfName.Pages: trailer.Root.Pages, None: trailer}.get
swapobj = [(objid, swapobj(obj.Type))
for objid, obj in killobj.iteritems()]
swapobj = dict((objid, obj is None and NullObject or obj)
for objid, obj in swapobj).get
for objid in killobj:
assert swapobj(objid) is not None
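# Editor's note on the three-step dance above: the first `swapobj` maps a
# killed object's /Type to its replacement (the new Catalog, the new Pages,
# or the trailer for objects with no /Type); the second resolves that per
# object id; the third substitutes NullObject when no replacement exists,
# so stale references are rewritten during formatting instead of emitted.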
# The first format of trailer gets all the information,
# but we throw away the actual trailer formatting.
format_obj(trailer)
# Keep formatting until we're done.
# (Used to recurse inside format_obj for this, but
# hit system limit.)
format_deferred()
# Now we know the size, so we update the trailer dict
# and get the formatted data.
trailer.Size = PdfObject(len(objlist) + 1)
trailer = format_obj(trailer)
# Now we have all the pieces to write out to the file.
# Keep careful track of the counts while we do it so
# we can correctly build the cross-reference.
header = '%%PDF-%s\n%%\xe2\xe3\xcf\xd3\n' % version
f_write(header)
offset = len(header)
offsets = [(0, 65535, 'f')]
offsets_append = offsets.append
for i, x in enumerate(objlist):
objstr = '%s 0 obj\n%s\nendobj\n' % (i + 1, x)
offsets_append((offset, 0, 'n'))
offset += len(objstr)
f_write(objstr)
f_write('xref\n0 %s\n' % len(offsets))
for x in offsets:
f_write('%010d %05d %s\r\n' % x)
f_write('trailer\n\n%s\nstartxref\n%s\n%%%%EOF\n' % (trailer, offset))
class PdfWriter(object):
_trailer = None
def __init__(self, version='1.3', compress=False):
self.pagearray = PdfArray()
self.compress = compress
self.version = version
self.killobj = {}
def addpage(self, page):
self._trailer = None
if page.Type != PdfName.Page:
raise PdfOutputError('Bad /Type: Expected %s, found %s'
% (PdfName.Page, page.Type))
inheritable = page.inheritable # searches for resources
self.pagearray.append(
IndirectPdfDict(
page,
Resources=inheritable.Resources,
MediaBox=inheritable.MediaBox,
CropBox=inheritable.CropBox,
Rotate=inheritable.Rotate,
)
)
# Add parents in the hierarchy to objects we
# don't want to output
killobj = self.killobj
obj = page.Parent
while obj is not None:
objid = id(obj)
if objid in killobj:
break
killobj[objid] = obj
obj = obj.Parent
return self
addPage = addpage # for compatibility with pyPdf
def addpages(self, pagelist):
for page in pagelist:
self.addpage(page)
return self
def _get_trailer(self):
trailer = self._trailer
if trailer is not None:
return trailer
# Create the basic object structure of the PDF file
trailer = PdfDict(
Root=IndirectPdfDict(
Type=PdfName.Catalog,
Pages=IndirectPdfDict(
Type=PdfName.Pages,
Count=PdfObject(len(self.pagearray)),
Kids=self.pagearray
)
)
)
# Make all the pages point back to the page dictionary and
# ensure they are indirect references
pagedict = trailer.Root.Pages
for page in pagedict.Kids:
page.Parent = pagedict
self._trailer = trailer
return trailer
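# Editor's sketch of the object graph built above:
#   trailer --/Root--> Catalog (indirect) --/Pages--> Pages (indirect)
#                                             /Count = len(pagearray)
#                                             /Kids  = pagearray
# and every page in /Kids gets /Parent pointing back at the Pages dict.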
def _set_trailer(self, trailer):
self._trailer = trailer
trailer = property(_get_trailer, _set_trailer)
def write(self, fname, trailer=None):
trailer = trailer or self.trailer
# Dump the data. We either have a filename or a preexisting
# file object.
preexisting = hasattr(fname, 'write')
f = preexisting and fname or open(fname, 'wb')
FormatObjects(f, trailer, self.version, self.compress, self.killobj)
if not preexisting:
f.close()
# Dump the trace.
if trailer.active_trace is not None:
fname_trace = fname + '.trace'
f = open(fname_trace, 'wb')
pickle.dump(trailer.active_trace, f)
f.close()
if __name__ == '__main__':
import logging
log.setLevel(logging.DEBUG)
import pdfreader
x = pdfreader.PdfReader('source.pdf')
y = PdfWriter()
for i, page in enumerate(x.pages):
print ' Adding page', i + 1, '\r',
y.addpage(page)
print
y.write('result.pdf')
print
| |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import binascii
import ipaddress
import os
import pytest
import six
from cryptography import x509
from cryptography.hazmat.backends.interfaces import RSABackend, X509Backend
from .test_x509 import _load_cert
class TestExtension(object):
def test_not_an_oid(self):
bc = x509.BasicConstraints(ca=False, path_length=None)
with pytest.raises(TypeError):
x509.Extension("notanoid", True, bc)
def test_critical_not_a_bool(self):
bc = x509.BasicConstraints(ca=False, path_length=None)
with pytest.raises(TypeError):
x509.Extension(x509.OID_BASIC_CONSTRAINTS, "notabool", bc)
def test_repr(self):
bc = x509.BasicConstraints(ca=False, path_length=None)
ext = x509.Extension(x509.OID_BASIC_CONSTRAINTS, True, bc)
assert repr(ext) == (
"<Extension(oid=<ObjectIdentifier(oid=2.5.29.19, name=basicConst"
"raints)>, critical=True, value=<BasicConstraints(ca=False, path"
"_length=None)>)>"
)
class TestKeyUsage(object):
def test_key_agreement_false_encipher_decipher_true(self):
with pytest.raises(ValueError):
x509.KeyUsage(
digital_signature=False,
content_commitment=False,
key_encipherment=False,
data_encipherment=False,
key_agreement=False,
key_cert_sign=False,
crl_sign=False,
encipher_only=True,
decipher_only=False
)
with pytest.raises(ValueError):
x509.KeyUsage(
digital_signature=False,
content_commitment=False,
key_encipherment=False,
data_encipherment=False,
key_agreement=False,
key_cert_sign=False,
crl_sign=False,
encipher_only=True,
decipher_only=True
)
with pytest.raises(ValueError):
x509.KeyUsage(
digital_signature=False,
content_commitment=False,
key_encipherment=False,
data_encipherment=False,
key_agreement=False,
key_cert_sign=False,
crl_sign=False,
encipher_only=False,
decipher_only=True
)
def test_properties_key_agreement_true(self):
ku = x509.KeyUsage(
digital_signature=True,
content_commitment=True,
key_encipherment=False,
data_encipherment=False,
key_agreement=False,
key_cert_sign=True,
crl_sign=False,
encipher_only=False,
decipher_only=False
)
assert ku.digital_signature is True
assert ku.content_commitment is True
assert ku.key_encipherment is False
assert ku.data_encipherment is False
assert ku.key_agreement is False
assert ku.key_cert_sign is True
assert ku.crl_sign is False
def test_key_agreement_true_properties(self):
ku = x509.KeyUsage(
digital_signature=False,
content_commitment=False,
key_encipherment=False,
data_encipherment=False,
key_agreement=True,
key_cert_sign=False,
crl_sign=False,
encipher_only=False,
decipher_only=True
)
assert ku.key_agreement is True
assert ku.encipher_only is False
assert ku.decipher_only is True
def test_key_agreement_false_properties(self):
ku = x509.KeyUsage(
digital_signature=False,
content_commitment=False,
key_encipherment=False,
data_encipherment=False,
key_agreement=False,
key_cert_sign=False,
crl_sign=False,
encipher_only=False,
decipher_only=False
)
assert ku.key_agreement is False
with pytest.raises(ValueError):
ku.encipher_only
with pytest.raises(ValueError):
ku.decipher_only
def test_repr_key_agreement_false(self):
ku = x509.KeyUsage(
digital_signature=True,
content_commitment=True,
key_encipherment=False,
data_encipherment=False,
key_agreement=False,
key_cert_sign=True,
crl_sign=False,
encipher_only=False,
decipher_only=False
)
assert repr(ku) == (
"<KeyUsage(digital_signature=True, content_commitment=True, key_en"
"cipherment=False, data_encipherment=False, key_agreement=False, k"
"ey_cert_sign=True, crl_sign=False, encipher_only=None, decipher_o"
"nly=None)>"
)
def test_repr_key_agreement_true(self):
ku = x509.KeyUsage(
digital_signature=True,
content_commitment=True,
key_encipherment=False,
data_encipherment=False,
key_agreement=True,
key_cert_sign=True,
crl_sign=False,
encipher_only=False,
decipher_only=False
)
assert repr(ku) == (
"<KeyUsage(digital_signature=True, content_commitment=True, key_en"
"cipherment=False, data_encipherment=False, key_agreement=True, k"
"ey_cert_sign=True, crl_sign=False, encipher_only=False, decipher_"
"only=False)>"
)
class TestSubjectKeyIdentifier(object):
def test_properties(self):
value = binascii.unhexlify(b"092384932230498bc980aa8098456f6ff7ff3ac9")
ski = x509.SubjectKeyIdentifier(value)
assert ski.digest == value
def test_repr(self):
ski = x509.SubjectKeyIdentifier(
binascii.unhexlify(b"092384932230498bc980aa8098456f6ff7ff3ac9")
)
ext = x509.Extension(x509.OID_SUBJECT_KEY_IDENTIFIER, False, ski)
if six.PY3:
assert repr(ext) == (
"<Extension(oid=<ObjectIdentifier(oid=2.5.29.14, name=subjectK"
"eyIdentifier)>, critical=False, value=<SubjectKeyIdentifier(d"
"igest=b\'\\t#\\x84\\x93\"0I\\x8b\\xc9\\x80\\xaa\\x80\\x98Eoo"
"\\xf7\\xff:\\xc9\')>)>"
)
else:
assert repr(ext) == (
"<Extension(oid=<ObjectIdentifier(oid=2.5.29.14, name=subjectK"
"eyIdentifier)>, critical=False, value=<SubjectKeyIdentifier(d"
"igest=\'\\t#\\x84\\x93\"0I\\x8b\\xc9\\x80\\xaa\\x80\\x98Eoo"
"\\xf7\\xff:\\xc9\')>)>"
)
def test_eq(self):
ski = x509.SubjectKeyIdentifier(
binascii.unhexlify(b"092384932230498bc980aa8098456f6ff7ff3ac9")
)
ski2 = x509.SubjectKeyIdentifier(
binascii.unhexlify(b"092384932230498bc980aa8098456f6ff7ff3ac9")
)
assert ski == ski2
def test_ne(self):
ski = x509.SubjectKeyIdentifier(
binascii.unhexlify(b"092384932230498bc980aa8098456f6ff7ff3ac9")
)
ski2 = x509.SubjectKeyIdentifier(
binascii.unhexlify(b"aa8098456f6ff7ff3ac9092384932230498bc980")
)
assert ski != ski2
assert ski != object()
class TestAuthorityKeyIdentifier(object):
def test_authority_cert_issuer_not_name(self):
with pytest.raises(TypeError):
x509.AuthorityKeyIdentifier(b"identifier", "notname", 3)
def test_authority_cert_serial_number_not_integer(self):
name = x509.Name([
x509.NameAttribute(x509.ObjectIdentifier('oid'), 'value1'),
x509.NameAttribute(x509.ObjectIdentifier('oid2'), 'value2'),
])
with pytest.raises(TypeError):
x509.AuthorityKeyIdentifier(b"identifier", name, "notanint")
def test_authority_issuer_none_serial_not_none(self):
with pytest.raises(ValueError):
x509.AuthorityKeyIdentifier(b"identifier", None, 3)
def test_authority_issuer_not_none_serial_none(self):
name = x509.Name([
x509.NameAttribute(x509.ObjectIdentifier('oid'), 'value1'),
x509.NameAttribute(x509.ObjectIdentifier('oid2'), 'value2'),
])
with pytest.raises(ValueError):
x509.AuthorityKeyIdentifier(b"identifier", name, None)
def test_authority_cert_serial_and_issuer_none(self):
aki = x509.AuthorityKeyIdentifier(b"id", None, None)
assert aki.key_identifier == b"id"
assert aki.authority_cert_issuer is None
assert aki.authority_cert_serial_number is None
def test_repr(self):
name = x509.Name([x509.NameAttribute(x509.OID_COMMON_NAME, 'myCN')])
aki = x509.AuthorityKeyIdentifier(b"digest", name, 1234)
if six.PY3:
assert repr(aki) == (
"<AuthorityKeyIdentifier(key_identifier=b'digest', authority_"
"cert_issuer=<Name([<NameAttribute(oid=<ObjectIdentifier(oid="
"2.5.4.3, name=commonName)>, value='myCN')>])>, authority_cer"
"t_serial_number=1234)>"
)
else:
assert repr(aki) == (
"<AuthorityKeyIdentifier(key_identifier='digest', authority_ce"
"rt_issuer=<Name([<NameAttribute(oid=<ObjectIdentifier(oid=2.5"
".4.3, name=commonName)>, value='myCN')>])>, authority_cert_se"
"rial_number=1234)>"
)
class TestBasicConstraints(object):
def test_ca_not_boolean(self):
with pytest.raises(TypeError):
x509.BasicConstraints(ca="notbool", path_length=None)
def test_path_length_not_ca(self):
with pytest.raises(ValueError):
x509.BasicConstraints(ca=False, path_length=0)
def test_path_length_not_int(self):
with pytest.raises(TypeError):
x509.BasicConstraints(ca=True, path_length=1.1)
with pytest.raises(TypeError):
x509.BasicConstraints(ca=True, path_length="notint")
def test_path_length_negative(self):
with pytest.raises(TypeError):
x509.BasicConstraints(ca=True, path_length=-1)
def test_repr(self):
na = x509.BasicConstraints(ca=True, path_length=None)
assert repr(na) == (
"<BasicConstraints(ca=True, path_length=None)>"
)
class TestExtendedKeyUsage(object):
def test_not_all_oids(self):
with pytest.raises(TypeError):
x509.ExtendedKeyUsage(["notoid"])
def test_iter_len(self):
eku = x509.ExtendedKeyUsage([
x509.ObjectIdentifier("1.3.6.1.5.5.7.3.1"),
x509.ObjectIdentifier("1.3.6.1.5.5.7.3.2"),
])
assert len(eku) == 2
assert list(eku) == [
x509.OID_SERVER_AUTH,
x509.OID_CLIENT_AUTH
]
def test_repr(self):
eku = x509.ExtendedKeyUsage([
x509.ObjectIdentifier("1.3.6.1.5.5.7.3.1"),
x509.ObjectIdentifier("1.3.6.1.5.5.7.3.2"),
])
assert repr(eku) == (
"<ExtendedKeyUsage([<ObjectIdentifier(oid=1.3.6.1.5.5.7.3.1, name="
"serverAuth)>, <ObjectIdentifier(oid=1.3.6.1.5.5.7.3.2, name=clien"
"tAuth)>])>"
)
@pytest.mark.requires_backend_interface(interface=RSABackend)
@pytest.mark.requires_backend_interface(interface=X509Backend)
class TestExtensions(object):
def test_no_extensions(self, backend):
cert = _load_cert(
os.path.join("x509", "verisign_md2_root.pem"),
x509.load_pem_x509_certificate,
backend
)
ext = cert.extensions
assert len(ext) == 0
assert list(ext) == []
with pytest.raises(x509.ExtensionNotFound) as exc:
ext.get_extension_for_oid(x509.OID_BASIC_CONSTRAINTS)
assert exc.value.oid == x509.OID_BASIC_CONSTRAINTS
def test_one_extension(self, backend):
cert = _load_cert(
os.path.join(
"x509", "custom", "basic_constraints_not_critical.pem"
),
x509.load_pem_x509_certificate,
backend
)
extensions = cert.extensions
ext = extensions.get_extension_for_oid(x509.OID_BASIC_CONSTRAINTS)
assert ext is not None
assert ext.value.ca is False
def test_duplicate_extension(self, backend):
cert = _load_cert(
os.path.join(
"x509", "custom", "two_basic_constraints.pem"
),
x509.load_pem_x509_certificate,
backend
)
with pytest.raises(x509.DuplicateExtension) as exc:
cert.extensions
assert exc.value.oid == x509.OID_BASIC_CONSTRAINTS
def test_unsupported_critical_extension(self, backend):
cert = _load_cert(
os.path.join(
"x509", "custom", "unsupported_extension_critical.pem"
),
x509.load_pem_x509_certificate,
backend
)
with pytest.raises(x509.UnsupportedExtension) as exc:
cert.extensions
assert exc.value.oid == x509.ObjectIdentifier("1.2.3.4")
def test_unsupported_extension(self, backend):
# TODO: this will raise an exception when all extensions are complete
cert = _load_cert(
os.path.join(
"x509", "custom", "unsupported_extension.pem"
),
x509.load_pem_x509_certificate,
backend
)
extensions = cert.extensions
assert len(extensions) == 0
@pytest.mark.requires_backend_interface(interface=RSABackend)
@pytest.mark.requires_backend_interface(interface=X509Backend)
class TestBasicConstraintsExtension(object):
def test_ca_true_pathlen_6(self, backend):
cert = _load_cert(
os.path.join(
"x509", "PKITS_data", "certs", "pathLenConstraint6CACert.crt"
),
x509.load_der_x509_certificate,
backend
)
ext = cert.extensions.get_extension_for_oid(
x509.OID_BASIC_CONSTRAINTS
)
assert ext is not None
assert ext.critical is True
assert ext.value.ca is True
assert ext.value.path_length == 6
def test_path_length_zero(self, backend):
cert = _load_cert(
os.path.join("x509", "custom", "bc_path_length_zero.pem"),
x509.load_pem_x509_certificate,
backend
)
ext = cert.extensions.get_extension_for_oid(
x509.OID_BASIC_CONSTRAINTS
)
assert ext is not None
assert ext.critical is True
assert ext.value.ca is True
assert ext.value.path_length == 0
def test_ca_true_no_pathlen(self, backend):
cert = _load_cert(
os.path.join("x509", "PKITS_data", "certs", "GoodCACert.crt"),
x509.load_der_x509_certificate,
backend
)
ext = cert.extensions.get_extension_for_oid(
x509.OID_BASIC_CONSTRAINTS
)
assert ext is not None
assert ext.critical is True
assert ext.value.ca is True
assert ext.value.path_length is None
def test_ca_false(self, backend):
cert = _load_cert(
os.path.join("x509", "cryptography.io.pem"),
x509.load_pem_x509_certificate,
backend
)
ext = cert.extensions.get_extension_for_oid(
x509.OID_BASIC_CONSTRAINTS
)
assert ext is not None
assert ext.critical is True
assert ext.value.ca is False
assert ext.value.path_length is None
def test_no_basic_constraints(self, backend):
cert = _load_cert(
os.path.join(
"x509",
"PKITS_data",
"certs",
"ValidCertificatePathTest1EE.crt"
),
x509.load_der_x509_certificate,
backend
)
with pytest.raises(x509.ExtensionNotFound):
cert.extensions.get_extension_for_oid(x509.OID_BASIC_CONSTRAINTS)
def test_basic_constraint_not_critical(self, backend):
cert = _load_cert(
os.path.join(
"x509", "custom", "basic_constraints_not_critical.pem"
),
x509.load_pem_x509_certificate,
backend
)
ext = cert.extensions.get_extension_for_oid(
x509.OID_BASIC_CONSTRAINTS
)
assert ext is not None
assert ext.critical is False
assert ext.value.ca is False
@pytest.mark.requires_backend_interface(interface=RSABackend)
@pytest.mark.requires_backend_interface(interface=X509Backend)
class TestSubjectKeyIdentifierExtension(object):
def test_subject_key_identifier(self, backend):
cert = _load_cert(
os.path.join("x509", "PKITS_data", "certs", "GoodCACert.crt"),
x509.load_der_x509_certificate,
backend
)
ext = cert.extensions.get_extension_for_oid(
x509.OID_SUBJECT_KEY_IDENTIFIER
)
ski = ext.value
assert ext is not None
assert ext.critical is False
assert ski.digest == binascii.unhexlify(
b"580184241bbc2b52944a3da510721451f5af3ac9"
)
def test_no_subject_key_identifier(self, backend):
cert = _load_cert(
os.path.join("x509", "custom", "bc_path_length_zero.pem"),
x509.load_pem_x509_certificate,
backend
)
with pytest.raises(x509.ExtensionNotFound):
cert.extensions.get_extension_for_oid(
x509.OID_SUBJECT_KEY_IDENTIFIER
)
@pytest.mark.requires_backend_interface(interface=RSABackend)
@pytest.mark.requires_backend_interface(interface=X509Backend)
class TestKeyUsageExtension(object):
def test_no_key_usage(self, backend):
cert = _load_cert(
os.path.join("x509", "verisign_md2_root.pem"),
x509.load_pem_x509_certificate,
backend
)
ext = cert.extensions
with pytest.raises(x509.ExtensionNotFound) as exc:
ext.get_extension_for_oid(x509.OID_KEY_USAGE)
assert exc.value.oid == x509.OID_KEY_USAGE
def test_all_purposes(self, backend):
cert = _load_cert(
os.path.join(
"x509", "custom", "all_key_usages.pem"
),
x509.load_pem_x509_certificate,
backend
)
extensions = cert.extensions
ext = extensions.get_extension_for_oid(x509.OID_KEY_USAGE)
assert ext is not None
ku = ext.value
assert ku.digital_signature is True
assert ku.content_commitment is True
assert ku.key_encipherment is True
assert ku.data_encipherment is True
assert ku.key_agreement is True
assert ku.key_cert_sign is True
assert ku.crl_sign is True
assert ku.encipher_only is True
assert ku.decipher_only is True
def test_key_cert_sign_crl_sign(self, backend):
cert = _load_cert(
os.path.join(
"x509", "PKITS_data", "certs", "pathLenConstraint6CACert.crt"
),
x509.load_der_x509_certificate,
backend
)
ext = cert.extensions.get_extension_for_oid(x509.OID_KEY_USAGE)
assert ext is not None
assert ext.critical is True
ku = ext.value
assert ku.digital_signature is False
assert ku.content_commitment is False
assert ku.key_encipherment is False
assert ku.data_encipherment is False
assert ku.key_agreement is False
assert ku.key_cert_sign is True
assert ku.crl_sign is True
@pytest.mark.parametrize(
"name", [
x509.RFC822Name,
x509.DNSName,
x509.UniformResourceIdentifier
]
)
class TestTextGeneralNames(object):
def test_not_text(self, name):
with pytest.raises(TypeError):
name(b"notaunicodestring")
with pytest.raises(TypeError):
name(1.3)
def test_repr(self, name):
gn = name(six.u("string"))
assert repr(gn) == "<{0}(value=string)>".format(name.__name__)
def test_eq(self, name):
gn = name(six.u("string"))
gn2 = name(six.u("string"))
assert gn == gn2
def test_ne(self, name):
gn = name(six.u("string"))
gn2 = name(six.u("string2"))
assert gn != gn2
assert gn != object()
class TestDirectoryName(object):
def test_not_name(self):
with pytest.raises(TypeError):
x509.DirectoryName(b"notaname")
with pytest.raises(TypeError):
x509.DirectoryName(1.3)
def test_repr(self):
name = x509.Name([x509.NameAttribute(x509.OID_COMMON_NAME, 'value1')])
gn = x509.DirectoryName(x509.Name([name]))
assert repr(gn) == (
"<DirectoryName(value=<Name([<Name([<NameAttribute(oid=<ObjectIden"
"tifier(oid=2.5.4.3, name=commonName)>, value='value1')>])>])>)>"
)
def test_eq(self):
name = x509.Name([
x509.NameAttribute(x509.ObjectIdentifier('oid'), 'value1')
])
name2 = x509.Name([
x509.NameAttribute(x509.ObjectIdentifier('oid'), 'value1')
])
gn = x509.DirectoryName(x509.Name([name]))
gn2 = x509.DirectoryName(x509.Name([name2]))
assert gn == gn2
def test_ne(self):
name = x509.Name([
x509.NameAttribute(x509.ObjectIdentifier('oid'), 'value1')
])
name2 = x509.Name([
x509.NameAttribute(x509.ObjectIdentifier('oid'), 'value2')
])
gn = x509.DirectoryName(x509.Name([name]))
gn2 = x509.DirectoryName(x509.Name([name2]))
assert gn != gn2
assert gn != object()
class TestRegisteredID(object):
def test_not_oid(self):
with pytest.raises(TypeError):
x509.RegisteredID(b"notanoid")
with pytest.raises(TypeError):
x509.RegisteredID(1.3)
def test_repr(self):
gn = x509.RegisteredID(x509.OID_COMMON_NAME)
assert repr(gn) == (
"<RegisteredID(value=<ObjectIdentifier(oid=2.5.4.3, name=commonNam"
"e)>)>"
)
def test_eq(self):
gn = x509.RegisteredID(x509.OID_COMMON_NAME)
gn2 = x509.RegisteredID(x509.OID_COMMON_NAME)
assert gn == gn2
def test_ne(self):
gn = x509.RegisteredID(x509.OID_COMMON_NAME)
gn2 = x509.RegisteredID(x509.OID_BASIC_CONSTRAINTS)
assert gn != gn2
assert gn != object()
class TestIPAddress(object):
def test_not_ipaddress(self):
with pytest.raises(TypeError):
x509.IPAddress(b"notanipaddress")
with pytest.raises(TypeError):
x509.IPAddress(1.3)
def test_repr(self):
gn = x509.IPAddress(ipaddress.IPv4Address(six.u("127.0.0.1")))
assert repr(gn) == "<IPAddress(value=127.0.0.1)>"
gn2 = x509.IPAddress(ipaddress.IPv6Address(six.u("ff::")))
assert repr(gn2) == "<IPAddress(value=ff::)>"
def test_eq(self):
gn = x509.IPAddress(ipaddress.IPv4Address(six.u("127.0.0.1")))
gn2 = x509.IPAddress(ipaddress.IPv4Address(six.u("127.0.0.1")))
assert gn == gn2
def test_ne(self):
gn = x509.IPAddress(ipaddress.IPv4Address(six.u("127.0.0.1")))
gn2 = x509.IPAddress(ipaddress.IPv4Address(six.u("127.0.0.2")))
assert gn != gn2
assert gn != object()
class TestSubjectAlternativeName(object):
def test_get_values_for_type(self):
san = x509.SubjectAlternativeName(
[x509.DNSName(six.u("cryptography.io"))]
)
names = san.get_values_for_type(x509.DNSName)
assert names == [six.u("cryptography.io")]
def test_iter_names(self):
san = x509.SubjectAlternativeName([
x509.DNSName(six.u("cryptography.io")),
x509.DNSName(six.u("crypto.local")),
])
assert len(san) == 2
assert list(san) == [
x509.DNSName(six.u("cryptography.io")),
x509.DNSName(six.u("crypto.local")),
]
def test_repr(self):
san = x509.SubjectAlternativeName(
[
x509.DNSName(six.u("cryptography.io"))
]
)
assert repr(san) == (
"<SubjectAlternativeName([<DNSName(value=cryptography.io)>])>"
)
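# --- Illustrative usage (editor's sketch) ---
# Reading SAN DNS names with the API exercised above. The certificate path is
# hypothetical, and OID_SUBJECT_ALTERNATIVE_NAME is assumed to be exported by
# this version of cryptography.x509.
def _example_san_dns_names(cert_path, backend):
    cert = _load_cert(cert_path, x509.load_pem_x509_certificate, backend)
    ext = cert.extensions.get_extension_for_oid(
        x509.OID_SUBJECT_ALTERNATIVE_NAME
    )
    return ext.value.get_values_for_type(x509.DNSName)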
| |
# Expressionparse v0.1 -- Create syntax trees for mathematical expressions
#
# Copyright (C) 2013, Peter Beard <peter.b.beard@gmail.com>
#
# This file is part of Expressionparse.
#
# Expressionparse is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Expressionparse is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Expressionparse. If not, see <http://www.gnu.org/licenses/>.
import math
import re
import copy
# A general node-related exception
class NodeException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
# Exception raised when tokenizing an expression
class TokenizeException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
# Exception that's raised when parsing an expression
class ParseException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
# Exception that's raised when evaluating an expression
class EvalException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
# The base node class. Implements evaluation and stringification functions.
class Node(object):
# Initialize the node
def __init__(self):
pass
# Set a variable
def setVariable(self, name, value):
return None
# Evaluate the node
def evaluate(self):
return None
# Return a nice-looking string representing the node
def toInfixNotation(self):
return self.__str__()
# Return a Polish notation string of the node
def toPolishNotation(self):
return self.__str__()
# Return a Reverse Polish notation string of the node
def toReversePolishNotation(self):
return self.__str__()
# Make a string representation of the node
def __str__(self):
return 'Empty Node (' + type(self).__name__ + ')'
# A class to tokenize input strings and feed the tokens to the parser
class Tokenizer(object):
# Some static constants
OPENPAREN = '('
CLOSEPAREN = ')'
# Initialize the tokenizer and tokenize the string
def __init__(self, string):
self.tokens = []
# First, strip out whitespace from the string
string = string.replace(' ','')
# Next replace adjacent parentheses with explicit multiplications so we can parse more easily
string = string.replace(')(',')*(')
# Check for unmatched parentheses
level = 0
for char in string:
if char == '(':
level += 1
elif char == ')':
level -= 1
if level != 0:
raise TokenizeException('Unmatched parenthesis.')
# Make variable multiplications written as adjacent characters (e.g. 3x, xy) explicit
p = re.compile(r'(\d+)(\w)')
string = p.sub(r'\1*\2',string)
p = re.compile(r'(\w)(\d+)')
string = p.sub(r'\1*\2',string)
p = re.compile(r'(\w)(?=\w)')
string = p.sub(r'\1*',string)
# Multiplication of parenthetical expression can also be written implicitly as 'x(...)' or '(...)x'
# Make these explicit here
p = re.compile(r'([\w\d]+)\(')
string = p.sub(r'\1*(',string)
p = re.compile(r'\)([\w\d]+)')
string = p.sub(r')*\1',string)
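# Editor's example of the rewrites above: '2x(y+1)' becomes '2*x*(y+1)' and
# '(a+b)(c+d)' becomes '(a+b)*(c+d)' before tokenization proper begins.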
# The characters that we recognize
numbers = '0123456789.'
operators = '+-*/^!'
# Iterate over the string and create tokens of the appropriate type
curr_value = Value()
for i in range(0,len(string)):
char = string[i]
if char == Tokenizer.OPENPAREN:
self.pushToken(char)
elif char == Tokenizer.CLOSEPAREN:
if len(curr_value) > 0:
self.pushToken(curr_value)
curr_value = Value()
self.pushToken(char)
elif char in numbers or (char == '-' and len(curr_value) == 0 and i + 1 < len(string) and string[i+1] in numbers and (i == 0 or string[i-1] != Tokenizer.CLOSEPAREN)):
curr_value.append(char)
# Last value in the string
if i == len(string)-1:
self.pushToken(curr_value)
elif char in operators:
if len(curr_value) > 0:
self.pushToken(curr_value)
curr_value = Value()
self.pushToken(getOperation(char))
else:
if len(curr_value) > 0:
self.pushToken(curr_value)
curr_value = Value()
self.pushToken(Variable(char))
# Return the next token in the list (at the beginning)
def getToken(self):
if len(self.tokens) > 0:
return self.tokens.pop(0)
else:
return None
# Return the next token in the list without removing it
def peekToken(self):
if len(self.tokens) > 0:
return self.tokens[0]
else:
return None
# Add a token to the end of the list
def pushToken(self, token):
self.tokens.append(token)
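# Illustrative example (not part of the original module): Tokenizer('3x+2')
# normalizes the input to '3*x+2' and produces the token stream
# [Value('3'), Times, Variable('x'), Plus, Value('2')]
# (class instances, shown here by type for readability).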
# A class representing an expression tree. Contains logic for parsing strings.
# TODO: This class is probably not that different from the Node class, so they should probably be merged or this class should at least be simplified.
class Tree(Node):
# Initialize the tree
def __init__(self):
self.root = None
# Parse a string expression
def parse(self, expression):
# TODO: This function should be able to detect the type of notation and choose the correct parser
self.parseInfixNotation(expression)
# Parse a string expression written using Infix Notation
def parseInfixNotation(self, expression):
# Tokenize the expression
tokenizer = Tokenizer(expression)
# Iterate over the tokens
tokenIndex = 0
token = 0
curr_value = None
subtree_root = Operation()
prev_op = None
curr_op = None
self.root = Operation()
paren_stack = []
while token is not None:
tokenIndex += 1
token = tokenizer.getToken()
# No tokens left
if token is None:
# If there are no operations, the current value must be the entire tree
if len(subtree_root) == 0:
subtree_root = curr_value
elif curr_value is not None and len(subtree_root) < 2:
subtree_root.addChild(curr_value)
break
# Parse the token
if token == Tokenizer.OPENPAREN:
paren_stack.append(copy.deepcopy(subtree_root))
subtree_root = Operation()
prev_op = Operation()
curr_op = Operation()
elif token == Tokenizer.CLOSEPAREN:
paren_op = paren_stack.pop()
# Insert the parenthetical expression in the tree
if len(paren_op) < 2:
paren_op.addChild(subtree_root)
else:
paren_op.addWhereOpen(subtree_root)
# Re-root the tree and continue parsing
subtree_root = paren_op
prev_op = subtree_root
elif isinstance(token, Variable) or isinstance(token, Value):
if curr_value is None:
curr_value = token
if (tokenizer.peekToken() is None or tokenizer.peekToken() == Tokenizer.CLOSEPAREN) and prev_op is not None:
prev_op.addChild(curr_value)
curr_value = None
#else:
# raise ParseException("Too many values at token " + str(tokenIndex))
elif isinstance(token, Operation):
                if curr_value is None and subtree_root.symbol == '?':
token.addChild(subtree_root.left)
subtree_root = token
prev_op = token
elif prev_op is not None and len(prev_op) > 0:
                    if curr_value is not None:
prev_op.addChild(curr_value)
curr_value = None
curr_op = token
# Determine parent-child relationship based on operation weights
# If the next node is heavier than the current one (e.g. * v. +), add it as a child of the current node and make the current node the root of the tree
if curr_op.weight > prev_op.weight:
c = prev_op.removeChild()
prev_op.addChild(curr_op)
curr_op.addChild(c)
subtree_root = prev_op
# If the current and next nodes have the same weight, add the next node as a child of the current one -- note that this is the same as what we do when the next node is heavier BUT we do NOT re-root the tree
elif curr_op.weight == prev_op.weight:
c = prev_op.removeChild()
prev_op.addChild(curr_op)
curr_op.addChild(c)
# If the next node is lighter than the current one, add the current node as a child of the next one and make the next one the root of the tree
else:
curr_op.addChild(subtree_root)
subtree_root = curr_op
prev_op = curr_op
else:
prev_op = token
prev_op.addChild(curr_value)
subtree_root = prev_op
curr_value = None
        # An undefined operation with only one child can be replaced by that child
        if isinstance(subtree_root, Operation) and subtree_root.symbol == '?' and subtree_root.right is None:
self.root = subtree_root.left
else:
self.root = subtree_root
# Set the value of a variable in the tree
def setVariable(self, name, value):
if isinstance(self.root, Operation):
self.root.setVariable(name, value)
elif isinstance(self.root, Variable) and self.root.name == name:
self.root.set(value)
# Try to simplify the tree
def simplify(self):
try:
self.root.simplify()
except EvalException:
return False
# Try to evaluate the simplified root node
try:
self.root = self.root.evaluate()
return True
except EvalException:
return False
# Evaluate the entire tree
def evaluate(self):
return self.root.evaluate()
# Print the tree using Infix Notation
def toInfixNotation(self):
return self.root.toInfixNotation()
# Print the tree using Polish Notation
def toPolishNotation(self):
return self.root.toPolishNotation()
# Print the tree using Reverse Polish Notation
def toReversePolishNotation(self):
return self.root.toReversePolishNotation()
# Make a string representation of the tree
def __str__(self):
return self.root.__str__()
# Get the length of the tree
def __len__(self):
return len(self.root)
# Check if two trees are equal
def __eq__(self, other):
if isinstance(other, Tree):
return self.root == other.root
return False
# A class representing a numeric value, e.g. 5, -7, 2.1, etc.
class Value(Node):
# Initialize the node
def __init__(self, val=''):
self.value = str(val)
# Append a digit to the value
def append(self, digit):
self.value = self.value + str(digit)
# Simplify the node
def simplify(self):
return self
# Evaluate the node
def evaluate(self):
return float(self.value)
# The length of the value
def __len__(self):
return len(self.value)
# See if two values are equal
def __eq__(self, other):
if isinstance(other, Value):
return self.value == other.value
return False
# Return a string representation of the value
def __str__(self):
return self.value
# Class representing a variable, e.g. x
class Variable(Node):
# Initialize the node
def __init__(self, name=''):
self.name = str(name)
self.value = Value()
# Simplify the node
def simplify(self):
return self
# Evaluate the node
def evaluate(self):
try:
return self.value.evaluate()
        except ValueError:
            raise EvalException('Cannot evaluate expressions that contain uninitialized variables.')
# Set the value of the variable
def set(self, value):
if isinstance(value, Value):
self.value = value
else:
self.value = Value(value)
# Unset the value of the variable
def unset(self):
self.value = Value()
# Compare two variables
    def __eq__(self, other):
        if type(other) == type(self):
            return self.name == other.name and self.value == other.value
        return False
# The length of the value
def __len__(self):
return len(self.name)
# Return a string representation of the value
def __str__(self):
try:
self.value.evaluate()
return '{' + self.name + '=' + str(self.value) + '}'
        except ValueError:
return self.name
# A class representing a mathematical operation, e.g. plus, minus, etc.
class Operation(Node):
# Initialize the operation
def __init__(self):
self.left = None # Initialize left child to none
self.right = None # Initialize right child to none
self.parent = None # Initialize parent to none
self.weight = 0 # Default weight is 0
self.symbol = '?' # Default operator symbol is ?
self.arity = 2 # Default to binary operator
# Add a child to the node
    def addChild(self, child):
        if self.left is None:
            self.left = child
            child.parent = self
        elif self.right is None:
            self.right = child
            child.parent = self
        else:
            raise NodeException('Node already has two children.')
# Remove a child from the node
    def removeChild(self):
        if self.right is not None:
            node = self.right
            self.right = None
            node.parent = None
        elif self.left is not None:
            node = self.left
            self.left = None
            node.parent = None
        else:
            raise NodeException('Node has no children to remove.')
        return node
# Find somewhere in this tree to add a child node. Return false if there are no open spots
def addWhereOpen(self, child):
# Can we have another child?
if self.right is None:
self.addChild(child)
return True
else:
# Try to add the new child to one of our child nodes
if isinstance(self.left, Operation) and isinstance(self.right, Operation):
# Try the left node first
success = self.left.addWhereOpen(child)
# Only try the right node if the left node failed
if not success:
success = self.right.addWhereOpen(child)
return success
# Can we insert into the left node?
elif isinstance(self.left, Operation):
return self.left.addWhereOpen(child)
# What about the right node?
elif isinstance(self.right, Operation):
return self.right.addWhereOpen(child)
# There was nowhere to insert another node
else:
return False
# Try to factor the node
def factor(self):
        # Factor the children first (if possible)
# Left child
try:
self.left = self.left.factor()
except:
pass
# Right child
try:
self.right = self.right.factor()
except:
pass
# Currently we only know how to factor sums of multiplications since both are commutative
parent_type = type(self).__name__
parent_weight = self.weight
child_type = type(self.left).__name__
# Make sure the children are both operations, both the same type, and have a greater weight
if isinstance(self.left, Operation) and type(self.left) == type(self.right) and self.left.weight - self.weight == 1:
# Get grandchildren
llgc = self.left.left
lrgc = self.left.right
rlgc = self.right.left
rrgc = self.right.right
common_factor_on_left = False
# Find the common factor (if any)
if llgc == rlgc:
common_factor = llgc
common_factor_on_left = True
different_left = lrgc
different_right = rrgc
elif llgc == rrgc:
common_factor = llgc
common_factor_on_left = True
different_left = lrgc
different_right = rlgc
elif lrgc == rlgc:
common_factor = lrgc
different_left = llgc
different_right = rrgc
elif lrgc == rrgc:
common_factor = lrgc
different_left = llgc
different_right = rlgc
else:
return self
# Create a new parent node with the type of the original child
if child_type == 'Times':
new_parent = Times()
elif child_type == 'Divide':
# This operation requires the common factor to be on the same side in both children
if llgc == rlgc or lrgc == rrgc:
new_parent = Divide()
else:
return self
elif child_type == 'Exponent':
# This operation requires the common factor to be on the same side in both children
if llgc == rlgc or lrgc == rrgc:
new_parent = Exponent()
else:
return self
else:
return self
# Create a new child node with the type of the original parent
if parent_type == 'Plus':
new_child = Plus()
elif parent_type == 'Minus':
new_child = Minus()
elif parent_type == 'Times':
new_child = Times()
elif parent_type == 'Divide':
new_child = Divide()
else:
return self
# Add the differing factors as children
new_child.addChild(different_left)
new_child.addChild(different_right)
            # Add the common factor as a child of the new parent node
if common_factor_on_left:
new_parent.addChild(common_factor)
new_parent.addChild(new_child)
else:
new_parent.addChild(new_child)
new_parent.addChild(common_factor)
# Return the re-factored node
return new_parent
else:
return self
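    # Illustrative example: a Plus node whose children are Times nodes with
    # a common factor, i.e. (a * b) + (a * c), is rebuilt by factor() as
    # a * (b + c): the common factor becomes one child of a new Times node
    # and the differing factors become children of a new Plus node.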
# Simplify the node
def simplify(self):
simplified = True
try:
lvalue = self.left.simplify()
self.left = lvalue
except EvalException:
simplified = False
try:
rvalue = self.right.simplify()
self.right = rvalue
except EvalException:
simplified = False
if simplified:
return Value(self.evaluate())
else:
return self
# Check whether the node contains a certain variable
    def containsVariable(self, varname):
        # Is the variable in the left child?
        if isinstance(self.left, Variable) and self.left.name == varname:
            return True
        elif isinstance(self.left, Operation) and self.left.containsVariable(varname):
            return True
        # Is the variable in the right child?
        if isinstance(self.right, Variable) and self.right.name == varname:
            return True
        elif isinstance(self.right, Operation) and self.right.containsVariable(varname):
            return True
        # Didn't find the variable
        return False
# Set the value of a variable in this node
def setVariable(self, name, value):
# See if the variable exists in the left and/or right subtrees
# Left side
if isinstance(self.left, Variable) and self.left.name == name:
self.left.set(value)
else:
self.left.setVariable(name, value)
# Right side
if isinstance(self.right, Variable) and self.right.name == name:
self.right.set(value)
else:
self.right.setVariable(name, value)
# Return the value of this node
def evaluate(self):
return None
# Return an Infix Notation string representing the operation
def toInfixNotation(self):
# Unary operators
if self.arity == 1:
lstring = self.left.toInfixNotation()
if isinstance(self.left, Operation) and self.weight > self.left.weight:
string = '(' + lstring + ')'
else:
string = lstring
string += self.symbol
# Binary operators
elif self.arity == 2:
lstring = self.left.toInfixNotation()
rstring = self.right.toInfixNotation()
string = ''
            if isinstance(self.left, Operation) and self.weight > self.left.weight:
string += '(' + lstring + ')'
else:
string += lstring
string += ' ' + self.symbol + ' '
if isinstance(self.right, Operation) and self.weight > self.right.weight:
string += '(' + rstring + ')'
else:
string += rstring
return string
# Return a Polish Notation string of the operation
def toPolishNotation(self):
if self.arity == 1:
lstring = self.left.toPolishNotation()
string = self.symbol + ' '
if isinstance(self.left, Operation) and self.weight > self.left.weight:
string += '(' + lstring + ')'
else:
# Pull off the operator if the left child has the same type
if type(self) == type(self.left):
string += lstring[2:]
else:
string += lstring
else:
lstring = self.left.toPolishNotation()
rstring = self.right.toPolishNotation()
string = self.symbol + ' '
if isinstance(self.left, Operation) and self.weight > self.left.weight:
string += '(' + lstring + ')'
else:
# Pull off the operator if the left child has the same type
if type(self) == type(self.left):
string += lstring[2:]
else:
string += lstring
string += ' '
if isinstance(self.right, Operation) and self.weight > self.right.weight:
string += '(' + rstring + ')'
else:
# Pull off the operator if the right child has the same type
if type(self) == type(self.right):
string += rstring[2:]
else:
string += rstring
return string
# Return a Reverse Polish Notation string of the operation
def toReversePolishNotation(self):
if self.arity == 1:
lstring = self.left.toReversePolishNotation()
if isinstance(self.left, Operation) and self.weight > self.left.weight:
string = '(' + lstring + ')'
else:
# Pull off the operator if the left child has the same type
if type(self) == type(self.left):
string = lstring[:-2]
else:
string = lstring
string += ' ' + self.symbol
else:
lstring = self.left.toReversePolishNotation()
rstring = self.right.toReversePolishNotation()
string = ''
if isinstance(self.left, Operation) and self.weight > self.left.weight:
string += '(' + lstring + ')'
else:
# Pull off the operator if the left child has the same type
if type(self) == type(self.left):
string += lstring[:-2]
else:
string += lstring
string += ' '
if isinstance(self.right, Operation) and self.weight > self.right.weight:
string += '(' + rstring + ')'
else:
# Pull off the operator if the right child has the same type
if type(self) == type(self.right):
string += rstring[:-2]
else:
string += rstring
string += ' ' + self.symbol
return string
# See if two operation nodes are equal
def __eq__(self, other):
if type(other) == type(self):
return (self.left == other.left) and (self.right == other.right)
return False
# Return the length of the node
def __len__(self):
left_len = 0
right_len = 0
# Get the lengths of the non-None children
if self.left is not None:
left_len = len(self.left)
if self.right is not None:
right_len = len(self.right)
# Return the sum of the lengths
return left_len + right_len
# Return a string representation of the node
def __str__(self):
# Unary operators
if self.arity == 1:
return '[ ' + self.left.__str__() + ' ' + self.symbol + ' ]'
        # Binary operators
else:
return '[ ' + self.left.__str__() + ' ' + self.symbol + ' ' + self.right.__str__() + ' ]'
# Add two nodes together
class Plus(Operation):
# Initialize the node
def __init__(self):
super(Plus,self).__init__()
self.weight = 1
self.symbol = '+'
# Evaluate the node
def evaluate(self):
if self.left and self.right:
return self.left.evaluate() + self.right.evaluate()
else:
raise NodeException('Node does not have enough children.')
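# Minimal usage sketch (illustrative, not part of the original module):
#   p = Plus()
#   p.addChild(Value(2))
#   p.addChild(Value(3))
#   p.evaluate()  # -> 5.0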
# Subtract two nodes
class Minus(Operation):
# Initialize the node
def __init__(self):
super(Minus,self).__init__()
self.weight = 1
self.symbol = '-'
# Evaluate the node
def evaluate(self):
if self.left and self.right:
return self.left.evaluate() - self.right.evaluate()
else:
raise NodeException('Node does not have enough children.')
# Multiply two nodes
class Times(Operation):
# Initialize the node
def __init__(self):
super(Times,self).__init__()
self.weight = 2
self.symbol = '*'
# Evaluate the node
def evaluate(self):
if self.left and self.right:
return self.left.evaluate() * self.right.evaluate()
else:
raise NodeException('Node does not have enough children.')
# Try to factor the node
def factor(self):
        # Factor the children first (if possible)
# Left child
try:
self.left = self.left.factor()
except:
pass
# Right child
try:
self.right = self.right.factor()
except:
pass
# Currently we only know how to factor sums of multiplications since both are commutative
parent_type = type(self).__name__
parent_weight = self.weight
child_type = type(self.left).__name__
# Make sure the children are both operations, both the same type, and have a greater weight
if isinstance(self.left, Operation) and type(self.left) == type(self.right) and self.left.weight - self.weight == 1:
if child_type != 'Exponent':
return super(Times,self).factor()
else:
# Get grandchildren
llgc = self.left.left
lrgc = self.left.right
rlgc = self.right.left
rrgc = self.right.right
common_factor_on_left = False
# Find the common factor (if any)
if llgc == rlgc:
common_factor = llgc
common_factor_on_left = True
different_left = lrgc
different_right = rrgc
elif llgc == rrgc:
common_factor = llgc
common_factor_on_left = True
different_left = lrgc
different_right = rlgc
elif lrgc == rlgc:
common_factor = lrgc
different_left = llgc
different_right = rrgc
elif lrgc == rrgc:
common_factor = lrgc
different_left = llgc
different_right = rlgc
else:
return self
# If the common factor is on the right, normal factoring rules apply
if not common_factor_on_left:
return super(Times,self).factor()
# Create a new parent node with the type of the original child
if child_type == 'Exponent':
# This operation requires the common factor to be on the same side in both children
if llgc == rlgc or lrgc == rrgc:
new_parent = Exponent()
else:
return self
else:
return self
# Since this is a multiplication, we need to convert to addition of the exponents
new_child = Plus()
# Add the differing factors as children
new_child.addChild(different_left)
new_child.addChild(different_right)
                # Add the common base as a child of the new exponent node
new_parent.addChild(common_factor)
new_parent.addChild(new_child)
# Return the re-factored node
return new_parent
else:
return self
# Return an Infix Notation string representing the operation
def toInfixNotation(self):
lstring = self.left.toInfixNotation()
rstring = self.right.toInfixNotation()
if isinstance(self.left, Operation) and self.weight > self.left.weight:
lstring = '(' + lstring + ')'
if isinstance(self.right, Operation) and self.weight > self.right.weight:
rstring = '(' + rstring + ')'
# Multiplication of variables is usually written with the variables adjacent to each other
if isinstance(self.left, Variable) or isinstance(self.right, Variable):
return lstring + rstring
else:
return lstring + ' * ' + rstring
# Divide two nodes
class Divide(Operation):
# Initialize the node
def __init__(self):
super(Divide,self).__init__()
self.weight = 2
self.symbol = '/'
# Evaluate the node
def evaluate(self):
if self.left and self.right:
return self.left.evaluate() / self.right.evaluate()
else:
raise NodeException('Node does not have enough children.')
# Try to factor the node
def factor(self):
# Factor the children first (if possibe)
# Left child
try:
self.left = self.left.factor()
except:
pass
# Right child
try:
self.right = self.right.factor()
except:
pass
# Currently we only know how to factor sums of multiplications since both are commutative
parent_type = type(self).__name__
parent_weight = self.weight
child_type = type(self.left).__name__
# Make sure the children are both operations, both the same type, and have a greater weight
if isinstance(self.left, Operation) and type(self.left) == type(self.right) and self.left.weight - self.weight == 1:
if child_type != 'Exponent':
return super(Divide,self).factor()
else:
# Get grandchildren
llgc = self.left.left
lrgc = self.left.right
rlgc = self.right.left
rrgc = self.right.right
common_factor_on_left = False
# Find the common factor (if any)
if llgc == rlgc:
common_factor = llgc
common_factor_on_left = True
different_left = lrgc
different_right = rrgc
elif llgc == rrgc:
common_factor = llgc
common_factor_on_left = True
different_left = lrgc
different_right = rlgc
elif lrgc == rlgc:
common_factor = lrgc
different_left = llgc
different_right = rrgc
elif lrgc == rrgc:
common_factor = lrgc
different_left = llgc
different_right = rlgc
else:
return self
# If the common factor is on the right, normal factoring rules apply
if not common_factor_on_left:
return super(Divide,self).factor()
# Create a new parent node with the type of the original child
if child_type == 'Exponent':
# This operation requires the common factor to be on the same side in both children
if llgc == rlgc or lrgc == rrgc:
new_parent = Exponent()
else:
return self
else:
return self
                # Since this is a division, we need to convert to subtraction of the exponents
                new_child = Minus()
# Add the differing factors as children
new_child.addChild(different_left)
new_child.addChild(different_right)
                # Add the common base as a child of the new exponent node
new_parent.addChild(common_factor)
new_parent.addChild(new_child)
# Return the re-factored node
return new_parent
else:
return self
# Exponentiate two nodes
class Exponent(Operation):
# Initialize the node
def __init__(self):
super(Exponent,self).__init__()
self.weight = 3
self.symbol = '^'
# Evaluate the node
def evaluate(self):
if self.left and self.right:
lvalue = self.left.evaluate()
rvalue = self.right.evaluate()
            # A negative base raised to a non-integer exponent yields a complex result
if lvalue < 0:
if rvalue == int(rvalue):
return lvalue ** rvalue
else:
# The answer will be complex
return (lvalue + 0j) ** rvalue
else:
return lvalue ** rvalue
else:
raise NodeException('Node does not have enough children.')
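    # Illustrative example: an Exponent with left=Value(-4) and right=Value(0.5)
    # evaluates to approximately 2j, because the negative base is promoted to
    # complex before exponentiation.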
# Calculate the factorial of a node
# ** This is a unary operator **
class Factorial(Operation):
# Initialize the node
def __init__(self):
super(Factorial,self).__init__()
self.weight = 4
self.symbol = '!'
self.arity = 1
# Add a child to the node
    def addChild(self, child):
        if self.left is None:
            self.left = child
            child.parent = self
        else:
            raise NodeException('Node already has one child.')
# Remove a child from the node
    def removeChild(self):
        if self.left is not None:
            c = self.left
            self.left = None
            c.parent = None
            return c
        else:
            raise NodeException('Node has no children to remove.')
# Evaluate the node
def evaluate(self):
        if self.left is not None and self.right is None:
cvalue = self.left.evaluate()
# Right now factorial is only defined for the natural numbers
if cvalue >= 0 and cvalue == int(cvalue):
return math.factorial(cvalue)
else:
raise EvalException('Cannot compute the factorial of negative numbers or non-integers.')
else:
raise NodeException('Node does not have enough children.')
# Return an object of the correct type given the symbol representing an operation
def getOperation(operation_symbol):
if operation_symbol == '+':
return Plus()
elif operation_symbol == '-':
return Minus()
elif operation_symbol == '*':
return Times()
elif operation_symbol == '/':
return Divide()
elif operation_symbol == '^':
return Exponent()
elif operation_symbol == '!':
return Factorial()
else:
raise ParseException('Unknown operation "' + operation_symbol + '"')
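# Minimal usage sketch (not part of the original module); the expected
# outputs assume the parser behaves as described in the comments above.
if __name__ == '__main__':
    tree = Tree()
    tree.parse('2*(3+4)')
    print(tree.toInfixNotation())  # 2 * (3 + 4)
    print(tree.evaluate())         # 14.0
    tree = Tree()
    tree.parse('x+1')
    tree.setVariable('x', 2)
    print(tree.evaluate())         # 3.0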
| |
"""
Base classes used for the generation of code based on the model objects.
"""
from ..model.base import (SqlString, SqlSet)
from ..model.schema import (View, Table, Constraint, NamedConstraint,
SqlConstraint, LanguageConstraint, ValueTypeValue, ColumnarSchemaObject)
from .base import (SchemaScriptGenerator)
import time
PLATFORMS = ('mysql',)
class MySqlScriptGenerator(SchemaScriptGenerator):
"""
Generates MySql syntax for schema generation.
"""
def __init__(self):
SchemaScriptGenerator.__init__(self)
def is_platform(self, platforms):
"""
Checks if this generator is one of the supported platform grammars.
The "platforms" variable is produced by the Change.platforms property.
:param platforms:
:return: boolean
"""
for plat in platforms:
if plat.strip().lower() in PLATFORMS:
return True
return False
def _header(self, schema_object):
"""
Create the header comment for the schema file.
"""
return '-- Schema for ' + schema_object.name + \
'\n-- Generated on ' + time.asctime(time.gmtime(time.time())) + \
'\n\n'
def _generate_base_table(self, table):
"""
Generate the creation script for a Table.
http://dev.mysql.com/doc/refman/5.1/en/create-table.html
:param table: Table
:return: list(str)
"""
assert isinstance(table, Table)
# Note: do not use "IF NOT EXISTS", because that indicates upgrade.
constraint_sql = ''
sql = 'CREATE TABLE '
if table.catalog_name:
sql += _parse_name(table.catalog_name) + '.'
if table.schema_name:
sql += _parse_name(table.schema_name) + '.'
sql += _parse_name(table.table_name)
# Tablespace used?
input_validations = []
sql += ' (\n'
first = True
for col in table.columns:
if first:
first = False
sql += ' '
else:
sql += '\n , '
sql += _parse_name(col.name) + ' ' + _parse_value_type(
col.value_type)
for cst in col.constraints:
if (isinstance(cst, SqlConstraint) and
cst.constraint_type == 'inputvalidation'):
input_validations.append(cst)
if cst.constraint_type == 'notnull':
sql += ' NOT NULL'
elif (cst.constraint_type == 'nullable' or
cst.constraint_type == 'null'):
# print("null constraint")
sql += ' NULL'
if col.default_value is not None:
sql += ' DEFAULT ' + _escape_value_type_value(col.default_value)
if col.auto_increment:
sql += ' AUTO_INCREMENT'
# TODO add COMMENT, COLUMN_FORMAT, STORAGE support
for cst in col.constraints:
constraint_sql += _generate_base_constraints(
table, [col], cst)
# FIXME add clustered index table constraint checking
for cst in table.constraints:
assert isinstance(cst, Constraint)
if (isinstance(cst, SqlConstraint) and
cst.constraint_type in [
'valuerestriction', 'validatewrite', 'validate']):
input_validations.append(cst)
else:
constraint_sql += _generate_base_constraints(
table, cst.get_columns_by_names(table), cst)
sql += constraint_sql + '\n)'
# FIXME add table options
# FIXME add partition options
# FIXME make this selectable. For now, we'll hard-code it for the
# foreign key support.
sql += ' ENGINE=INNODB;\n'
if len(input_validations) > 0:
sql += _generate_validation_triggers(table, input_validations)
return [self._header(table), sql]
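    # Illustrative shape of the generated script for a two-column table with
    # an auto-increment primary key (table and column names are hypothetical):
    #
    #   CREATE TABLE users (
    #       id INT NOT NULL AUTO_INCREMENT
    #    , name VARCHAR(64) NOT NULL
    #    , CONSTRAINT pk_users PRIMARY KEY (id)
    #   ) ENGINE=INNODB;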
def _generate_base_view(self, view):
"""
Generate the creation script for a View.
:param view:
:return: list(str)
"""
assert isinstance(view, View)
sql = 'CREATE '
if view.replace_if_exists:
sql += 'OR REPLACE '
sql_string = view.select_query.get_for_platform('mysql')
if sql_string is None:
raise Exception("no mysql support for view " + view.name)
assert isinstance(sql_string, SqlString)
sql += 'VIEW ' + view.name + ' AS\n' + sql_string.sql + ';\n'
return [self._header(view), sql]
def _generate_base_sequence(self, sequence):
"""
Generate the creation script for a Sequence.
:param sequence:
:return: list(str)
"""
raise Exception("not implemented")
def _generate_base_procedure(self, procedure):
"""
Generate the creation script for a Procedure.
:param procedure:
:return: list(str)
"""
raise Exception("not implemented")
def _generate_upgrade_table(self, table):
"""
Generate the upgrade script for a Table.
:param table:
:return: list(str)
"""
# FIXME include dropping then recreating the triggers.
raise Exception("not implemented")
def _generate_upgrade_view(self, view):
"""
Generate the upgrade script for a View.
:param view:
:return: list(str)
"""
raise Exception("not implemented")
def _generate_upgrade_sequence(self, sequence):
"""
Generate the upgrade script for a Sequence.
:param sequence:
:return: list(str)
"""
raise Exception("not implemented")
def _generate_upgrade_procedure(self, procedure):
"""
Generate the upgrade script for a Procedure.
:param procedure:
:return: list(str)
"""
raise Exception("not implemented")
def _escape_value_type_value(vtv):
"""
:param vtv: ValueTypeValue
:return: str
"""
assert isinstance(vtv, ValueTypeValue)
if vtv.str_value is not None:
# FIXME look at proper escaping
return "'" + vtv.str_value.replace("'", "''")
elif vtv.boolean_value is not None:
if vtv.boolean_value:
return "1"
else:
return "0"
elif vtv.computed_value is not None:
return str(vtv.computed_value)
elif vtv.date_value is not None:
# FIXME see if we need proper conversion here
return str(vtv.date_value)
elif vtv.numeric_value is not None:
return str(vtv.numeric_value)
else:
return 'NULL'
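# Illustrative examples: a str_value of "it's" is rendered as 'it''s'
# (embedded single quotes doubled), boolean True as 1, False as 0, and a
# value with no fields set as NULL.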
def _parse_value_type(value_type):
val = value_type.strip().upper()
if val in ['BOOL', 'BOOLEAN']:
val = 'TINYINT'
return val
def _parse_name(name):
# TODO properly escape the name
return name.strip()
def _parse_index_option(option):
# TODO properly parse the option; for now, assume it's a string
if isinstance(option, str):
return option.strip()
else:
raise Exception("can only parse string index options")
def _generate_base_constraints(table, columns, ct):
assert isinstance(ct, Constraint)
constraint_sql = ''
column_names = ','.join(
[_parse_name(column.name) for column in columns])
assert isinstance(ct, Constraint)
if isinstance(ct, LanguageConstraint):
# code generation constraint, not used in schema generation
return ''
if isinstance(ct, SqlConstraint):
# For all the additional stuff that this tool doesn't
# support
if ct.constraint_type == 'native':
s = ct.sql
assert isinstance(s, SqlSet)
return ('\n , ' +
s.get_for_platform(PLATFORMS).sql)
return ''
name = None
if isinstance(ct, NamedConstraint):
name = ct.name
if ct.constraint_type == 'fulltextindex':
assert name is not None
constraint_sql += ('\n , FULLTEXT INDEX ' + name + ' (' +
column_names + ')')
if 'option' in ct.details:
constraint_sql += ' ' + _parse_index_option(
ct.details['option'])
elif ct.constraint_type == 'spatialindex':
assert name is not None
constraint_sql += '\n , SPATIAL INDEX ' + name + \
' (' + _parse_name(column_names) + ')'
if 'option' in ct.details:
constraint_sql += ' ' + _parse_index_option(
ct.details['option'])
elif ct.constraint_type == 'uniqueindex':
if name is None:
raise Exception("No name defined for unique index")
constraint_sql += '\n , CONSTRAINT ' + name + \
' UNIQUE INDEX'
if 'using' in ct.details:
constraint_sql += ' USING ' + ct.details['using']
constraint_sql += ' (' + _parse_name(column_names) + ')'
if 'option' in ct.details:
constraint_sql += ' ' + _parse_index_option(
ct.details['option'])
elif ct.constraint_type == 'index':
assert name is not None
constraint_sql += '\n , INDEX ' + name
if 'using' in ct.details:
constraint_sql += ' USING ' + ct.details['using']
constraint_sql += ' (' + _parse_name(column_names) + ')'
if 'option' in ct.details:
constraint_sql += ' ' + _parse_index_option(
ct.details['option'])
elif ct.constraint_type == 'fulltextkey':
assert name is not None
constraint_sql += '\n , FULLTEXT KEY ' + name + \
' (' + _parse_name(column_names) + ')'
if 'option' in ct.details:
constraint_sql += ' ' + _parse_index_option(
ct.details['option'])
elif ct.constraint_type == 'spatialkey':
assert name is not None
constraint_sql += '\n , SPATIAL KEY ' + name + \
' (' + _parse_name(column_names) + ')'
if 'option' in ct.details:
constraint_sql += ' ' + _parse_index_option(
ct.details['option'])
elif ct.constraint_type == 'primarykey':
assert name is not None
constraint_sql += '\n , CONSTRAINT ' + name + \
' PRIMARY KEY'
if 'using' in ct.details:
            constraint_sql += ' USING ' + ct.details['using']
constraint_sql += ' (' + _parse_name(column_names) + ')'
if 'option' in ct.details:
constraint_sql += ' ' + _parse_index_option(
ct.details['option'])
elif ct.constraint_type == 'uniquekey':
assert name is not None
constraint_sql += '\n , CONSTRAINT ' + name + \
' UNIQUE KEY'
if 'using' in ct.details:
constraint_sql += ' USING ' + ct.details['using']
constraint_sql += ' (' + _parse_name(column_names) + ')'
if 'option' in ct.details:
constraint_sql += ' ' + _parse_index_option(
ct.details['option'])
elif ct.constraint_type == 'key':
assert name is not None
constraint_sql += '\n , KEY ' + name
if 'using' in ct.details:
constraint_sql += ' USING ' + ct.details['using']
constraint_sql += ' (' + _parse_name(column_names) + ')'
if 'option' in ct.details:
constraint_sql += ' ' + _parse_index_option(
ct.details['option'])
elif ct.constraint_type == 'foreignkey':
assert name is not None
if (('column' not in ct.details and
'columns' not in ct.details) or
'table' not in ct.details):
raise Exception("column and table must be in foreign "
"key; found in " + column_names + " in " +
table.table_name)
constraint_sql += '\n , FOREIGN KEY ' + name + ' (' + \
_parse_name(column_names) + \
') REFERENCES ' + \
_parse_name(ct.details['table']) + ' ('
if 'column' in ct.details:
constraint_sql += _parse_name(ct.details['column'])
elif 'columns' in ct.details:
constraint_sql += ",".join(
_parse_name(fc) for fc in ct.details['columns'])
else:
raise Exception("no column definition for foreignkey")
constraint_sql += ')'
if 'match' in ct.details:
# TODO details value should be FULL, PARTIAL, or SIMPLE
constraint_sql += ' MATCH ' + ct.details[
'match'].upper()
if 'delete' in ct.details:
# TODO option should be RESTRICT, CASCADE, SET NULL,
# or NO ACTION
constraint_sql += ' ON DELETE ' + ct.details[
'delete'].upper()
if 'update' in ct.details:
# TODO option should be RESTRICT, CASCADE, SET NULL,
# or NO ACTION
constraint_sql += ' ON UPDATE ' + ct.details[
'update'].upper()
# We allow other constraint types, because those could be used
# by other databases or tools.
return constraint_sql
def _generate_validation_triggers(table, csts):
assert isinstance(table, ColumnarSchemaObject)
assert len(csts) > 0
checks = ""
for cst in csts:
assert isinstance(cst, SqlConstraint)
msg = 'Input validation failed'
if 'message' in cst.details:
msg = cst.details['message']
sset = cst.sql
assert isinstance(sset, SqlSet)
sval = sset.get_for_platform(PLATFORMS).sql
for col in table.columns:
sval = sval.replace('{' + col.name + '}', 'NEW.' + col.name)
checks += (
' IF NOT (' + sval +
') THEN\n' +
" CALL ErrorMsg ('" + msg + "');\n"
' END IF;\n')
sql = (
'delimiter //\n' +
'CREATE TRIGGER insert_validation_' + table.name +
'\n BEFORE INSERT ON ' + table.name +
'\n FOR EACH ROW' +
'\nBEGIN\n' + checks +
'END; //\n' +
'CREATE TRIGGER update_validation_' + table.name +
'\n BEFORE UPDATE ON ' + table.name +
'\n FOR EACH ROW' +
'\nBEGIN\n' + checks +
'END; //' +
'\ndelimiter ;')
return sql
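# Illustrative shape of the generated trigger script for a table 't' with
# one input-validation constraint (the message text is the default):
#
#   delimiter //
#   CREATE TRIGGER insert_validation_t
#     BEFORE INSERT ON t
#     FOR EACH ROW
#   BEGIN
#     IF NOT (<validation SQL>) THEN
#       CALL ErrorMsg ('Input validation failed');
#     END IF;
#   END; //
#   CREATE TRIGGER update_validation_t  -- same body, BEFORE UPDATE
#   delimiter ;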
| |
"""
The :mod:`jedi.api.classes` module contains the return classes of the API.
These classes are the much bigger part of the whole API, because they contain
the interesting information about completion and goto operations.
"""
import re
from parso.cache import parser_cache
from parso.python.tree import search_ancestor
from jedi._compatibility import u
from jedi import settings
from jedi import common
from jedi.cache import memoize_method
from jedi.evaluate import representation as er
from jedi.evaluate import instance
from jedi.evaluate import imports
from jedi.evaluate import compiled
from jedi.evaluate.filters import ParamName
from jedi.evaluate.imports import ImportName
from jedi.api.keywords import KeywordName
def _sort_names_by_start_pos(names):
return sorted(names, key=lambda s: s.start_pos or (0, 0))
def defined_names(evaluator, context):
"""
List sub-definitions (e.g., methods in class).
    :type context: Context
:rtype: list of Definition
"""
    first_filter = next(context.get_filters(search_global=True))
    names = list(first_filter.values())
return [Definition(evaluator, n) for n in _sort_names_by_start_pos(names)]
class BaseDefinition(object):
_mapping = {
'posixpath': 'os.path',
'riscospath': 'os.path',
'ntpath': 'os.path',
'os2emxpath': 'os.path',
'macpath': 'os.path',
'genericpath': 'os.path',
'posix': 'os',
'_io': 'io',
'_functools': 'functools',
'_sqlite3': 'sqlite3',
'__builtin__': '',
'builtins': '',
}
_tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in {
'argparse._ActionsContainer': 'argparse.ArgumentParser',
}.items())
def __init__(self, evaluator, name):
self._evaluator = evaluator
self._name = name
"""
        An instance of a :class:`parso.python.tree.Name` subclass.
"""
self.is_keyword = isinstance(self._name, KeywordName)
# generate a path to the definition
self._module = name.get_root_context()
if self.in_builtin_module():
self.module_path = None
else:
self.module_path = self._module.py__file__()
"""Shows the file path of a module. e.g. ``/usr/lib/python2.7/os.py``"""
@property
def name(self):
"""
Name of variable/function/class/module.
For example, for ``x = None`` it returns ``'x'``.
:rtype: str or None
"""
return self._name.string_name
@property
def type(self):
"""
The type of the definition.
Here is an example of the value of this attribute. Let's consider
the following source. As what is in ``variable`` is unambiguous
to Jedi, :meth:`jedi.Script.goto_definitions` should return a list of
        definitions for ``keyword``, ``f``, ``C`` and ``x``.
>>> from jedi import Script
>>> source = '''
... import keyword
...
... class C:
... pass
...
... class D:
... pass
...
... x = D()
...
... def f():
... pass
...
... for variable in [keyword, f, C, x]:
... variable'''
>>> script = Script(source)
>>> defs = script.goto_definitions()
Before showing what is in ``defs``, let's sort it by :attr:`line`
so that it is easy to relate the result to the source code.
>>> defs = sorted(defs, key=lambda d: d.line)
>>> defs # doctest: +NORMALIZE_WHITESPACE
[<Definition module keyword>, <Definition class C>,
<Definition instance D>, <Definition def f>]
Finally, here is what you can get from :attr:`type`:
>>> defs[0].type
'module'
>>> defs[1].type
'class'
>>> defs[2].type
'instance'
>>> defs[3].type
'function'
"""
tree_name = self._name.tree_name
resolve = False
if tree_name is not None:
# TODO move this to their respective names.
definition = tree_name.get_definition()
if definition is not None and definition.type == 'import_from' and \
tree_name.is_definition():
resolve = True
if isinstance(self._name, imports.SubModuleName) or resolve:
for context in self._name.infer():
return context.api_type
return self._name.api_type
def _path(self):
"""The path to a module/class/function definition."""
def to_reverse():
name = self._name
if name.api_type == 'module':
try:
name = list(name.infer())[0].name
except IndexError:
pass
if name.api_type == 'module':
module_contexts = name.infer()
if module_contexts:
module_context, = module_contexts
for n in reversed(module_context.py__name__().split('.')):
yield n
else:
# We don't really know anything about the path here. This
# module is just an import that would lead in an
# ImportError. So simply return the name.
yield name.string_name
return
else:
yield name.string_name
parent_context = name.parent_context
while parent_context is not None:
try:
method = parent_context.py__name__
except AttributeError:
try:
yield parent_context.name.string_name
except AttributeError:
pass
else:
for name in reversed(method().split('.')):
yield name
parent_context = parent_context.parent_context
return reversed(list(to_reverse()))
@property
def module_name(self):
"""
The module name.
>>> from jedi import Script
>>> source = 'import json'
>>> script = Script(source, path='example.py')
>>> d = script.goto_definitions()[0]
>>> print(d.module_name) # doctest: +ELLIPSIS
json
"""
return self._module.name.string_name
def in_builtin_module(self):
"""Whether this is a builtin module."""
return isinstance(self._module, compiled.CompiledObject)
@property
def line(self):
"""The line where the definition occurs (starting with 1)."""
start_pos = self._name.start_pos
if start_pos is None:
return None
return start_pos[0]
@property
def column(self):
"""The column where the definition occurs (starting with 0)."""
start_pos = self._name.start_pos
if start_pos is None:
return None
return start_pos[1]
def docstring(self, raw=False, fast=True):
r"""
Return a document string for this completion object.
Example:
>>> from jedi import Script
>>> source = '''\
... def f(a, b=1):
... "Document for function f."
... '''
>>> script = Script(source, 1, len('def f'), 'example.py')
>>> doc = script.goto_definitions()[0].docstring()
>>> print(doc)
f(a, b=1)
<BLANKLINE>
Document for function f.
Notice that useful extra information is added to the actual
docstring. For function, it is call signature. If you need
actual docstring, use ``raw=True`` instead.
>>> print(script.goto_definitions()[0].docstring(raw=True))
Document for function f.
:param fast: Don't follow imports that are only one level deep like
``import foo``, but follow ``from foo import bar``. This makes
sense for speed reasons. Completing `import a` is slow if you use
the ``foo.docstring(fast=False)`` on every object, because it
parses all libraries starting with ``a``.
"""
return _Help(self._name).docstring(fast=fast, raw=raw)
@property
def description(self):
"""A textual description of the object."""
return u(self._name.string_name)
@property
def full_name(self):
"""
Dot-separated path of this object.
It is in the form of ``<module>[.<submodule>[...]][.<object>]``.
It is useful when you want to look up Python manual of the
object at hand.
Example:
>>> from jedi import Script
>>> source = '''
... import os
... os.path.join'''
>>> script = Script(source, 3, len('os.path.join'), 'example.py')
>>> print(script.goto_definitions()[0].full_name)
os.path.join
Notice that it returns ``'os.path.join'`` instead of (for example)
        ``'posixpath.join'``. This is not correct, since the module's name would
        be ``<module 'posixpath' ...>``. However most users find the former
        more practical.
"""
path = list(self._path())
# TODO add further checks, the mapping should only occur on stdlib.
if not path:
return None # for keywords the path is empty
with common.ignored(KeyError):
path[0] = self._mapping[path[0]]
for key, repl in self._tuple_mapping.items():
if tuple(path[:len(key)]) == key:
path = [repl] + path[len(key):]
return '.'.join(path if path[0] else path[1:])
def goto_assignments(self):
if self._name.tree_name is None:
return self
names = self._evaluator.goto(self._name.parent_context, self._name.tree_name)
return [Definition(self._evaluator, n) for n in names]
def _goto_definitions(self):
# TODO make this function public.
return [Definition(self._evaluator, d.name) for d in self._name.infer()]
@property
@memoize_method
def params(self):
"""
        Raises an ``AttributeError`` if the definition is not callable.
Otherwise returns a list of `Definition` that represents the params.
"""
def get_param_names(context):
param_names = []
if context.api_type == 'function':
param_names = list(context.get_param_names())
if isinstance(context, instance.BoundMethod):
param_names = param_names[1:]
elif isinstance(context, (instance.AbstractInstanceContext, er.ClassContext)):
if isinstance(context, er.ClassContext):
search = '__init__'
else:
search = '__call__'
names = context.get_function_slot_names(search)
if not names:
return []
# Just take the first one here, not optimal, but currently
# there's no better solution.
inferred = names[0].infer()
param_names = get_param_names(next(iter(inferred)))
if isinstance(context, er.ClassContext):
param_names = param_names[1:]
return param_names
elif isinstance(context, compiled.CompiledObject):
return list(context.get_param_names())
return param_names
followed = list(self._name.infer())
if not followed or not hasattr(followed[0], 'py__call__'):
raise AttributeError()
context = followed[0] # only check the first one.
return [Definition(self._evaluator, n) for n in get_param_names(context)]
def parent(self):
context = self._name.parent_context
if context is None:
return None
if isinstance(context, er.FunctionExecutionContext):
# TODO the function context should be a part of the function
# execution context.
context = er.FunctionContext(
self._evaluator, context.parent_context, context.tree_node)
return Definition(self._evaluator, context.name)
def __repr__(self):
return "<%s %s>" % (type(self).__name__, self.description)
def get_line_code(self, before=0, after=0):
"""
Returns the line of code where this object was defined.
:param before: Add n lines before the current line to the output.
:param after: Add n lines after the current line to the output.
:return str: Returns the line(s) of code or an empty string if it's a
builtin.
"""
if self.in_builtin_module():
return ''
path = self._name.get_root_context().py__file__()
lines = parser_cache[self._evaluator.grammar._hashed][path].lines
index = self._name.start_pos[0] - 1
start_index = max(index - before, 0)
return ''.join(lines[start_index:index + after + 1])
class Completion(BaseDefinition):
"""
`Completion` objects are returned from :meth:`api.Script.completions`. They
provide additional information about a completion.
"""
def __init__(self, evaluator, name, stack, like_name_length):
super(Completion, self).__init__(evaluator, name)
self._like_name_length = like_name_length
self._stack = stack
# Completion objects with the same Completion name (which means
# duplicate items in the completion)
self._same_name_completions = []
def _complete(self, like_name):
append = ''
        if settings.add_bracket_after_function \
                and self.type == 'function':
append = '('
if isinstance(self._name, ParamName) and self._stack is not None:
node_names = list(self._stack.get_node_names(self._evaluator.grammar._pgen_grammar))
if 'trailer' in node_names and 'argument' not in node_names:
append += '='
name = self._name.string_name
if like_name:
name = name[self._like_name_length:]
return name + append
@property
def complete(self):
"""
Return the rest of the word, e.g. completing ``isinstance``::
isinstan# <-- Cursor is here
would return the string 'ce'. It also adds additional stuff, depending
on your `settings.py`.
Assuming the following function definition::
def foo(param=0):
pass
        completing ``foo(par`` would give a ``Completion`` whose `complete`
        would be `am=`.
"""
return self._complete(True)
@property
def name_with_symbols(self):
"""
        Similar to :attr:`name`, but also returns the symbols, for example
        assuming the following function definition::
def foo(param=0):
pass
completing ``foo(`` would give a ``Completion`` which
``name_with_symbols`` would be "param=".
"""
return self._complete(False)
def docstring(self, raw=False, fast=True):
if self._like_name_length >= 3:
# In this case we can just resolve the like name, because we
# wouldn't load like > 100 Python modules anymore.
fast = False
return super(Completion, self).docstring(raw=raw, fast=fast)
@property
def description(self):
"""Provide a description of the completion object."""
# TODO improve the class structure.
return Definition.description.__get__(self)
def __repr__(self):
return '<%s: %s>' % (type(self).__name__, self._name.string_name)
@memoize_method
def follow_definition(self):
"""
Return the original definitions. I strongly recommend not using it for
your completions, because it might slow down |jedi|. If you want to
read only a few objects (<=20), it might be useful, especially to get
the original docstrings. The basic problem of this function is that it
follows all results. This means with 1000 completions (e.g. numpy),
it's just PITA-slow.
"""
defs = self._name.infer()
return [Definition(self._evaluator, d.name) for d in defs]
class Definition(BaseDefinition):
"""
*Definition* objects are returned from :meth:`api.Script.goto_assignments`
or :meth:`api.Script.goto_definitions`.
"""
def __init__(self, evaluator, definition):
super(Definition, self).__init__(evaluator, definition)
@property
def description(self):
"""
A description of the :class:`.Definition` object, which is heavily used
in testing. e.g. for ``isinstance`` it returns ``def isinstance``.
Example:
>>> from jedi import Script
>>> source = '''
... def f():
... pass
...
... class C:
... pass
...
... variable = f if random.choice([0,1]) else C'''
>>> script = Script(source, column=3) # line is maximum by default
>>> defs = script.goto_definitions()
>>> defs = sorted(defs, key=lambda d: d.line)
>>> defs
[<Definition def f>, <Definition class C>]
>>> str(defs[0].description) # strip literals in python2
'def f'
>>> str(defs[1].description)
'class C'
"""
typ = self.type
tree_name = self._name.tree_name
if typ in ('function', 'class', 'module', 'instance') or tree_name is None:
if typ == 'function':
# For the description we want a short and a pythonic way.
typ = 'def'
return typ + ' ' + u(self._name.string_name)
elif typ == 'param':
code = search_ancestor(tree_name, 'param').get_code(
include_prefix=False,
include_comma=False
)
return typ + ' ' + code
definition = tree_name.get_definition() or tree_name
# Remove the prefix, because that's not what we want for get_code
# here.
txt = definition.get_code(include_prefix=False)
# Delete comments:
txt = re.sub('#[^\n]+\n', ' ', txt)
# Delete multi spaces/newlines
txt = re.sub('\s+', ' ', txt).strip()
return txt
@property
def desc_with_module(self):
"""
In addition to the definition, also return the module.
.. warning:: Don't use this function yet, its behaviour may change. If
you really need it, talk to me.
        .. todo:: Add full path. This function should return a
`module.class.function` path.
"""
        position = '' if self.in_builtin_module() else '@%s' % (self.line)
return "%s:%s%s" % (self.module_name, self.description, position)
@memoize_method
def defined_names(self):
"""
List sub-definitions (e.g., methods in class).
:rtype: list of Definition
"""
defs = self._name.infer()
return sorted(
common.unite(defined_names(self._evaluator, d) for d in defs),
key=lambda s: s._name.start_pos or (0, 0)
)
def is_definition(self):
"""
Returns True, if defined as a name in a statement, function or class.
Returns False, if it's a reference to such a definition.
"""
if self._name.tree_name is None:
return True
else:
return self._name.tree_name.is_definition()
def __eq__(self, other):
return self._name.start_pos == other._name.start_pos \
and self.module_path == other.module_path \
and self.name == other.name \
and self._evaluator == other._evaluator
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self._name.start_pos, self.module_path, self.name, self._evaluator))
class CallSignature(Definition):
"""
    `CallSignature` objects are the return value of `Script.function_definition`.
    They know what function you are currently in, e.g. `isinstance(` would
    return the `isinstance` function; without `(` it would return nothing.
"""
def __init__(self, evaluator, executable_name, bracket_start_pos, index, key_name_str):
super(CallSignature, self).__init__(evaluator, executable_name)
self._index = index
self._key_name_str = key_name_str
self._bracket_start_pos = bracket_start_pos
@property
def index(self):
"""
The Param index of the current call.
        Returns None if the index cannot be found in the current call.
"""
if self._key_name_str is not None:
for i, param in enumerate(self.params):
if self._key_name_str == param.name:
return i
if self.params:
param_name = self.params[-1]._name
if param_name.tree_name is not None:
if param_name.tree_name.get_definition().star_count == 2:
return i
return None
if self._index >= len(self.params):
for i, param in enumerate(self.params):
tree_name = param._name.tree_name
if tree_name is not None:
# *args case
if tree_name.get_definition().star_count == 1:
return i
return None
return self._index
@property
def bracket_start(self):
"""
The indent of the bracket that is responsible for the last function
call.
"""
return self._bracket_start_pos
def __repr__(self):
return '<%s: %s index %s>' % \
(type(self).__name__, self._name.string_name, self.index)
class _Help(object):
"""
    Temporary implementation, will be used as `Script.help()` or something in
the future.
"""
def __init__(self, definition):
self._name = definition
@memoize_method
def _get_contexts(self, fast):
if isinstance(self._name, ImportName) and fast:
return {}
if self._name.api_type == 'statement':
return {}
return self._name.infer()
def docstring(self, fast=True, raw=True):
"""
The docstring ``__doc__`` for any object.
See :attr:`doc` for example.
"""
        # TODO: Use all of the followed objects as output. Possibly dividing
# them by a few dashes.
for context in self._get_contexts(fast=fast):
return context.py__doc__(include_call_signature=not raw)
return ''
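# Minimal usage sketch (not part of this module; assumes jedi is installed
# and uses the Script API referenced in the docstrings above):
#
#   import jedi
#   script = jedi.Script("import json\njson.lo", 2, len("json.lo"))
#   for completion in script.completions():
#       print(completion.name, completion.complete)
#   # e.g. prints something like: load ad / loads ads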
| |
#!/usr/bin/env python
# Line too long - pylint: disable=C0301
# Invalid name - pylint: disable=C0103
"""
gpstandbywatch.py
Copyright (c) EMC/Greenplum Inc 2011. All Rights Reserved.
Check actual contents and process state of syncmaster
in order to properly return accurate information back to
gpinitstandby via gpstart.
"""
import os
import sys
import glob
import time
from gppylib.gplog import setup_tool_logging, get_default_logger
from gppylib.commands import gp, unix
def matching_files(pglogdir, ignore=None, setlimit=False):
"""
Generate a series of file names corresponding to files
in 'pglogdir' which are not in the specified 'ignore' map.
Note that startup.log is always returned if present.
    If 'setlimit' is true, files whose modification time is at or after
    the ctime of startup.log will also be skipped.
"""
mlimit = None
pattern = os.path.join(pglogdir, 'startup.log')
for path in glob.glob(pattern):
if setlimit:
mlimit = os.stat(path).st_ctime
yield path
pattern = os.path.join(pglogdir, '*.csv')
for path in glob.glob(pattern):
if ignore is not None and path in ignore:
continue
if mlimit is not None and os.stat(path).st_mtime >= mlimit:
continue
yield path
def updated_files(pglogdir, ignore, prev):
"""
Generate a series of (time, path) tuples corresponding to files
in 'pglogdir' and not in 'ignore' which were also modified
after 'prev' (or all files if 'prev' is None).
"""
for path in matching_files(pglogdir, ignore):
ts = os.stat(path).st_mtime
if prev is None or prev < ts:
yield (ts, path)
def updated_handles(pglogdir, ignore, prev, handles):
"""
Generate a series of (time, handle) tuples corresponding to files
in 'pglogdir' and not in 'ignore' modified after 'prev'
(or all files if 'prev' is None).
"""
for ts, path in updated_files(pglogdir, ignore, prev):
h = handles.get(path, None)
if h is None:
h = open(path, 'r')
handles[path] = h
yield (ts, h)
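# Illustrative polling loop built from the generators above (process() is a
# hypothetical callback); this mirrors what SyncmasterWatcher.tail_briefly
# does below:
#
#   prev, handles = None, {}
#   while True:
#       newest = None
#       for ts, h in updated_handles(pglogdir, ignore, prev, handles):
#           if newest is None or newest < ts:
#               newest = ts
#           for line in h:
#               process(line)
#       if newest is not None:
#           prev = newest
#       time.sleep(0.1)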
class SyncmasterWatcher:
"""
Watch changes to files in the pg_log directory recorded by the gpsyncmaster.
"""
def __init__(self, datadir):
"""
Build a map containing the existing contents of the pg_log
directory so that we can avoid getting confused by them
after we start the syncmaster.
"""
self.datadir = datadir
self.pglogdir = os.path.join(self.datadir, 'pg_log')
# note use of setlimit=True here to prevent any files created
# after startup.log from becoming ignored.
self.ignore = {}
for path in matching_files( self.pglogdir, setlimit=True ):
self.ignore[path] = True
self.handles = {}
self.maxlines = 1000
self.timelimit = 10
self.delay = 0.1
def tail_briefly(self):
"""
Generate lines recently added to log files in the pg_log directory
updated after our __init__ constructor was called.
"""
start = time.time() # starting time
elapsed = 0 # time elapsed so far
count = 0 # number of lines we've seen
tp = None
# until we're out of time or have returned enough lines
while elapsed < self.timelimit and count < self.maxlines:
# for each file modified since we last checked
tn = None
for ts, h in updated_handles(self.pglogdir, self.ignore, tp, self.handles):
# track the last file modification time
if tn is None or tn < ts:
tn = ts
# yield the new lines to the caller
while count < self.maxlines:
line = h.readline()
if not line:
break
yield line
count += 1
# update the elapsed time
elapsed = time.time() - start
# if any new lines, update prev and keep checking for more
if tn is not None:
tp = tn
continue
# if we get here it means none of the files were updated in
# our last iteration. sleep a moment before checking for
# more updates
time.sleep(self.delay)
def monitor_logs(self):
"""
Read the syncmaster log files for a few seconds, looking for
potential problems.
        Returns 0 if no problems were seen, or 1 if the startup log
contained an error or if the gpsyncmaster process exited before
we were done watching.
"""
logger.info("Monitoring logs")
# now scan some of the syncmaster output for a moment
for line in self.tail_briefly():
if line.startswith('Traceback'): # gpsyncmaster traceback recorded
logger.warning(line)
return 1
# MPP-13212 - since the syncmaster reports rejected client connections
# as 'FATAL' errors, the presence of a 'FATAL' error need not indicate
# a problem in the syncmaster so we comment out the following logic:
#
# if line.find('FATAL') >= 0: # fatal error recorded
# logger.warning(line)
# return 1
#
# This is especially important for health monitoring clients which may
# rely on the difference between a rejected connection and a TCP failure.
if line.find('could not bind IPv4 socket') >= 0: # syncmaster used IPv6 by mistake
logger.warning(line)
return 1
if line.find('QDSYNC: scan forward') >= 0: # syncmaster appears to be working
logger.info(line)
break
logger.info("checking if syncmaster is running")
pid = gp.getSyncmasterPID('localhost', self.datadir)
        if pid <= 0:
logger.warning("syncmaster not running")
return 1
# syncmaster is running and there are no obvious errors in the log
logger.info("syncmaster appears ok, pid %s" % pid)
return 0
def close(self):
"""
Closes all handles to the logs we're watching.
"""
for h in self.handles.values():
h.close()
self.handles = {}
if __name__ == '__main__':
# setup gpAdminLogs logging
execname = os.path.split(sys.argv[0])[-1]
hostname = unix.getLocalHostname()
username = unix.getUserName()
setup_tool_logging(execname, hostname, username)
logger = get_default_logger()
# watch syncmaster logs
    watcher = SyncmasterWatcher(sys.argv[1])
rc = watcher.monitor_logs()
watcher.close()
# report final status
logger.info("exiting with %s" % rc)
    sys.exit(rc)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, Cisco Systems, Inc.
# @author: Edgar Magana, Cisco Systems, Inc.
# @author: Arvind Somya, Cisco Systems, Inc. (asomya@cisco.com)
#
"""
PlugIn for Nexus OS driver
"""
import logging
from quantum.common import exceptions as exc
from quantum.openstack.common import importutils
from quantum.plugins.cisco.common import cisco_constants as const
from quantum.plugins.cisco.common import cisco_credentials_v2 as cred
from quantum.plugins.cisco.common import cisco_exceptions as excep
from quantum.plugins.cisco.db import network_db_v2 as cdb
from quantum.plugins.cisco.db import nexus_db_v2 as nxos_db
from quantum.plugins.cisco.l2device_plugin_base import L2DevicePluginBase
from quantum.plugins.cisco.nexus import cisco_nexus_configuration as conf
LOG = logging.getLogger(__name__)
class NexusPlugin(L2DevicePluginBase):
"""
    Nexus PlugIn Main Class
"""
_networks = {}
def __init__(self):
"""
Extracts the configuration parameters from the configuration file
"""
# Initialize the nxos db
nxos_db.initialize()
self._client = importutils.import_object(conf.NEXUS_DRIVER)
LOG.debug(_("Loaded driver %s"), conf.NEXUS_DRIVER)
self._nexus_switches = conf.NEXUS_DETAILS
self.credentials = {}
def get_credential(self, nexus_ip):
if nexus_ip not in self.credentials:
_nexus_username = cred.Store.get_username(nexus_ip)
_nexus_password = cred.Store.get_password(nexus_ip)
self.credentials[nexus_ip] = {
'username': _nexus_username,
'password': _nexus_password
}
return self.credentials[nexus_ip]
def get_all_networks(self, tenant_id):
"""
        Returns a list containing all
        <network_uuid, network_name> network dictionaries for
        the specified tenant.
"""
LOG.debug(_("NexusPlugin:get_all_networks() called"))
return self._networks.values()
def create_network(self, tenant_id, net_name, net_id, vlan_name, vlan_id,
host, instance):
"""
Create a VLAN in the appropriate switch/port,
and configure the appropriate interfaces
for this VLAN
"""
LOG.debug(_("NexusPlugin:create_network() called"))
# Grab the switch IP and port for this host
switch_ip = ''
port_id = ''
for switch in self._nexus_switches.keys():
for hostname in self._nexus_switches[switch].keys():
if str(hostname) == str(host):
switch_ip = switch
port_id = self._nexus_switches[switch][hostname]['ports']
# Check if this network is already in the DB
binding = nxos_db.get_port_vlan_switch_binding(
port_id, vlan_id, switch_ip)
if not binding:
_nexus_ip = switch_ip
_nexus_ports = (port_id,)
_nexus_ssh_port = \
self._nexus_switches[switch_ip]['ssh_port']['ssh_port']
_nexus_creds = self.get_credential(_nexus_ip)
_nexus_username = _nexus_creds['username']
_nexus_password = _nexus_creds['password']
# Check for vlan/switch binding
vbinding = nxos_db.get_nexusvlan_binding(vlan_id, switch_ip)
if not vbinding:
# Create vlan and trunk vlan on the port
self._client.create_vlan(
vlan_name, str(vlan_id), _nexus_ip,
_nexus_username, _nexus_password,
_nexus_ports, _nexus_ssh_port, vlan_id)
else:
# Only trunk vlan on the port
man = self._client.nxos_connect(_nexus_ip,
int(_nexus_ssh_port),
_nexus_username,
_nexus_password)
self._client.enable_vlan_on_trunk_int(man,
port_id,
vlan_id)
nxos_db.add_nexusport_binding(port_id, str(vlan_id),
switch_ip, instance)
new_net_dict = {const.NET_ID: net_id,
const.NET_NAME: net_name,
const.NET_PORTS: {},
const.NET_VLAN_NAME: vlan_name,
const.NET_VLAN_ID: vlan_id}
self._networks[net_id] = new_net_dict
return new_net_dict
def delete_network(self, tenant_id, net_id, **kwargs):
"""
Deletes the VLAN in all switches, and removes the VLAN configuration
from the relevant interfaces
"""
LOG.debug(_("NexusPlugin:delete_network() called"))
def get_network_details(self, tenant_id, net_id, **kwargs):
"""
Returns the details of a particular network
"""
LOG.debug(_("NexusPlugin:get_network_details() called"))
        network = self._get_network(tenant_id, net_id,
                                    kwargs.get('context'),
                                    kwargs.get('base_plugin_ref'))
return network
def update_network(self, tenant_id, net_id, **kwargs):
"""
Updates the properties of a particular
Virtual Network.
"""
LOG.debug(_("NexusPlugin:update_network() called"))
def get_all_ports(self, tenant_id, net_id, **kwargs):
"""
This is probably not applicable to the Nexus plugin.
Delete if not required.
"""
LOG.debug(_("NexusPlugin:get_all_ports() called"))
def create_port(self, tenant_id, net_id, port_state, port_id, **kwargs):
"""
This is probably not applicable to the Nexus plugin.
Delete if not required.
"""
LOG.debug(_("NexusPlugin:create_port() called"))
def delete_port(self, device_id, vlan_id):
"""
Delete port bindings from the database and scan
whether the network is still required on
the interfaces trunked
"""
LOG.debug(_("NexusPlugin:delete_port() called"))
# Delete DB row for this port
row = nxos_db.get_nexusvm_binding(vlan_id, device_id)
if row:
nxos_db.remove_nexusport_binding(row['port_id'], row['vlan_id'],
row['switch_ip'],
row['instance_id'])
# Check for any other bindings with the same vlan_id and switch_ip
bindings = nxos_db.get_nexusvlan_binding(
row['vlan_id'], row['switch_ip'])
if not bindings:
# Delete this vlan from this switch
_nexus_ip = row['switch_ip']
_nexus_ports = (row['port_id'],)
_nexus_ssh_port = \
self._nexus_switches[_nexus_ip]['ssh_port']['ssh_port']
_nexus_creds = self.get_credential(_nexus_ip)
_nexus_username = _nexus_creds['username']
_nexus_password = _nexus_creds['password']
self._client.delete_vlan(
str(row['vlan_id']), _nexus_ip,
_nexus_username, _nexus_password,
_nexus_ports, _nexus_ssh_port)
return row['instance_id']
def update_port(self, tenant_id, net_id, port_id, port_state, **kwargs):
"""
This is probably not applicable to the Nexus plugin.
Delete if not required.
"""
LOG.debug(_("NexusPlugin:update_port() called"))
def get_port_details(self, tenant_id, net_id, port_id, **kwargs):
"""
This is probably not applicable to the Nexus plugin.
Delete if not required.
"""
LOG.debug(_("NexusPlugin:get_port_details() called"))
def plug_interface(self, tenant_id, net_id, port_id, remote_interface_id,
**kwargs):
"""
This is probably not applicable to the Nexus plugin.
Delete if not required.
"""
LOG.debug(_("NexusPlugin:plug_interface() called"))
def unplug_interface(self, tenant_id, net_id, port_id, **kwargs):
"""
This is probably not applicable to the Nexus plugin.
Delete if not required.
"""
LOG.debug(_("NexusPlugin:unplug_interface() called"))
def _get_vlan_id_for_network(self, tenant_id, network_id, context,
base_plugin_ref):
"""
Obtain the VLAN ID given the Network ID
"""
vlan = cdb.get_vlan_binding(network_id)
return vlan.vlan_id
def _get_network(self, tenant_id, network_id, context, base_plugin_ref):
"""
        Gets the network details from the base plugin
"""
network = base_plugin_ref._get_network(context, network_id)
if not network:
raise exc.NetworkNotFound(net_id=network_id)
return {const.NET_ID: network_id, const.NET_NAME: network.name,
const.NET_PORTS: network.ports}
| |
import boto3
import json
import re
import collections
import datetime
import pytz
from params import Params
USERS_FOLDER = 'users/'
STATE_FOLDER = 'state/'
def getStateFilename(basename, ext, ord=None):
name = ''.join((STATE_FOLDER, basename.lower(), '.', ext))
if ord is not None:
name = ''.join((name, '.', str(ord)))
return name
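# For illustration (hypothetical arguments): getStateFilename('Host1', 'ping')
# returns 'state/host1.ping', and getStateFilename('Host1', 'ping', ord=2)
# returns 'state/host1.ping.2'.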
def getUserFilename(user):
return ''.join((USERS_FOLDER, user.lower()))
def getExpiryTime(max_age=None):
return datetime.datetime.now(pytz.UTC) - \
datetime.timedelta(
seconds=Params.MAX_AGE if max_age is None else max_age)
def getFullHostname(host):
return ''.join((host, '.', Params.DOMAIN_ROOT))
def getMaxErrors():
return Params.MAX_ERRORS
def argToTuple(arg):
if arg is None:
return ()
elif isinstance(arg, str):
return (arg,)
else:
return tuple(arg)
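# For illustration: argToTuple(None) == (), argToTuple('a') == ('a',), and
# argToTuple(['a', 'b']) == ('a', 'b').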
def kickManager(session):
if Params.DO_SNS:
session.client('sns').publish(
TopicArn=Params.SNS_ARN,
Message='{"Event":"Change"}',
)
'''
The tuple returned by S3Bucket.iterStateFiles()
file: The S3 ObjectSummary for the file
item: The base filename
host: If the file references a hostname, the name
user: If the file references a hostname, the user portion
    sub: If the file references a hostname, the optional sub-part
    ip: If the file references an IP, the IP
    ext: The filename extension
    ord: If the extension is followed by an integer ordinal, the value
'''
StateFile = collections.namedtuple(
'StateFile', 'file item host user sub ip ext ord')
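# For illustration (hypothetical keys): 'state/alice-www.ping.2' parses to
# StateFile(item='alice-www', host='alice-www', user='alice', sub='www',
# ip=None, ext='ping', ord=2), while 'state/10.0.0.1.lock' parses with
# item='10.0.0.1', ip='10.0.0.1', ext='lock' and host/user/sub/ord as None.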
'''
The tuple returned by S3Bucket.iterUserFiles()
file: The S3 ObjectSummary for the file
user: The username
'''
UserFile = collections.namedtuple(
'UserFile', 'file user')
class S3Bucket():
PING_EXT = 'ping'
HOLD_EXT = 'hold'
EXPIRED_EXT = 'expired'
ERROR_EXT = 'error'
LOCK_EXT = 'lock'
def __init__(self, session=None):
if session:
self.session = session
else:
self.session = boto3.Session()
self.s3 = self.session.resource("s3")
self.bucket = self.s3.Bucket(Params.S3_BUCKET)
self.files = {f.key: f for f in self.bucket.objects.all()}
def iterStateFiles(self, base=None, ext=None):
'''
A generator to iterate the state files.
base
is an optional string or iterable of strings
specifying which base filename(s) to include
ext
is an optional string or iterable of strings
            specifying which filename extension(s) to
include
The generator returns a StateFile namedtuple
'''
base = argToTuple(base)
ext = argToTuple(ext)
        for k, f in self.files.items():
            match = re.match(
                STATE_FOLDER +
                r'(?P<item>(?P<host>'
                r'(?P<user>[a-zA-Z0-9]+)(?:-(?P<sub>[a-zA-Z0-9]+))?)|'
                r'(?P<ip>[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+))'
                r'\.(?P<ext>[a-z]+)(?:\.(?P<ord>[0-9]+))?$',
                k)
if match and \
(not base or match.group('item') in base) and \
(not ext or match.group('ext') in ext):
yield StateFile(
file=f,
item=match.group('item'),
host=match.group('host'),
user=match.group('user'),
sub=match.group('sub'),
ip=match.group('ip'),
ext=match.group('ext'),
ord=int(match.group('ord')) if match.group('ord') else None)
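    # A minimal usage sketch (hypothetical bucket contents; assumes Params is
    # configured):
    #
    #   bucket = S3Bucket()
    #   for sf in bucket.iterStateFiles(ext=(S3Bucket.PING_EXT,
    #                                        S3Bucket.HOLD_EXT)):
    #       print(sf.item, sf.ext, sf.ord)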
def iterUserFiles(self):
'''
        A generator to iterate the user files.
        The generator returns a UserFile namedtuple
        '''
        for k, f in self.files.items():
match = re.match(USERS_FOLDER + '([a-zA-Z0-9]+)', k)
if match:
yield UserFile(file=f, user=match.group(1))
def getFile(self, filename):
'''Return the S3 ObjectSummary for the specified file, or None'''
return self.files.get(filename)
def writeStateFile(self, name, ext, body, ord=None):
self.bucket.put_object(
Key=getStateFilename(name, ext, ord), Body=body)
def isLocked(self, name):
return getStateFilename(name, self.LOCK_EXT) in self.files
def getLockFile(self, name):
return self.getFile(getStateFilename(name, self.LOCK_EXT))
def writeLockFile(self, name, msg):
self.writeStateFile(name, self.LOCK_EXT, json.dumps({'error': msg}))
def getUserFile(self, user):
return self.getFile(getUserFilename(user))
def writeUserFile(self, user, body):
self.bucket.put_object(Key=getUserFilename(user), Body=body)
def writePingFile(self, hostname, ip, hold):
'''
Write or update the .ping file for the specified hostname.
If hold, also write a .hold file for the hostname. Otherwise
delete any existing .hold file.
'''
self.writeStateFile(
hostname, self.PING_EXT, json.dumps({'ip': ip}))
        expired_file = self.getFile(getStateFilename(hostname, self.EXPIRED_EXT))
if expired_file:
expired_file.delete()
hold_file = self.getFile(getStateFilename(hostname, self.HOLD_EXT))
if hold:
if not hold_file:
self.writeStateFile(
hostname, self.HOLD_EXT, json.dumps({'ip': ip}))
else:
if hold_file:
hold_file.delete()
def setHostIP(self, host, ip):
'''
Create or update the route 53 A record for the specified hostname.
'''
result = self.session.client('route53').change_resource_record_sets(
HostedZoneId=Params.ROUTE53_ZONE_ID,
ChangeBatch={
'Changes':
[
{
'Action': 'UPSERT',
'ResourceRecordSet':
{
'Name': getFullHostname(host),
'Type': 'A',
'TTL': Params.TTL,
'ResourceRecords':
[{'Value':
ip if ip else Params.DEFAULT_IP}]
}
}
]
})
change_info = result.get('ChangeInfo')
bOk = change_info and 'PENDING' == change_info.get('Status')
return (bOk, result)
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import io
import logging
import os
import re
from urllib3.util.retry import Retry
from celery.utils.log import get_task_logger
import requests
from ..apikeys import get_api_key
from ..conf import settings, statusdb, recsdb, file_cache
from ..exceptions import SourceError
from ..stations import get_station_map
from ..utils import get_rnd_item, get_uuid
import pytz
logger = get_task_logger(__name__)
class BaseSource(object):
"""Base source class to scrape online resources. This class is to be used
as parent of any scraping class.
Basic mechanism from fetching online resource to hosing extracted data in
the database, :meth:`openkongqi.source.BaseSource.scrape` is the main entry
point of this class.
"""
    key_context = None
    _now = None
    #: compiled regex matching "null" values in the source text; subclasses
    #: may override this (:meth:`pythonify` assumes the attribute exists)
    null_re = None
def __init__(self, name):
"""
:param name: fetch name
:type name: str
"""
if name not in settings['SOURCES']:
raise SourceError('Unknown source ({})'.format(name))
self.name = name
self.target = settings['SOURCES'][name]['target']
self._station_map = get_station_map(
settings['SOURCES'][name]['uuid'])
self._api_key = get_api_key(name.split(":")[0])
self._tz = pytz.timezone(settings['SOURCES'][name]['tz'])
self._status = statusdb
self._cache = file_cache
self._records = recsdb
def scrape(self):
"""Main entry point for :class:`openkongqi.source.BaseSource` instances.
        Orchestrates the resource scraping; the following actions are performed:
* :meth:`openkongqi.source.BaseSource.fetch`: fetch online resource
* :meth:`openkongqi.source.BaseSource.save_status`: save fetching
status
* :meth:`openkongqi.source.BaseSource.cache`: save the resource in the
cache
* :meth:`openkongqi.source.BaseSource.extract`: extract the data
from the resource
* :meth:`openkongqi.source.BaseSource.save_data`: save extracted data
"""
self._now = datetime.now(pytz.utc)
src_content = self.fetch()
self.save_status()
if src_content is not None:
content = self.cache(src_content)
data = self.extract(content)
self.save_data(data)
def fetch(self):
"""Fetch the resource
.. warning:: This method has to be overwritten
:returns: content - a file-like object
"""
raise NotImplementedError
def post_fetch(self, resource):
"""Treat the resource after fetching raw content
.. warning:: This method has to be overwritten
:param resource: raw content / data (could be http, csv, xml, etc...)
:returns: content - a file-like object
"""
raise NotImplementedError
def get_station_uuid(self, name):
"""Return the uuid of a station
.. note:: This is not performing any recursive search in the map
"""
return get_uuid(settings['SOURCES'][self.name]['uuid'],
self._station_map[name]['uuid'])
def get_status_data(self):
"""Get the fetch status
:returns: data - a dict with data to serialize in the status entry
"""
return dict()
def save_status(self):
"""Save status data to keep track of fetching history.
Uses :meth:`openkongqi.source.BaseSource.get_status_data` to get the
status data to save.
"""
data = self.get_status_data()
if data is None:
data = {'ts': self._now.strftime('%Y%m%d%H%M%S')}
else:
data.setdefault('ts', self._now.strftime('%Y%m%d%H%M%S'))
self._status.set_status(self.name, data)
def cache(self, content):
"""Cache fetching content
:param content: content to cache
:type content: file-like object
"""
self._cache.set(self.name, content, self._now)
fp = self._cache.get_fp(self.name, self._now)
        # display how much was cached on the server
        self.log_info("Cached {} bytes to server."
                      .format(os.path.getsize(fp)))
return open(fp, 'rb')
def pythonify(self, text, is_num=False):
if text is None:
return None
if self.null_re is not None:
if self.null_re.match(text) is None:
if is_num: # remove everything but nums and dots
return float(re.sub(r'[^\d.]+', '', text))
else:
return text
else:
return None
else:
if is_num: # remove everything but nums and dots
try:
return float(re.sub(r'[^\d.]+', '', text))
except ValueError:
pass
else:
return text
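    # For illustration: with self.null_re matching '-', pythonify('-')
    # returns None and pythonify('12.5', is_num=True) returns 12.5; with
    # null_re left as None, a non-numeric string such as 'n/a' makes the
    # float conversion fail and the method implicitly returns None.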
def extract(self, content):
"""Extract data from the content
"""
raise NotImplementedError
def save_data(self, data, ignore_check_latest=False):
self._records.write_records(data,
ignore_check_latest=ignore_check_latest,
context=self.key_context)
def get_latest(self, uuid):
return self._records.get_latest(uuid, context=self.key_context)
def get_records(self, uuid, start, end, filters=None):
return self._records.get_records(uuid=uuid,
start=start,
end=end,
context=self.key_context)
def log_debug(self, msg, *args, **kwargs):
self.log(logging.DEBUG, msg, *args, **kwargs)
def log_info(self, msg, *args, **kwargs):
self.log(logging.INFO, msg, *args, **kwargs)
def log_warning(self, msg, *args, **kwargs):
self.log(logging.WARNING, msg, *args, **kwargs)
def log_error(self, msg, *args, **kwargs):
self.log(logging.ERROR, msg, *args, **kwargs)
def log_critical(self, msg, *args, **kwargs):
self.log(logging.CRITICAL, msg, *args, **kwargs)
def log(self, level, msg, *args, **kwargs):
_msg = "{} - {}".format(self.name, msg)
logger.log(level, _msg, *args, **kwargs)
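# A minimal subclass sketch (hypothetical source name and payload), showing
# the contract scrape() relies on: fetch() returns a file-like object (or
# None to skip the run) and extract() turns the cached content into records:
#
#   class StaticSource(BaseSource):
#       def fetch(self):
#           return io.BytesIO(b'pm25,42\n')
#
#       def extract(self, content):
#           pollutant, value = content.read().decode().strip().split(',')
#           return {pollutant: float(value)}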
class HTTPSource(BaseSource):
    #: default HTTP method to use when performing the request
    method = 'GET'
    #: HTTP request timeout value in seconds
    http_timeout = 10
#: SSL Verification
ssl_verify = True
def __init__(self, name):
super(HTTPSource, self).__init__(name)
def fetch(self):
req = self.get_req()
res = self.send(req)
if res is None:
self._info = None
self._statuscode = None
return None
self._info = res.headers
self._statuscode = res.status_code
try:
            # don't use `res.headers.get('content-length')` here because a
            # missing header would come back as None, making the
            # empty-content check below a false negative
content_length = res.headers['content-length']
except KeyError:
pass
else:
if (self._statuscode == 200 and content_length == '0'):
self.log_warning("Fetched content is empty; skipping cache ...")
return None
return self.post_fetch(res)
def send(self, req, timeout=None):
"""Wrap a :class:`Request <requests.Request>` in a session and send it.
Any type of error (status code not 2xx or exceptions) will be handled here
and the function then returns ``None``.
:param req: :class:`Request <requests.Request>` instance
:type req: requests.Request
:param timeout: (optional) How long to wait for the server to send data
before giving up
:type timeout: float or tuple
:rtype: requests.Response
"""
if req.url is None:
self.log_warning("no URL provided, abort fetch")
return None
if timeout is None:
timeout = self.http_timeout
retry_count = 3
retry_strategy = Retry(
total=retry_count,
status_forcelist=[413, 429, 500, 502, 503, 504],
)
        adapter = requests.adapters.HTTPAdapter(max_retries=retry_strategy)
        s = requests.Session()
        s.mount("https://", adapter)
        s.mount("http://", adapter)
        prepped = s.prepare_request(req)
try:
res = s.send(prepped, timeout=timeout, verify=self.ssl_verify)
        except requests.exceptions.RetryError:
            # requests wraps urllib3's MaxRetryError once retries are exhausted
            self.log_error("fetch failed after {} retries".format(retry_count))
return None
except requests.Timeout as e:
self.log_error("fetch error: timeout ({})".format(e.request.url))
return None
except requests.RequestException as e:
self.log_error("fetch error: {}".format(e.request.url))
return None
        if res.status_code != requests.codes.ok:
            self.log_error("fetch error: status {} ({})".format(
                res.status_code,
                req.url,
            ))
            self.log_debug(res.text)
            s.close()
            return None
s.close()
self.log_info("fetch success: {}".format(res.status_code))
self.log_debug(res.text)
return res
def post_fetch(self, response):
# The Bytes stream is required for the caching operations
# response.content is a bytes string
return io.BytesIO(response.content)
def get_req(self, **kwargs):
"""Return an :class:`requests.Request` instance
"""
req = requests.Request(
self.method,
self.get_url(**kwargs),
data=self.get_data(**kwargs),
json=self.get_json(**kwargs),
params=self.get_params(**kwargs),
headers=self.get_headers(**kwargs),
)
return req
def get_url(self, **kwargs):
"""Return the target URL, if ``None`` is returned, the scrape will be
gracefully stopped.
"""
return self.target
def get_data(self, **kwargs):
return {}
def get_json(self, **kwargs):
return None
def get_params(self, **kwargs):
return {}
def get_headers(self, **kwargs):
headers = {
'User-Agent': get_rnd_item(settings['UA_FILE']),
}
return headers
def get_status_data(self):
data = {
'code': self._statuscode
}
if self._info is not None and 'Last-Modified' in self._info:
data['last-modified'] = self._info['Last-Modified']
return data
| |
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tools.paths import *
from tools.data.support import *
from argparse import ArgumentTypeError
from utils import columnate
try:
    import tools.private_settings as ps
except ImportError:
    ps = object()
TEST_CMSIS_LIB = join(TEST_DIR, "cmsis", "lib")
TEST_MBED_LIB = join(TEST_DIR, "mbed", "env")
PERIPHERALS = join(TEST_DIR, "peripherals")
BENCHMARKS_DIR = join(TEST_DIR, "benchmarks")
SD = join(TEST_DIR, "sd")
TMP102 = join(PERIPHERALS, 'TMP102')
AT30TSE75X = join(PERIPHERALS, 'AT30TSE75X')
"""
Wiring:
* Ground:
* LPC1*: p1
* KL25Z: GND
* Vout
* LPC1*: p40
* KL25Z: P3V3
* TMP102 (I2C):
* LPC1*: (SDA=p28 , SCL=p27)
* KL25Z: (SDA=PTC9, SCL=PTC8)
* MAXWSNENV: (SDA=TP6, SCL=TP5)
* digital_loop (Digital(In|Out|InOut), InterruptIn):
* Arduino headers: (D0 <-> D7)
* NUCLEO_*: (D2 <-> D9)
* LPC1549: (D2 <-> D7)
* LPC1*: (p5 <-> p25 )
* KL25Z: (PTA5<-> PTC6)
* MAXWSNENV: (TP3 <-> TP4)
* MAX32600MBED: (P1_0 <-> P4_7)
* VK_RZ_A1H: (P3_2 <-> P5_6)
* port_loop (Port(In|Out|InOut)):
* Arduino headers: (D0 <-> D7), (D1 <-> D6)
* LPC1*: (p5 <-> p25), (p6 <-> p26)
* KL25Z: (PTA5 <-> PTC6), (PTA4 <-> PTC5)
* NUCLEO_F103RB: (PC_6 <-> PB_8), (PC_5 <-> PB_9)
* MAXWSNENV: (TP1 <-> TP3), (TP2 <-> TP4)
* MAX32600MBED: (P1_0 <-> P4_7), (P1_1 <-> P4_6)
* VK_RZ_A1H: (P3_2 <-> P5_6), (P3_7 <-> P5_1)
* analog_loop (AnalogIn, AnalogOut):
* Arduino headers: (A0 <-> A5)
* NUCLEO64: (A0 <-> A2)
* NUCLEO144: (A0 <-> D13)
* LPC1549: (A0 <-> D12)
* LPC1*: (p17 <-> p18 )
* KL25Z: (PTE30 <-> PTC2)
* analog_pot (AnalogIn):
* Arduino headers: (A0, A1)
* VK_RZ_A1H: (AN0, AN1)
* SD (SPI):
* LPC1*: (mosi=p11 , miso=p12 , sclk=p13 , cs=p14 )
* KL25Z: (mosi=PTD2, miso=PTD3, sclk=PTD1, cs=PTD0)
* MMA7660 (I2C):
* LPC1*: (SDA=p28 , SCL=p27)
* i2c_loop:
* LPC1768: (p28 <-> p9), (p27 <-> p10)
* NUCLEO64: (D14 <-> D3), (D15 <-> D6)
* NUCLEO144: (D14 <-> PB_11), (D15 <-> PB_10)
* i2c_eeprom:
* LPC1*: (SDA=p28 , SCL=p27)
* KL25Z: (SDA=PTE0, SCL=PTE1)
* VK_RZ_A1H:(SDA=P1_1, SCL=P1_0)
* can_transceiver:
* LPC1768: (RX=p9, TX=p10)
* LPC1549: (RX=D9, TX=D8)
* LPC4088: (RX=p9, TX=p10)
* VK_RZ_A1H:(RX=P5_9, TX=P5_10)
* NUCLEO_F091RC: (RX=PA_11, TX=PA_12)
* NUCLEO_F072RB: (RX=PA_11, TX=PA_12)
* NUCLEO_F042K6: (RX=PA_11, TX=PA_12)
* NUCLEO_F334R8: (RX=PA_11, TX=PA_12)
* NUCLEO_F303RE: (RX=PA_11, TX=PA_12)
* NUCLEO_F303K8: (RX=PA_11, TX=PA_12)
* NUCLEO_F302R8: (RX=PA_11, TX=PA_12)
* NUCLEO_F446RE: (RX=PA_11, TX=PA_12)
* NUCLEO_F446ZE: (RX=PA_11, TX=PA_12)
* DISCO_F469NI: (RX=PB_8, TX=PB_9)
  * DISCO_F429ZI: (RX=PA_11, TX=PA_12)
* NUCLEO_F103RB: (RX=PA_11, TX=PA_12)
* NUCLEO_F746ZG: (RX=PA_11, TX=PA_12)
* DISCO_F746NG: (RX=PB_8, TX=PB_9)
* DISCO_L476VG: (RX=PA_11, TX=PA_12)
* NUCLEO_L476RG: (RX=PA_11, TX=PA_12)
* NUCLEO_L432KC: (RX=PA_11, TX=PA_12)
* DISCO_F303VC: (RX=PA_11, TX=PA_12)
* NUCLEO_F412ZG: (RX=PA_11, TX=PA_12)
"""
TESTS = [
# Automated MBED tests
{
"id": "MBED_A1", "description": "Basic",
"source_dir": join(TEST_DIR, "mbed", "basic"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
},
{
"id": "MBED_A2", "description": "Semihost file system",
"source_dir": join(TEST_DIR, "mbed", "file"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC2368", "LPC11U24"]
},
{
"id": "MBED_A3", "description": "C++ STL",
"source_dir": join(TEST_DIR, "mbed", "stl"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": False,
},
{
"id": "MBED_A4", "description": "I2C TMP102",
"source_dir": join(TEST_DIR, "mbed", "i2c_TMP102"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, TMP102],
"automated": True,
"peripherals": ["TMP102"]
},
{
"id": "MBED_AT30TSE75X", "description": "I2C Temperature Sensor / EEPROM",
"source_dir": join(TEST_DIR, "mbed", "i2c_at30tse75x"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, AT30TSE75X],
"automated": False,
"peripherals": ["AT30TSE75X"]
},
{
"id": "MBED_A5", "description": "DigitalIn DigitalOut",
"source_dir": join(TEST_DIR, "mbed", "digitalin_digitalout"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"peripherals": ["digital_loop"]
},
{
"id": "MBED_A6", "description": "DigitalInOut",
"source_dir": join(TEST_DIR, "mbed", "digitalinout"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"peripherals": ["digital_loop"]
},
{
"id": "MBED_A7", "description": "InterruptIn",
"source_dir": join(TEST_DIR, "mbed", "interruptin"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"peripherals": ["digital_loop"]
},
{
"id": "MBED_A8", "description": "Analog",
"source_dir": join(TEST_DIR, "mbed", "analog"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"peripherals": ["analog_loop"],
"mcu": ["LPC1768", "LPC2368", "LPC2460", "KL25Z", "K64F", "K66F", "K22F", "LPC4088", "LPC1549",
"NUCLEO_F072RB", "NUCLEO_F091RC", "NUCLEO_F302R8", "NUCLEO_F303K8", "NUCLEO_F303RE", "NUCLEO_F207ZG",
"NUCLEO_F334R8", "NUCLEO_F303ZE", "NUCLEO_L053R8", "NUCLEO_L073RZ", "NUCLEO_L152RE",
"NUCLEO_F410RB", "NUCLEO_F446RE", "NUCLEO_F446ZE", "NUCLEO_F429ZI",
"DISCO_F407VG", "NUCLEO_F746ZG", "NUCLEO_L476RG",
"DISCO_L053C8", "DISCO_F334C8", "DISCO_L476VG", "DISCO_F469NI", "DISCO_F429ZI", "DISCO_F769NI",
"ARCH_MAX", "MAX32600MBED", "MOTE_L152RC", "B96B_F446VE"]
},
{
"id": "MBED_A9", "description": "Serial Echo at 115200",
"source_dir": join(TEST_DIR, "mbed", "echo"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test": "echo"
},
{
"id": "MBED_A10", "description": "PortOut PortIn",
"source_dir": join(TEST_DIR, "mbed", "portout_portin"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"peripherals": ["port_loop"],
"supported": DEFAULT_SUPPORT,
"automated": True,
},
{
"id": "MBED_A11", "description": "PortInOut",
"source_dir": join(TEST_DIR, "mbed", "portinout"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"peripherals": ["port_loop"],
"supported": DEFAULT_SUPPORT,
"automated": True,
},
{
"id": "MBED_A12", "description": "SD File System",
"source_dir": join(TEST_DIR, "mbed", "sd"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
"automated": True,
"peripherals": ["SD"]
},
{
"id": "MBED_A13", "description": "I2C MMA7660 accelerometer",
"source_dir": join(TEST_DIR, "mbed", "i2c_MMA7660"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'MMA7660')],
"automated": True,
"peripherals": ["MMA7660"]
},
{
"id": "MBED_A14", "description": "I2C Master",
"source_dir": join(TEST_DIR, "mbed", "i2c_master"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
},
{
"id": "MBED_A15", "description": "I2C Slave",
"source_dir": join(TEST_DIR, "mbed", "i2c_slave"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
},
{
"id": "MBED_A16", "description": "SPI Master",
"source_dir": join(TEST_DIR, "mbed", "spi_master"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
},
{
"id": "MBED_A17", "description": "SPI Slave",
"source_dir": join(TEST_DIR, "mbed", "spi_slave"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
},
{
"id": "MBED_A18", "description": "Interrupt vector relocation",
"source_dir": join(TEST_DIR, "mbed", "vtor_reloc"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
"mcu": ["LPC1768"],
"automated": True,
},
{
"id": "MBED_A19", "description": "I2C EEPROM read/write test",
"source_dir": join(TEST_DIR, "mbed", "i2c_eeprom"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"peripherals": ["24LC256"],
"automated": True,
},
{
"id": "MBED_A20", "description": "I2C master/slave test",
"source_dir": join(TEST_DIR, "mbed", "i2c_master_slave"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB,],
"automated": True,
"peripherals": ["i2c_loop"]
},
{
"id": "MBED_A21", "description": "Call function before main (mbed_main)",
"source_dir": join(TEST_DIR, "mbed", "call_before_main"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
},
{
"id": "MBED_A22", "description": "SPIFI for LPC4088 (test 1)",
"source_dir": join(TEST_DIR, "mbed", "spifi1"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC4088","LPC4088_DM"]
},
{
"id": "MBED_A23", "description": "SPIFI for LPC4088 (test 2)",
"source_dir": join(TEST_DIR, "mbed", "spifi2"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC4088","LPC4088_DM"]
},
{
"id": "MBED_A24", "description": "Serial echo with RTS/CTS flow control",
"source_dir": join(TEST_DIR, "mbed", "echo_flow_control"),
"dependencies": [MBED_LIBRARIES],
"automated": "True",
"host_test": "echo_flow_control",
"mcu": ["LPC1768"],
"peripherals": ["extra_serial"]
},
{
"id": "MBED_A25", "description": "I2C EEPROM line read/write test",
"source_dir": join(TEST_DIR, "mbed", "i2c_eeprom_line"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"peripherals": ["24LC256"],
"automated": True,
},
{
"id": "MBED_A26", "description": "AnalogIn potentiometer test",
"source_dir": join(TEST_DIR, "mbed", "analog_pot"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"peripherals": ["analog_pot"],
"automated": True,
},
{
"id": "MBED_A27", "description": "CAN loopback test",
"source_dir": join(TEST_DIR, "mbed", "can_loopback"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"peripherals": ["can_transceiver"],
"mcu": ["LPC1549", "LPC1768","B96B_F446VE", "VK_RZ_A1H",
"NUCLEO_F091RC", "NUCLEO_F072RB", "NUCLEO_F042K6", "NUCLEO_F334R8", "NUCLEO_F207ZG",
"NUCLEO_F303RE", "NUCLEO_F303K8", "NUCLEO_F302R8", "NUCLEO_F446RE","NUCLEO_F446ZE",
"DISCO_F469NI", "DISCO_F429ZI", "NUCLEO_F103RB", "NUCLEO_F746ZG",
"DISCO_F746NG", "DISCO_L476VG", "NUCLEO_L476RG", "NUCLEO_L432KC",
"DISCO_F769NI", "NUCLEO_F767ZI", "DISCO_F303VC", "NUCLEO_F412ZG"]
},
{
"id": "MBED_A28", "description": "CAN loopback test",
"source_dir": join(TEST_DIR, "mbed", "can_loopback"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["B96B_F446VE",
"NUCLEO_F091RC", "NUCLEO_F072RB", "NUCLEO_F042K6", "NUCLEO_F334R8", "NUCLEO_F207ZG",
"NUCLEO_F303RE", "NUCLEO_F303K8", "NUCLEO_F302R8", "NUCLEO_F303ZE", "NUCLEO_F446RE","NUCLEO_F446ZE",
"DISCO_F469NI", "DISCO_F429ZI", "NUCLEO_F103RB", "NUCLEO_F746ZG",
"DISCO_F746NG", "DISCO_L476VG", "NUCLEO_L476RG", "NUCLEO_L432KC",
"DISCO_F769NI", "NUCLEO_F767ZI", "DISCO_F303VC", "NUCLEO_F412ZG"]
},
{
"id": "MBED_A29", "description": "i2c_master_slave_asynch",
"source_dir": join(TEST_DIR, "mbed", "i2c_master_slave_asynch"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"peripherals": ["i2c_loop"]
},
{
"id": "MBED_BLINKY", "description": "Blinky",
"source_dir": join(TEST_DIR, "mbed", "blinky"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": False,
},
{
"id": "MBED_BUS", "description": "Blinky BUS",
"source_dir": join(TEST_DIR, "mbed", "bus"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": False,
},
{
"id": "MBED_BUSOUT", "description": "BusOut",
"source_dir": join(TEST_DIR, "mbed", "bus_out"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"exclude_mcu": ["NUCLEO_L011K4"],
"automated": True,
},
# Size benchmarks
{
"id": "BENCHMARK_1", "description": "Size (c environment)",
"source_dir": join(BENCHMARKS_DIR, "cenv"),
"dependencies": [MBED_LIBRARIES]
},
{
"id": "BENCHMARK_2", "description": "Size (float math)",
"source_dir": join(BENCHMARKS_DIR, "float_math"),
"dependencies": [MBED_LIBRARIES]
},
{
"id": "BENCHMARK_3", "description": "Size (printf)",
"source_dir": join(BENCHMARKS_DIR, "printf"),
"dependencies": [MBED_LIBRARIES]
},
{
"id": "BENCHMARK_4", "description": "Size (mbed libs)",
"source_dir": join(BENCHMARKS_DIR, "mbed"),
"dependencies": [MBED_LIBRARIES]
},
{
"id": "BENCHMARK_5", "description": "Size (all)",
"source_dir": join(BENCHMARKS_DIR, "all"),
"dependencies": [MBED_LIBRARIES]
},
# performance related tests
{
"id": "PERF_1", "description": "SD Stdio R/W Speed",
"source_dir": join(TEST_DIR, "mbed", "sd_perf_stdio"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
"automated": True,
"peripherals": ["SD"]
},
{
"id": "PERF_2", "description": "SD FileHandle R/W Speed",
"source_dir": join(TEST_DIR, "mbed", "sd_perf_fhandle"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
"automated": True,
"peripherals": ["SD"]
},
{
"id": "PERF_3", "description": "SD FatFS R/W Speed",
"source_dir": join(TEST_DIR, "mbed", "sd_perf_fatfs"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
"automated": True,
"peripherals": ["SD"]
},
# Not automated MBED tests
{
"id": "MBED_1", "description": "I2C SRF08",
"source_dir": join(TEST_DIR, "mbed", "i2c_SRF08"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'SRF08')],
"peripherals": ["SRF08"]
},
{
"id": "MBED_2", "description": "stdio",
"source_dir": join(TEST_DIR, "mbed", "stdio"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test": "stdio_auto"
},
{
"id": "MBED_3", "description": "PortOut",
"source_dir": join(TEST_DIR, "mbed", "portout"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_4", "description": "Sleep",
"source_dir": join(TEST_DIR, "mbed", "sleep"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"mcu": ["LPC1768", "LPC11U24", "LPC4088","LPC4088_DM","NRF51822", "LPC11U68"]
},
{
"id": "MBED_5", "description": "PWM",
"source_dir": join(TEST_DIR, "mbed", "pwm"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB]
},
{
"id": "MBED_6", "description": "SW Reset",
"source_dir": join(TEST_DIR, "mbed", "reset"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_7", "description": "stdio benchmark",
"source_dir": join(TEST_DIR, "mbed", "stdio_benchmark"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_8", "description": "SPI",
"source_dir": join(TEST_DIR, "mbed", "spi"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_9", "description": "Sleep Timeout",
"source_dir": join(TEST_DIR, "mbed", "sleep_timeout"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_10", "description": "Hello World",
"source_dir": join(TEST_DIR, "mbed", "hello"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test": "hello_auto",
},
{
"id": "MBED_11", "description": "Ticker Int",
"source_dir": join(TEST_DIR, "mbed", "ticker"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test": "wait_us_auto",
},
{
"id": "MBED_12", "description": "C++",
"source_dir": join(TEST_DIR, "mbed", "cpp"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True
},
{
"id": "MBED_13", "description": "Heap & Stack",
"source_dir": join(TEST_DIR, "mbed", "heap_and_stack"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
},
{
"id": "MBED_14", "description": "Serial Interrupt",
"source_dir": join(TEST_DIR, "mbed", "serial_interrupt"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
},
{
"id": "MBED_15", "description": "RPC",
"source_dir": join(TEST_DIR, "mbed", "rpc"),
"dependencies": [MBED_LIBRARIES, join(LIB_DIR, "rpc"), TEST_MBED_LIB],
"automated": False,
"mcu": ["LPC1768"]
},
{
"id": "MBED_16", "description": "RTC",
"source_dir": join(TEST_DIR, "mbed", "rtc"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"exclude_mcu": ["NRF51822", "NRF51822_BOOT", "NRF51822_OTA", "NRF51822_Y5_MBUG",
"NRF51_DK", "NRF51_DK_BOOT", "NRF51_DK_OTA",
"NRF51_MICROBIT", "NRF51_MICROBIT_B", "NRF51_MICROBIT_BOOT",
"NRF51_MICROBIT_B_BOOT", "NRF51_MICROBIT_B_OTA", "NRF51_MICROBIT_OTA",
"HRM1017", "HRM1017_BOOT", "HRM1701_OTA",
"NUCLEO_L011K4",
"TY51822R3", "TY51822R3_BOOT", "TY51822R3_OTA",
"NRF15_DONGLE", "NRF15_DONGLE_BOOT", "NRF15_DONGLE_OTA",
"ARCH_BLE", "ARCH_BLE_BOOT", "ARCH_BLE_OTA",
"ARCH_LINK", "ARCH_LINK_BOOT", "ARCH_LINK_OTA",
"RBLAB_BLENANO", "RBLAB_BLENANO_BOOT", "RBLAB_BLENANO_OTA",
"RBLAB_NRF51822", "RBLAB_NRF51822_BOOT", "RBLAB_NRF51822_OTA",
"SEEED_TINY_BLE", "SEEED_TINY_BLE_BOOT", "SEEED_TINY_BLE_OTA",
"WALLBOT_BLE", "WALLBOT_BLE_BOOT", "WALLBOT_BLE_OTA",
"DELTA_DFCM_NNN40", "DELTA_DFCM_NNN40_BOOT", "DELTA_DFCM_NNN40_OTA",
"LPC1114"],
#"host_test": "rtc_auto",
},
{
"id": "MBED_17", "description": "Serial Interrupt 2",
"source_dir": join(TEST_DIR, "mbed", "serial_interrupt_2"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_18", "description": "Local FS Directory",
"source_dir": join(TEST_DIR, "mbed", "dir"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_19", "description": "SD FS Directory",
"source_dir": join(TEST_DIR, "mbed", "dir_sd"),
"dependencies": [MBED_LIBRARIES, FS_LIBRARY],
"peripherals": ["SD"]
},
{
"id": "MBED_20", "description": "InterruptIn 2",
"source_dir": join(TEST_DIR, "mbed", "interruptin_2"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_21", "description": "freopen Stream",
"source_dir": join(TEST_DIR, "mbed", "freopen"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_22", "description": "Semihost",
"source_dir": join(TEST_DIR, "mbed", "semihost"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC2368", "LPC11U24"]
},
{
"id": "MBED_23", "description": "Ticker Int us",
"source_dir": join(TEST_DIR, "mbed", "ticker_2"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test": "wait_us_auto"
},
{
"id": "MBED_24", "description": "Timeout Int us",
"source_dir": join(TEST_DIR, "mbed", "timeout"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test": "wait_us_auto"
},
{
"id": "MBED_25", "description": "Time us",
"source_dir": join(TEST_DIR, "mbed", "time_us"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test": "wait_us_auto"
},
{
"id": "MBED_26", "description": "Integer constant division",
"source_dir": join(TEST_DIR, "mbed", "div"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
},
{
"id": "MBED_27", "description": "SPI ADXL345",
"source_dir": join(TEST_DIR, "mbed", "spi_ADXL345"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'ADXL345')],
"peripherals": ["ADXL345"]
},
{
"id": "MBED_28", "description": "Interrupt chaining (InterruptManager)",
"source_dir": join(TEST_DIR, "mbed", "interrupt_chaining"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
},
{
"id": "MBED_29", "description": "CAN network test",
"source_dir": join(TEST_DIR, "mbed", "can"),
"dependencies": [MBED_LIBRARIES],
"mcu": ["LPC1768", "LPC4088", "LPC1549", "RZ_A1H", "B96B_F446VE", "NUCLEO_F091RC",
"NUCLEO_F072RB", "NUCLEO_F042K6", "NUCLEO_F334R8", "NUCLEO_F303RE",
"NUCLEO_F303K8", "NUCLEO_F302R8", "NUCLEO_F446RE","NUCLEO_F446ZE", "DISCO_F469NI", "NUCLEO_F207ZG",
"DISCO_F429ZI", "NUCLEO_F103RB", "NUCLEO_F746ZG", "DISCO_F746NG",
"NUCLEO_L476RG", "NUCLEO_L432KC", "DISCO_F303VC", "NUCLEO_F412ZG"]
},
{
"id": "MBED_30", "description": "CAN network test using interrupts",
"source_dir": join(TEST_DIR, "mbed", "can_interrupt"),
"dependencies": [MBED_LIBRARIES],
"mcu": ["LPC1768", "LPC4088", "LPC1549", "RZ_A1H", "B96B_F446VE", "NUCLEO_F091RC", "NUCLEO_F207ZG",
"NUCLEO_F072RB", "NUCLEO_F042K6", "NUCLEO_F334R8", "NUCLEO_F303RE",
"NUCLEO_F303K8", "NUCLEO_F302R8", "NUCLEO_F446RE", "NUCLEO_F446ZE", "DISCO_F469NI",
"DISCO_F429ZI", "NUCLEO_F103RB", "NUCLEO_F746ZG", "DISCO_F746NG",
"NUCLEO_L476RG", "NUCLEO_L432KC", "DISCO_F303VC", "NUCLEO_F412ZG"]
},
{
"id": "MBED_31", "description": "PWM LED test",
"source_dir": join(TEST_DIR, "mbed", "pwm_led"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_32", "description": "Pin toggling",
"source_dir": join(TEST_DIR, "mbed", "pin_toggling"),
"dependencies": [MBED_LIBRARIES],
},
{
"id": "MBED_33", "description": "C string operations",
"source_dir": join(TEST_DIR, "mbed", "cstring"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": False,
},
{
"id": "MBED_34", "description": "Ticker Two callbacks",
"source_dir": join(TEST_DIR, "mbed", "ticker_3"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test": "wait_us_auto"
},
{
"id": "MBED_35", "description": "SPI C12832 display",
"source_dir": join(TEST_DIR, "mbed", "spi_C12832"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'C12832')],
"peripherals": ["C12832"],
"automated": True,
},
{
"id": "MBED_36", "description": "WFI correct behavior",
"source_dir": join(TEST_DIR, "mbed", "wfi"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": False
},
{
"id": "MBED_37", "description": "Serial NC RX",
"source_dir": join(TEST_DIR, "mbed", "serial_nc_rx"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"exclude_mcu": ["NUCLEO_L011K4"],
"automated": True
},
{
"id": "MBED_38", "description": "Serial NC TX",
"source_dir": join(TEST_DIR, "mbed", "serial_nc_tx"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"exclude_mcu": ["NUCLEO_L011K4"],
"automated": True
},
{
"id": "MBED_39", "description": "Serial Complete",
"source_dir": join(TEST_DIR, "mbed", "serial_complete"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": False
},
# CMSIS RTOS tests
{
"id": "CMSIS_RTOS_1", "description": "Basic",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "basic"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
},
{
"id": "CMSIS_RTOS_2", "description": "Mutex",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "mutex"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
},
{
"id": "CMSIS_RTOS_3", "description": "Semaphore",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "semaphore"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
},
{
"id": "CMSIS_RTOS_4", "description": "Signals",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "signals"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
},
{
"id": "CMSIS_RTOS_5", "description": "Queue",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "queue"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
},
{
"id": "CMSIS_RTOS_6", "description": "Mail",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "mail"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
},
{
"id": "CMSIS_RTOS_7", "description": "Timer",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "timer"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
},
{
"id": "CMSIS_RTOS_8", "description": "ISR",
"source_dir": join(TEST_DIR, "rtos", "cmsis", "isr"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES],
},
# mbed RTOS tests
{
"id": "RTOS_1", "description": "Basic thread",
"source_dir": join(TEST_DIR, "rtos", "mbed", "basic"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test": "wait_us_auto",
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K22F", "K64F", "K66F", "KL43Z", "KL46Z", "HEXIWEAR",
"RZ_A1H", "VK_RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "NUCLEO_F412ZG", "DISCO_F469NI", "NUCLEO_F410RB", "NUCLEO_F429ZI",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F070RB", "NUCLEO_F207ZG",
"NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "NUCLEO_L432KC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303ZE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F446ZE", "NUCLEO_F103RB", "DISCO_F746NG", "NUCLEO_F746ZG", "MOTE_L152RC", "B96B_F446VE",
"EFM32HG_STK3400", "EFM32PG_STK3401", "EFM32LG_STK3600", "EFM32GG_STK3700", "EFM32WG_STK3800",
"NUMAKER_PFM_NUC472", "NUMAKER_PFM_M453",
"NRF51822", "NRF51_DK", "SEEED_TINY_BLE", "ARM_BEETLE_SOC", "NUCLEO_F767ZI", "DISCO_F769NI"],
},
{
"id": "RTOS_2", "description": "Mutex resource lock",
"source_dir": join(TEST_DIR, "rtos", "mbed", "mutex"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K22F", "K64F", "K66F", "KL43Z", "KL46Z", "HEXIWEAR",
"RZ_A1H", "VK_RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "NUCLEO_F412ZG", "DISCO_F469NI", "NUCLEO_F410RB", "NUCLEO_F429ZI",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F303ZE", "NUCLEO_F070RB", "NUCLEO_F207ZG",
"NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "NUCLEO_L432KC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F103RB", "DISCO_F746NG",
"NUCLEO_F446ZE", "NUCLEO_F746ZG", "MOTE_L152RC", "B96B_F446VE",
"EFM32HG_STK3400", "EFM32PG_STK3401", "EFM32LG_STK3600", "EFM32GG_STK3700", "EFM32WG_STK3800",
"NUMAKER_PFM_NUC472", "NUMAKER_PFM_M453",
"NRF51822", "NRF51_DK", "SEEED_TINY_BLE", "ARM_BEETLE_SOC", "NUCLEO_F767ZI", "DISCO_F769NI"],
},
{
"id": "RTOS_3", "description": "Semaphore resource lock",
"source_dir": join(TEST_DIR, "rtos", "mbed", "semaphore"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K22F", "K64F", "K66F", "KL43Z", "KL46Z", "HEXIWEAR",
"RZ_A1H", "VK_RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "NUCLEO_F412ZG", "DISCO_F469NI", "NUCLEO_F410RB", "NUCLEO_F429ZI",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F303ZE", "NUCLEO_F070RB", "NUCLEO_F207ZG",
"NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "NUCLEO_L432KC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F446ZE",
"NUCLEO_F103RB", "DISCO_F746NG",
"NUCLEO_F746ZG", "MOTE_L152RC", "B96B_F446VE",
"EFM32HG_STK3400", "EFM32PG_STK3401", "EFM32LG_STK3600", "EFM32GG_STK3700", "EFM32WG_STK3800",
"NUMAKER_PFM_NUC472", "NUMAKER_PFM_M453",
"NRF51822", "NRF51_DK", "SEEED_TINY_BLE", "ARM_BEETLE_SOC", "NUCLEO_F767ZI", "DISCO_F769NI"],
},
{
"id": "RTOS_4", "description": "Signals messaging",
"source_dir": join(TEST_DIR, "rtos", "mbed", "signals"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K22F", "K64F", "K66F", "KL43Z", "KL46Z", "HEXIWEAR",
"RZ_A1H", "VK_RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "NUCLEO_F412ZG", "DISCO_F469NI", "NUCLEO_F410RB", "NUCLEO_F429ZI",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F303ZE", "NUCLEO_F070RB", "NUCLEO_F207ZG",
"NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "NUCLEO_L432KC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE", "NUCLEO_F446RE", "NUCLEO_F446ZE",
"NUCLEO_F103RB", "DISCO_F746NG",
"NUCLEO_F746ZG", "MOTE_L152RC", "B96B_F446VE",
"EFM32HG_STK3400", "EFM32PG_STK3401", "EFM32LG_STK3600", "EFM32GG_STK3700", "EFM32WG_STK3800",
"NUMAKER_PFM_NUC472", "NUMAKER_PFM_M453",
"NRF51822", "NRF51_DK", "SEEED_TINY_BLE", "ARM_BEETLE_SOC", "NUCLEO_F767ZI", "DISCO_F769NI"],
},
{
"id": "RTOS_5", "description": "Queue messaging",
"source_dir": join(TEST_DIR, "rtos", "mbed", "queue"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K22F", "K64F", "K66F", "KL43Z", "KL46Z", "HEXIWEAR",
"RZ_A1H", "VK_RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "NUCLEO_F412ZG", "DISCO_F469NI", "NUCLEO_F410RB", "NUCLEO_F429ZI",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F303ZE", "NUCLEO_F070RB", "NUCLEO_F207ZG",
"NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "NUCLEO_L432KC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE",
"NUCLEO_F446RE", "NUCLEO_F446ZE", "NUCLEO_F103RB", "DISCO_F746NG", "NUCLEO_F746ZG", "MOTE_L152RC", "B96B_F446VE",
"EFM32HG_STK3400", "EFM32PG_STK3401", "EFM32LG_STK3600", "EFM32GG_STK3700", "EFM32WG_STK3800",
"NUMAKER_PFM_NUC472", "NUMAKER_PFM_M453",
"NRF51822", "NRF51_DK", "SEEED_TINY_BLE", "ARM_BEETLE_SOC", "NUCLEO_F767ZI", "DISCO_F769NI"],
},
{
"id": "RTOS_6", "description": "Mail messaging",
"source_dir": join(TEST_DIR, "rtos", "mbed", "mail"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K22F", "K64F", "K66F", "KL43Z", "KL46Z", "HEXIWEAR",
"RZ_A1H", "VK_RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "NUCLEO_F412ZG", "DISCO_F469NI", "NUCLEO_F410RB", "NUCLEO_F429ZI",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F303ZE", "NUCLEO_F070RB", "NUCLEO_F207ZG",
"NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "NUCLEO_L432KC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE",
"NUCLEO_F446RE", "NUCLEO_F446ZE", "NUCLEO_F103RB", "DISCO_F746NG", "NUCLEO_F746ZG", "MOTE_L152RC", "B96B_F446VE",
"EFM32HG_STK3400", "EFM32PG_STK3401", "EFM32LG_STK3600", "EFM32GG_STK3700", "EFM32WG_STK3800",
"NUMAKER_PFM_NUC472", "NUMAKER_PFM_M453",
"NRF51822", "NRF51_DK", "SEEED_TINY_BLE", "ARM_BEETLE_SOC", "NUCLEO_F767ZI", "DISCO_F769NI"],
},
{
"id": "RTOS_7", "description": "Timer",
"source_dir": join(TEST_DIR, "rtos", "mbed", "timer"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test": "wait_us_auto",
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K22F", "K64F", "K66F", "KL43Z", "KL46Z", "HEXIWEAR",
"RZ_A1H", "VK_RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "NUCLEO_F412ZG", "DISCO_F469NI", "NUCLEO_F410RB", "NUCLEO_F429ZI",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F303ZE", "NUCLEO_F070RB", "NUCLEO_F207ZG",
"NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "NUCLEO_L432KC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE",
"NUCLEO_F446RE", "NUCLEO_F446ZE", "NUCLEO_F103RB", "DISCO_F746NG", "NUCLEO_F746ZG", "MOTE_L152RC", "B96B_F446VE",
"EFM32HG_STK3400", "EFM32PG_STK3401", "EFM32LG_STK3600", "EFM32GG_STK3700", "EFM32WG_STK3800",
"NUMAKER_PFM_NUC472", "NUMAKER_PFM_M453",
"NRF51822", "NRF51_DK", "SEEED_TINY_BLE", "ARM_BEETLE_SOC", "NUCLEO_F767ZI", "DISCO_F769NI"],
},
{
"id": "RTOS_8", "description": "ISR (Queue)",
"source_dir": join(TEST_DIR, "rtos", "mbed", "isr"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB],
"automated": True,
"mcu": ["LPC1768", "LPC1549", "LPC11U24", "LPC812", "LPC2460", "LPC824", "SSCI824",
"KL25Z", "KL05Z", "K22F", "K64F", "K66F", "KL43Z", "KL46Z", "HEXIWEAR",
"RZ_A1H", "VK_RZ_A1H", "DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F411RE", "NUCLEO_F412ZG", "DISCO_F469NI", "NUCLEO_F410RB", "NUCLEO_F429ZI",
"NUCLEO_F401RE", "NUCLEO_F334R8", "DISCO_F334C8", "NUCLEO_F302R8", "NUCLEO_F303ZE", "NUCLEO_F070RB", "NUCLEO_F207ZG",
"NUCLEO_L073RZ", "NUCLEO_F072RB", "NUCLEO_F091RC", "NUCLEO_L432KC", "DISCO_L476VG", "NUCLEO_L476RG",
"DISCO_F401VC", "NUCLEO_F303RE", "NUCLEO_F303K8", "MAXWSNENV", "MAX32600MBED", "NUCLEO_L152RE",
"NUCLEO_F446RE", "NUCLEO_F446ZE", "NUCLEO_F103RB", "DISCO_F746NG", "NUCLEO_F746ZG", "MOTE_L152RC", "B96B_F446VE",
"EFM32HG_STK3400", "EFM32PG_STK3401", "EFM32LG_STK3600", "EFM32GG_STK3700", "EFM32WG_STK3800",
"NUMAKER_PFM_NUC472", "NUMAKER_PFM_M453",
"NRF51822", "NRF51_DK", "SEEED_TINY_BLE", "ARM_BEETLE_SOC", "NUCLEO_F767ZI", "DISCO_F769NI"],
},
{
"id": "RTOS_9", "description": "SD File write-read",
"source_dir": join(TEST_DIR, "rtos", "mbed", "file"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
"automated": True,
"peripherals": ["SD"],
"mcu": ["LPC1768", "LPC11U24", "LPC812", "KL25Z", "HEXIWEAR",
"KL05Z", "K64F", "K66F", "KL46Z", "RZ_A1H",
"NUMAKER_PFM_NUC472", "NUMAKER_PFM_M453",
"DISCO_F407VG", "DISCO_F429ZI", "NUCLEO_F429ZI", "NUCLEO_F411RE", "NUCLEO_F412ZG", "NUCLEO_F401RE", "NUCLEO_F410RB", "DISCO_F469NI", "NUCLEO_F207ZG"],
},
# Networking Tests
{
"id": "NET_1", "description": "TCP client hello world",
"source_dir": join(TEST_DIR, "net", "helloworld", "tcpclient"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
"peripherals": ["ethernet"],
},
{
"id": "NET_2", "description": "NIST Internet Time Service",
"source_dir": join(TEST_DIR, "net", "helloworld", "udpclient"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
"peripherals": ["ethernet"],
},
{
"id": "NET_3", "description": "TCP echo server",
"source_dir": join(TEST_DIR, "net", "echo", "tcp_server"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
#"host_test" : "tcpecho_server_auto",
"peripherals": ["ethernet"],
},
{
"id": "NET_4", "description": "TCP echo client",
"source_dir": join(TEST_DIR, "net", "echo", "tcp_client"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
#"host_test": "tcpecho_client_auto",
"peripherals": ["ethernet"]
},
{
"id": "NET_5", "description": "UDP echo server",
"source_dir": join(TEST_DIR, "net", "echo", "udp_server"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
#"host_test" : "udpecho_server_auto",
"peripherals": ["ethernet"]
},
{
"id": "NET_6", "description": "UDP echo client",
"source_dir": join(TEST_DIR, "net", "echo", "udp_client"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
#"host_test" : "udpecho_client_auto",
"peripherals": ["ethernet"],
},
{
"id": "NET_7", "description": "HTTP client hello world",
"source_dir": join(TEST_DIR, "net", "protocols", "HTTPClient_HelloWorld"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
"peripherals": ["ethernet"],
},
{
"id": "NET_8", "description": "NTP client",
"source_dir": join(TEST_DIR, "net", "protocols", "NTPClient_HelloWorld"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
"peripherals": ["ethernet"],
},
{
"id": "NET_9", "description": "Multicast Send",
"source_dir": join(TEST_DIR, "net", "helloworld", "multicast_send"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY],
"peripherals": ["ethernet"],
},
{
"id": "NET_10", "description": "Multicast Receive",
"source_dir": join(TEST_DIR, "net", "helloworld", "multicast_receive"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY],
"peripherals": ["ethernet"],
},
{
"id": "NET_11", "description": "Broadcast Send",
"source_dir": join(TEST_DIR, "net", "helloworld", "broadcast_send"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY],
"peripherals": ["ethernet"],
},
{
"id": "NET_12", "description": "Broadcast Receive",
"source_dir": join(TEST_DIR, "net", "helloworld", "broadcast_receive"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY],
"peripherals": ["ethernet"],
},
{
"id": "NET_13", "description": "TCP client echo loop",
"source_dir": join(TEST_DIR, "net", "echo", "tcp_client_loop"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY, TEST_MBED_LIB],
"automated": True,
#"host_test": "tcpecho_client_auto",
"peripherals": ["ethernet"],
},
{
"id": "NET_14", "description": "UDP PHY/Data link layer",
"source_dir": join(TEST_DIR, "net", "echo", "udp_link_layer"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, ETH_LIBRARY],
"automated": False,
"host_test": "udp_link_layer_auto",
"peripherals": ["ethernet"],
},
# u-blox tests
{
"id": "UB_1", "description": "u-blox USB modem: HTTP client",
"source_dir": [join(TEST_DIR, "net", "cellular", "http", "ubloxusb"), join(TEST_DIR, "net", "cellular", "http", "common")],
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, USB_HOST_LIBRARIES, UBLOX_LIBRARY],
"supported": CORTEX_ARM_SUPPORT,
},
{
"id": "UB_2", "description": "u-blox USB modem: SMS test",
"source_dir": [join(TEST_DIR, "net", "cellular", "sms", "ubloxusb"), join(TEST_DIR, "net", "cellular", "sms", "common")],
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, USB_HOST_LIBRARIES, UBLOX_LIBRARY],
"supported": CORTEX_ARM_SUPPORT,
},
# USB Tests
# USB device test list
{
"id": "USB_1", "description": "Mouse",
"source_dir": join(TEST_DIR, "usb", "device", "basic"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
{
"id": "USB_2", "description": "Keyboard",
"source_dir": join(TEST_DIR, "usb", "device", "keyboard"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
{
"id": "USB_3", "description": "Mouse_Keyboard",
"source_dir": join(TEST_DIR, "usb", "device", "keyboard"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
{
"id": "USB_4", "description": "Serial Port",
"source_dir": join(TEST_DIR, "usb", "device", "serial"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
{
"id": "USB_5", "description": "Generic HID",
"source_dir": join(TEST_DIR, "usb", "device", "raw_hid"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
{
"id": "USB_6", "description": "MIDI",
"source_dir": join(TEST_DIR, "usb", "device", "midi"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
{
"id": "USB_7", "description": "AUDIO",
"source_dir": join(TEST_DIR, "usb", "device", "audio"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
{
"id": "USB_8", "description": "AUDIO_CB",
"source_dir": join(TEST_DIR, "usb", "device", "audio_cb"),
"dependencies": [MBED_LIBRARIES, USB_LIBRARIES],
},
# USB host test list
{
"id": "USB_10", "description": "MSD",
"source_dir": join(TEST_DIR, "usb", "host", "mass_storage"),
"dependencies": [MBED_LIBRARIES, USB_HOST_LIBRARIES, FAT_FS, RTOS],
},
{
"id": "USB_11", "description": "mouse",
"source_dir": join(TEST_DIR, "usb", "host", "mouse"),
"dependencies": [MBED_LIBRARIES, USB_HOST_LIBRARIES, RTOS],
},
# CMSIS DSP
{
"id": "CMSIS_DSP_1", "description": "FIR",
"source_dir": join(TEST_DIR, "dsp", "cmsis", "fir_f32"),
"dependencies": [MBED_LIBRARIES, DSP_LIBRARIES],
},
# mbed DSP
{
"id": "DSP_1", "description": "FIR",
"source_dir": join(TEST_DIR, "dsp", "mbed", "fir_f32"),
"dependencies": [MBED_LIBRARIES, DSP_LIBRARIES],
},
# KL25Z
{
"id": "KL25Z_1", "description": "LPTMR",
"source_dir": join(TEST_DIR, "KL25Z", "lptmr"),
"dependencies": [MBED_LIBRARIES],
"supported": CORTEX_ARM_SUPPORT,
"mcu": ["KL25Z"],
},
{
"id": "KL25Z_2", "description": "PIT",
"source_dir": join(TEST_DIR, "KL25Z", "pit"),
"dependencies": [MBED_LIBRARIES],
"supported": CORTEX_ARM_SUPPORT,
"mcu": ["KL25Z"],
},
{
"id": "KL25Z_3", "description": "TSI Touch Sensor",
"source_dir": join(TEST_DIR, "mbed", "tsi"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'TSI')],
"mcu": ["KL25Z"],
},
{
"id": "KL25Z_4", "description": "RTC",
"source_dir": join(TEST_DIR, "KL25Z", "rtc"),
"dependencies": [MBED_LIBRARIES],
"mcu": ["KL25Z"],
},
{
"id": "KL25Z_5", "description": "MMA8451Q accelerometer",
"source_dir": join(TEST_DIR, "mbed", "i2c_MMA8451Q"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, join(PERIPHERALS, 'MMA8451Q')],
"mcu": ["KL25Z", "KL05Z", "KL46Z", "K20D50M"],
"automated": True,
},
# Examples
{
"id": "EXAMPLE_1", "description": "/dev/null",
"source_dir": join(TEST_DIR, "mbed", "dev_null"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"exclude_mcu": ["NUCLEO_L011K4"],
"automated": True,
#"host_test" : "dev_null_auto",
},
{
"id": "EXAMPLE_2", "description": "FS + RTOS",
"source_dir": join(TEST_DIR, "mbed", "fs"),
"dependencies": [MBED_LIBRARIES, RTOS_LIBRARIES, TEST_MBED_LIB, FS_LIBRARY],
},
    # The CPPUTEST library provides a unit testing framework.
    #
    # To write TESTs and TEST_GROUPs, add CPPUTEST_LIBRARY to 'dependencies'.
    #
    # This will also include:
    # 1. A test runner - a main function that calls CommandLineTestRunner::RunAllTests(ac, av)
    # 2. A serial console object to print test results on the serial port console
    #
    # Unit tests using the cpputest library
{
"id": "UT_1", "description": "Basic",
"source_dir": join(TEST_DIR, "utest", "basic"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_2", "description": "Semihost file system",
"source_dir": join(TEST_DIR, "utest", "semihost_fs"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
"mcu": ["LPC1768", "LPC2368", "LPC11U24"]
},
{
"id": "UT_3", "description": "General tests",
"source_dir": join(TEST_DIR, "utest", "general"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_BUSIO", "description": "BusIn BusOut",
"source_dir": join(TEST_DIR, "utest", "bus"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_I2C_EEPROM_ASYNCH", "description": "I2C Asynch eeprom",
"source_dir": join(TEST_DIR, "utest", "i2c_eeprom_asynch"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_SERIAL_ASYNCH", "description": "Asynch serial test (req 2 serial peripherals)",
"source_dir": join(TEST_DIR, "utest", "serial_asynch"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_SPI_ASYNCH", "description": "Asynch spi test",
"source_dir": join(TEST_DIR, "utest", "spi_asynch"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
{
"id": "UT_LP_TICKER", "description": "Low power ticker test",
"source_dir": join(TEST_DIR, "utest", "lp_ticker"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB, CPPUTEST_LIBRARY],
"automated": False,
},
# Tests used for target information purposes
{
"id": "DTCT_1", "description": "Simple detect test",
"source_dir": join(TEST_DIR, "mbed", "detect"),
"dependencies": [MBED_LIBRARIES, TEST_MBED_LIB],
"automated": True,
#"host_test" : "detect_auto",
},
]
# Group tests with the same goals into categories
GROUPS = {
"core": ["MBED_A1", "MBED_A2", "MBED_A3", "MBED_A18"],
"digital_io": ["MBED_A5", "MBED_A6", "MBED_A7", "MBED_A10", "MBED_A11"],
"analog_io": ["MBED_A8"],
"i2c": ["MBED_A19", "MBED_A20"],
"spi": ["MBED_A12"],
}
GROUPS["rtos"] = [test["id"] for test in TESTS if test["id"].startswith("RTOS_")]
GROUPS["net"] = [test["id"] for test in TESTS if test["id"].startswith("NET_")]
GROUPS["automated"] = [test["id"] for test in TESTS if test.get("automated", False)]
# Look for 'TEST_GROUPS' in mbed_settings.py and, if found, update the GROUPS
# dictionary with that information
try:
    from mbed_settings import TEST_GROUPS
except ImportError:
    TEST_GROUPS = {}
GROUPS.update(TEST_GROUPS)
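# A minimal sketch (hypothetical group name; the listed test ids exist above)
# of a TEST_GROUPS override in mbed_settings.py that would merge into GROUPS:
#
#     TEST_GROUPS = {
#         "smoke": ["MBED_A1", "RTOS_8", "NET_1"],
#     }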
class Test:
DEFAULTS = {
#'mcu': None,
'description': None,
'dependencies': None,
'duration': 30,
'host_test': 'host_test',
'automated': False,
'peripherals': None,
#'supported': None,
'source_dir': None,
'extra_files': None
}
def __init__(self, n):
self.n = n
self.__dict__.update(Test.DEFAULTS)
self.__dict__.update(TESTS[n])
def is_supported(self, target, toolchain):
        if hasattr(self, 'mcu') and target not in self.mcu:
return False
if hasattr(self, 'exclude_mcu') and target in self.exclude_mcu:
return False
if not hasattr(self, 'supported'):
return True
return (target in self.supported) and (toolchain in self.supported[target])
def get_description(self):
if self.description:
return self.description
else:
return self.id
def __cmp__(self, other):
return cmp(self.n, other.n)
def __str__(self):
return "[%3d] %s: %s" % (self.n, self.id, self.get_description())
    def __getitem__(self, key):
        # Expose only the known test attributes; any other key maps to None.
        if key in ("id", "mcu", "exclude_mcu", "dependencies", "description",
                   "duration", "host_test", "automated", "peripherals",
                   "supported", "source_dir", "extra_files"):
            return getattr(self, key)
        else:
            return None
TEST_MAP = dict([(test['id'], Test(i)) for i, test in enumerate(TESTS)])
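# e.g. TEST_MAP["NET_1"].get_description() would return "TCP client hello world".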
# parser helpers
def test_known(string):
    i = int(string)
    if 0 <= i < len(TESTS):
        return i
    else:
        raise ArgumentTypeError(
            "{0} does not index a test. The accepted range is 0 to {1}\n"
            "The test mapping is:\n{2}".format(
                i, len(TEST_MAP) - 1,
                columnate(["%d:%s" % (idx, t['id'])
                           for idx, t in enumerate(TESTS)])))
def test_name_known(string):
if string not in TEST_MAP.keys() and \
(getattr(ps, "test_alias", None) is None or \
ps.test_alias.get(string, "") not in TEST_MAP.keys()):
        raise ArgumentTypeError(
            "Program with name '{0}' not found. Supported tests are:\n{1}".format(
                string, columnate([t['id'] for t in TESTS])))
return TEST_MAP[string].n
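# Usage sketch (assumes an argparse parser defined elsewhere in the tools;
# the option names below are illustrative):
#
#     parser.add_argument("-p", type=test_known)       # numeric test index
#     parser.add_argument("-n", type=test_name_known)  # test id, e.g. "NET_1"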
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
from telemetry import decorators
from telemetry.core import exceptions
from telemetry.core import forwarders
from telemetry.core import util
from telemetry.core.backends.chrome import chrome_browser_backend
from telemetry.core.backends.chrome import misc_web_contents_backend
from telemetry.core.forwarders import cros_forwarder
class CrOSBrowserBackend(chrome_browser_backend.ChromeBrowserBackend):
def __init__(self, browser_options, cri, is_guest, extensions_to_load):
super(CrOSBrowserBackend, self).__init__(
supports_tab_control=True, supports_extensions=not is_guest,
browser_options=browser_options,
output_profile_path=None, extensions_to_load=extensions_to_load)
# Initialize fields so that an explosion during init doesn't break in Close.
self._cri = cri
self._is_guest = is_guest
self._forwarder = None
from telemetry.core.backends.chrome import chrome_browser_options
assert isinstance(browser_options,
chrome_browser_options.CrosBrowserOptions)
self.wpr_port_pairs = forwarders.PortPairs(
http=forwarders.PortPair(self.wpr_port_pairs.http.local_port,
self.GetRemotePort(
self.wpr_port_pairs.http.local_port)),
https=forwarders.PortPair(self.wpr_port_pairs.https.local_port,
self.GetRemotePort(
                                      self.wpr_port_pairs.https.local_port)),
dns=None)
self._remote_debugging_port = self._cri.GetRemotePort()
self._port = self._remote_debugging_port
# Copy extensions to temp directories on the device.
# Note that we also perform this copy locally to ensure that
# the owner of the extensions is set to chronos.
for e in extensions_to_load:
extension_dir = cri.RunCmdOnDevice(
['mktemp', '-d', '/tmp/extension_XXXXX'])[0].rstrip()
cri.PushFile(e.path, extension_dir)
cri.Chown(extension_dir)
e.local_path = os.path.join(extension_dir, os.path.basename(e.path))
self._cri.RestartUI(self.browser_options.clear_enterprise_policy)
util.WaitFor(self.IsBrowserRunning, 20)
# Delete test user's cryptohome vault (user data directory).
if not self.browser_options.dont_override_profile:
self._cri.RunCmdOnDevice(['cryptohome', '--action=remove', '--force',
'--user=%s' % self._username])
if self.browser_options.profile_dir:
cri.RmRF(self.profile_directory)
cri.PushFile(self.browser_options.profile_dir + '/Default',
self.profile_directory)
cri.Chown(self.profile_directory)
def GetBrowserStartupArgs(self):
args = super(CrOSBrowserBackend, self).GetBrowserStartupArgs()
args.extend([
'--enable-smooth-scrolling',
'--enable-threaded-compositing',
'--enable-per-tile-painting',
# Disables the start page, as well as other external apps that can
# steal focus or make measurements inconsistent.
'--disable-default-apps',
# Allow devtools to connect to chrome.
'--remote-debugging-port=%i' % self._remote_debugging_port,
# Open a maximized window.
'--start-maximized',
# Skip user image selection screen, and post login screens.
'--oobe-skip-postlogin',
# Debug logging.
'--vmodule=*/chromeos/net/*=2,*/chromeos/login/*=2'])
# Disable GAIA services unless we're using GAIA login, or if there's an
# explicit request for it.
if (self.browser_options.disable_gaia_services and
not self.browser_options.gaia_login):
args.append('--disable-gaia-services')
return args
@property
def pid(self):
return self._cri.GetChromePid()
@property
def browser_directory(self):
result = self._cri.GetChromeProcess()
if result and 'path' in result:
return os.path.dirname(result['path'])
return None
@property
def profile_directory(self):
return '/home/chronos/Default'
def GetRemotePort(self, port):
if self._cri.local:
return port
return self._cri.GetRemotePort()
def __del__(self):
self.Close()
def Start(self):
# Escape all commas in the startup arguments we pass to Chrome
# because dbus-send delimits array elements by commas
startup_args = [a.replace(',', '\\,') for a in self.GetBrowserStartupArgs()]
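    # e.g. '--vmodule=a=1,b=2' becomes '--vmodule=a=1\,b=2', so the
    # comma-joined dbus array argument below splits back into whole flags.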
# Restart Chrome with the login extension and remote debugging.
logging.info('Restarting Chrome with flags and login')
args = ['dbus-send', '--system', '--type=method_call',
'--dest=org.chromium.SessionManager',
'/org/chromium/SessionManager',
'org.chromium.SessionManagerInterface.EnableChromeTesting',
'boolean:true',
'array:string:"%s"' % ','.join(startup_args)]
self._cri.RunCmdOnDevice(args)
if not self._cri.local:
self._port = util.GetUnreservedAvailableLocalPort()
self._forwarder = self.forwarder_factory.Create(
forwarders.PortPairs(
http=forwarders.PortPair(self._port, self._remote_debugging_port),
https=None,
dns=None), forwarding_flag='L')
# Wait for oobe.
self._WaitForBrowserToComeUp(wait_for_extensions=False)
util.WaitFor(lambda: self.oobe_exists, 10)
if self.browser_options.auto_login:
try:
if self._is_guest:
pid = self.pid
self.oobe.NavigateGuestLogin()
# Guest browsing shuts down the current browser and launches an
# incognito browser in a separate process, which we need to wait for.
util.WaitFor(lambda: pid != self.pid, 10)
elif self.browser_options.gaia_login:
self.oobe.NavigateGaiaLogin(self._username, self._password)
else:
self.oobe.NavigateFakeLogin(self._username, self._password)
self._WaitForLogin()
except util.TimeoutException:
self._cri.TakeScreenShot('login-screen')
raise exceptions.LoginException('Timed out going through login screen')
logging.info('Browser is up!')
def Close(self):
super(CrOSBrowserBackend, self).Close()
if self._cri:
self._cri.RestartUI(False) # Logs out.
self._cri.CloseConnection()
util.WaitFor(lambda: not self._IsCryptohomeMounted(), 30)
if self._forwarder:
self._forwarder.Close()
self._forwarder = None
if self._cri:
for e in self._extensions_to_load:
self._cri.RmRF(os.path.dirname(e.local_path))
self._cri = None
@property
@decorators.Cache
def forwarder_factory(self):
return cros_forwarder.CrOsForwarderFactory(self._cri)
def IsBrowserRunning(self):
return bool(self.pid)
def GetStandardOutput(self):
return 'Cannot get standard output on CrOS'
def GetStackTrace(self):
return 'Cannot get stack trace on CrOS'
@property
@decorators.Cache
def misc_web_contents_backend(self):
"""Access to chrome://oobe/login page."""
return misc_web_contents_backend.MiscWebContentsBackend(self)
@property
def oobe(self):
return self.misc_web_contents_backend.GetOobe()
@property
def oobe_exists(self):
return self.misc_web_contents_backend.oobe_exists
@property
def _username(self):
return self.browser_options.username
@property
def _password(self):
return self.browser_options.password
def _IsCryptohomeMounted(self):
username = '$guest' if self._is_guest else self._username
return self._cri.IsCryptohomeMounted(username, self._is_guest)
def _IsLoggedIn(self):
"""Returns True if cryptohome has mounted, the browser is
responsive to devtools requests, and the oobe has been dismissed."""
return (self._IsCryptohomeMounted() and
self.HasBrowserFinishedLaunching() and
not self.oobe_exists)
def _WaitForLogin(self):
# Wait for cryptohome to mount.
util.WaitFor(self._IsLoggedIn, 60)
# Wait for extensions to load.
self._WaitForBrowserToComeUp()
# Workaround for crbug.com/374462 - the bug doesn't manifest in the guest
# session, which also starts with an open browser tab.
retries = 3
while not self._is_guest and not self.browser_options.gaia_login:
try:
# Open a new window/tab.
tab = self.tab_list_backend.New(timeout=30)
tab.Navigate('about:blank', timeout=10)
break
except (exceptions.TabCrashException, util.TimeoutException,
IndexError):
retries -= 1
logging.warning('TabCrashException/TimeoutException in '
'new tab creation/navigation, '
'remaining retries %d', retries)
if not retries:
raise
| |
import db
import Queue
import urllib2
import logging
import multiprocessing
import binascii
import uuid
import traceback
import datetime
import shapely.wkt
import shapely.geometry
from sqlalchemy import func, select, text
from cStringIO import StringIO
BASIS_TRANSLATION = {
'HumanObservation': 'Human observation',
'MachineObservation': 'Machine observation',
'PreservedSpecimen': 'Preserved specimen'
}
log = logging.getLogger(__name__)
class Syncer:
def __init__(self, ala, species_type, connection):
        '''The `ala` param is the ala.py module. It is passed in as a ctor
        param because it will be substituted with mockala.py during unit
        testing.
        species_type can be "birds" or "vertebrates".'''
row = connection.execute(db.sources.select()
.where(db.sources.c.name == 'ALA')
).fetchone()
if row is None:
raise RuntimeError('ALA row missing from sources table in db')
self.conn = connection
self.source_row_id = row['id']
self.last_import_time = row['last_import_time']
self.num_dirty_records_by_species_id = {}
self.ala = ala
self.species_type = species_type
self.ala_species_occurrence_counts = None # lazy loaded
self.ala_species_by_sname = None # lazy loaded
def sync(self, sync_species=True, sync_occurrences=True):
        # species are never deleted, because occasionally ALA does
# not return the full list of species, which would cause species to be
# deleted locally and orphan their occurrences.
if sync_species:
log.info('Syncing species list')
transaction = self.conn.begin()
try:
added_species, deleted_species = self.added_and_deleted_species()
for species in added_species:
self.add_species(species)
log.info('Committing newly added species')
transaction.commit()
except:
log.critical('Performing rollback due to exception')
transaction.rollback()
raise
# update occurrences
if sync_occurrences:
log.info('Syncing occurrence records')
self.sync_occurrences()
def local_species(self):
'''Returns all db.species rows in the local database.'''
return self.conn.execute(db.species.select()).fetchall()
def local_species_by_scientific_name(self):
'''Returns all species in the local db in a dict. Scientific name is the
key, the db row is the value.'''
species = {}
for row in self.conn.execute(db.species.select()):
            species[row['scientific_name']] = row
return species
def _cache_all_remote_species(self):
if self.ala_species_by_sname is not None:
return # already cached
if self.species_type == "birds":
all_species = self.ala.all_bird_species()
elif self.species_type == "vertebrates":
all_species = self.ala.all_vertebrate_species()
else:
raise RuntimeError("Unknown species_type: " + self.species_type)
log.info("Fetching all %s from ALA", self.species_type)
self.ala_species_by_sname = {}
for species in all_species:
self.ala_species_by_sname[species.scientific_name] = species
def added_and_deleted_species(self):
'''Returns (added, deleted) where `added` is an iterable of ala.Species
objects that are not present in the local db, and `deleted` is an iterable
of rows from the db.species table that were not found at ALA.'''
local = self.local_species_by_scientific_name()
local_set = frozenset(local.keys())
self._cache_all_remote_species()
remote = self.ala_species_by_sname
remote_set = frozenset(remote.keys())
added_set = remote_set - local_set
deleted_set = local_set - remote_set
added = [species for name, species in remote.iteritems() if name in added_set]
deleted = [row for name, row in local.iteritems() if name in deleted_set]
return (added, deleted)
def add_species(self, species):
'''Adds `species` to the local db, where `species` is an ala.Species
object'''
log.info('Adding new species "%s"', species.scientific_name)
self.conn.execute(db.species.insert().values(
scientific_name=species.scientific_name,
common_name=species.common_name))
def delete_species(self, row):
        '''Deletes `row` from the local db, where `row` is a row from the
        db.species table'''
log.info('Deleting species "%s"', row['scientific_name'])
self.conn.execute(db.species.delete().where(db.species.c.id == row['id']))
def sync_occurrences(self):
'''Performs all adding, updating, and deleting of rows in the
db.occurrences table of the database.
Also updates db.species.c.num_dirty_occurrences.'''
start_time = datetime.datetime.utcnow()
# insert new, and update existing, occurrences
transaction = self.conn.begin()
try:
occ_generator = self.mp_fetch_occurrences(since=self.last_import_time,
record_dirty=True)
for occ in occ_generator:
self.upsert_occurrence(occ, occ.species_id)
# update last import time for ALA
self.conn.execute(db.sources.update()
.where(db.sources.c.id == self.source_row_id)
.values(last_import_time=start_time))
log.info('Committing updated occurrences')
transaction.commit()
except:
log.critical('Performing rollback due to exception')
transaction.rollback()
raise
# Brute force re-download all occurrences if the local and
# remote counts don't match
        log.info('Performing re-download of occurrences for species '
                 'with incorrect occurrence counts')
transaction = self.conn.begin()
try:
self.redownload_occurrences_if_needed()
log.info('Committing re-downloaded occurrences')
transaction.commit()
except:
log.critical('Performing rollback due to exception')
transaction.rollback()
raise
# log warnings if the counts dont match up
log.info('Checking that local occurrence counts match ALA')
self.check_occurrence_counts()
# calculate has_occurrences col for all dirtied species
if len(self.num_dirty_records_by_species_id) > 0:
            log.info('Updating has_occurrences for all species')
dirty_ids = [str(int(x)) for x in self.num_dirty_records_by_species_id.keys()]
self.conn.execute('''
UPDATE species
SET has_occurrences = ((SELECT COUNT(*) FROM occurrences
WHERE species_id = species.id
LIMIT 1) > 0)
WHERE id IN ({dirtied_species});
'''.format(
dirtied_species=','.join(dirty_ids)
            ))
# increase number in db.species.num_dirty_occurrences
log.info('Updating number of dirty occurrences')
self.update_num_dirty_occurrences()
def redownload_occurrences_if_needed(self):
'''Re-downloads every single record for each species, but only if the
occurrence counts differ between ALA and the local database.
This must be called as the last step in syncing occurrence records,
because adding and updating occurrences will alter the number of
records per species.
This operation is expensive in terms of memory and time, so it only
happens when the local occurrence count per species is different to the
count at ALA.
Builds a set of uuid.UUID objects for records that exist at ALA, then
checks every local record to see if it still exists in the set.'''
species_to_redownload = []
for row, lc, rc in self.species_with_occurrence_counts():
            # don't run unless our count is different from ALA's count
if lc == rc:
continue
log.warning('Performing full re-download for species %s. ' +
'(local count = %d, ALA count = %d)',
row['scientific_name'],
lc, rc)
species_to_redownload.append(row)
# delete local records
# will cascade into sensitive_occurrences table
self.conn.execute(db.occurrences.delete()
.where(db.occurrences.c.species_id == row['id'])
.where(db.occurrences.c.source_id == self.source_row_id))
# keep track of the deletions and additions
self.increase_dirty_count(row['id'], abs(lc - rc))
# fetch all the records again
occ_generator = self.mp_fetch_occurrences(
since=None,
species_to_fetch=species_to_redownload,
record_dirty=False)
for occurrence in occ_generator:
self.upsert_occurrence(occurrence, occurrence.species_id)
def check_occurrence_counts(self):
'''Logs warnings if the local and remote occurrence counts per species
do not match.'''
for row, lc, rc in self.species_with_occurrence_counts():
if lc == rc:
continue # counts are the same
            log.warning('Occurrence counts differ for species %s ' +
                        '(local count = %d, ALA count = %d)',
row['scientific_name'],
lc, rc)
def species_with_occurrence_counts(self):
'''Checks the number of local occurrences against the number of
occurrences at ALA, yielding the db.species row, local count and remote
count if the counts are different.'''
remote_counts = self.remote_occurrence_counts_by_species_id()
local_counts = self.local_occurrence_counts_by_species_id()
for row in self.local_species():
if row['id'] in remote_counts:
yield (row, local_counts[row['id']], remote_counts[row['id']])
def remote_occurrence_counts_by_species_id(self):
'''Returns a dict with db.species.c.id keys, and the values are the
number of occurrences present at ALA for that species.
The results are cached after the first call to this method.'''
# try return cached data
if self.ala_species_occurrence_counts is not None:
return self.ala_species_occurrence_counts
log.info('Fetching ALA occurrence counts for species')
input_q = multiprocessing.Queue()
pool = multiprocessing.Pool(8, _mp_init, [input_q, self.ala])
active_workers = 0
# fill pool with every species
for row in self.local_species():
species = self.ala_species_for_scientific_name(row['scientific_name'])
if species is not None:
args = (species, row['id'])
pool.apply_async(_mp_fetch_occur_count, args)
active_workers += 1
pool.close()
#keep reading from the queue until all subprocesses are done
self.ala_species_occurrence_counts = {}
while active_workers > 0:
result = input_q.get()
active_workers -= 1
if active_workers % 100 == 0:
log.info('%d species remaining to fetch occurrence counts for',
active_workers)
if len(result) == 2:
species_id, occ_count = result
self.ala_species_occurrence_counts[species_id] = occ_count
else:
raise RuntimeError("Worker process failed: " + result[0])
# all the subprocesses should be dead by now
pool.join()
log.info('Finished fetching ALA occurrence counts for species')
        return self.ala_species_occurrence_counts
def local_occurrence_counts_by_species_id(self):
        '''Returns a dict with db.species.c.id keys, and the values are the
number of occurrences present in the local database for that
species.'''
counts = {}
for row in self.local_species():
local_count = self.conn.execute(select(
[func.count('*')],
#where
(db.occurrences.c.species_id == row['id']) &
(db.occurrences.c.source_id == self.source_row_id)
)).scalar()
counts[row['id']] = local_count
return counts
def ala_species_for_scientific_name(self, scientific_name):
'''Same as ala.species_for_scientific_name except caches the result'''
self._cache_all_remote_species()
if scientific_name in self.ala_species_by_sname:
return self.ala_species_by_sname[scientific_name]
else:
species = self.ala.species_for_scientific_name(scientific_name)
self.ala_species_by_sname[scientific_name] = species
return species
def upsert_occurrence(self, occ, species_id):
'''Looks up whether `occurrence` (an ala.Occurrence object)
already exists in the local db. If it does, the db row is updated with
the information in `occurrence`. If it does not exist, a new row is
inserted.
`species_id` must be supplied as an argument because it is not
obtainable from `occ` alone. Also expects `occ.classification` to be
valid.'''
sql = '''SELECT EdgarUpsertOccurrence(
{classi},
{date},
{srid},
{lat},
{lon},
{slat},
{slon},
{uncertainty},
{basis},
{species_id},
{source_id},
{record_id});'''.format(
classi="'{0}'".format(occ.classification),
date=('NULL' if occ.date is None else "'{0}'".format(occ.date.isoformat())),
srid='4326',
lat=str(float(occ.coord.lati)),
lon=str(float(occ.coord.longi)),
slat=('NULL' if occ.sensitive_coord is None else str(float(occ.sensitive_coord.lati))),
slon=('NULL' if occ.sensitive_coord is None else str(float(occ.sensitive_coord.longi))),
uncertainty=('NULL' if occ.uncertainty is None else str(int(occ.uncertainty))),
basis=('NULL' if occ.basis is None else "'"+BASIS_TRANSLATION[occ.basis]+"'"),
species_id=str(int(species_id)),
source_id=str(int(self.source_row_id)),
record_id=postgres_escape_bytea(occ.uuid.bytes)
)
self.conn.execute(text(sql).execution_options(autocommit=True))
def mp_fetch_occurrences(self, since, record_dirty=False, species_to_fetch=None):
'''Generator for ala.Occurrence objects.
        `species_to_fetch` is an iterable of db.species rows. If it is None
        (the default), all species rows are fetched from the database, so
        update the species table before calling this function.
Uses a pool of processes to fetch occurrence records. The subprocesses
feed the records into a queue which the original process reads and
yields. This should let the main process access the database at full
speed while the subprocesses are waiting for more records to arrive
over the network.'''
if species_to_fetch is None:
            species_to_fetch = self.conn.execute(db.species.select())
input_q = multiprocessing.Queue(10000)
pool = multiprocessing.Pool(5, _mp_init, [input_q, self.ala])
active_workers = 0
# fill the pool full with every species
for species_row in species_to_fetch:
sciname = species_row['scientific_name']
species = self.ala_species_for_scientific_name(sciname)
if species is None:
log.warning("Should have ALA.Species for %s, but don't",
sciname)
elif species.scientific_name != sciname:
#old species that has been renamed, don't fetch
pass
else:
args = (species, species_row['id'], since)
pool.apply_async(_mp_fetch_occurrences, args)
active_workers += 1
pool.close()
# keep reading from the queue until all the subprocesses are finished
while active_workers > 0:
record = None
while record is None:
try:
record = input_q.get(True, 10.0)
except Queue.Empty:
log.warning(
'Received nothing from ALA in the last 10 seconds')
if isinstance(record, self.ala.Occurrence):
yield record
elif isinstance(record, tuple):
active_workers -= 1
if len(record) == 3:
species = record[0]
species_id = record[1]
num_records = record[2]
log.info('Finished processing %d records for %s' +
' (%d species remaining)',
num_records,
species.scientific_name,
active_workers)
if record_dirty:
self.increase_dirty_count(species_id, num_records)
else:
raise RuntimeError('Worker process failed: ' + record[0])
else:
raise RuntimeError('Unexpected type coming from input_q: ' +
str(type(record)))
# all the subprocesses should be dead by now
pool.join()
def increase_dirty_count(self, species_id, num_dirty):
if species_id in self.num_dirty_records_by_species_id:
self.num_dirty_records_by_species_id[species_id] += num_dirty
else:
self.num_dirty_records_by_species_id[species_id] = num_dirty
def local_species_with_no_occurrences(self):
'''A generator for db.species rows, for rows without any occurrence
records in the local database'''
for row in self.local_species():
q = select(['count(*)'], db.occurrences.c.species_id == row['id'])
if self.conn.execute(q).scalar() == 0:
yield row
def update_num_dirty_occurrences(self):
'''Updates the species.num_dirty_occurrences column with the number of
occurrences that have been changed by self. Also updates
needs_vetting_since column if dirty occurrences > 0.'''
for row in self.local_species():
if row['id'] not in self.num_dirty_records_by_species_id:
continue
newly_dirty = self.num_dirty_records_by_species_id[row['id']]
if newly_dirty <= 0:
continue
dirty_col = db.species.c.num_dirty_occurrences
self.conn.execute(db.species.update()
.values(
num_dirty_occurrences=(dirty_col + newly_dirty),
needs_vetting_since=func.now()
).where(db.species.c.id == row['id']))
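# Usage sketch (hypothetical wiring; a SQLAlchemy `engine` and the real `ala`
# module are assumed, and the db.sources table must contain an 'ALA' row):
#
#     import ala
#     with engine.connect() as conn:
#         Syncer(ala, 'birds', conn).sync()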
def classification_for_occurrence(occ):
'''Returns an occurrences.classification enum value for an
ala.Occurrence.'''
if 'detectedOutlier' in occ.assertions:
return 'vagrant'
elif 'habitatMismatch' in occ.assertions:
return 'invalid'
else:
return 'unknown'
def postgres_escape_bytea(b):
'''Escapes a byte string into an SQL literal, suitable for adding directly
into an SQL string'''
strio = StringIO()
strio.write("E'")
for ch in b:
part = oct(ord(ch))
if len(part) > 3:
part = part.lstrip('0')
if len(part) < 3:
part = part.rjust(3, '0')
strio.write(r'\\')
strio.write(part)
strio.write("'::bytea")
return strio.getvalue()
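# Illustrative: postgres_escape_bytea('\x01ab') returns the SQL literal
# E'\\001\\141\\142'::bytea - each byte becomes a three-digit octal escape.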
def _mp_init(output_q, ala):
'''Called when a subprocess is started. See Syncer.mp_fetch_occurrences'''
_mp_init.ala = ala
_mp_init.output_q = output_q
_mp_init.log = multiprocessing.log_to_stderr()
_mp_init.log_level = _mp_init.log.getEffectiveLevel()
#stops annoying "child process shutting down" messages
_mp_init.log.setLevel(logging.WARNING)
def _mp_format_exception(e):
formatted = str(e) + '\n' + traceback.format_exc()
if isinstance(e, urllib2.HTTPError):
formatted += '\n\nResponse Headers:\n' + str(dict(e.info()))
formatted += '\n\nResponse Payload:\n' + e.read()
return formatted
def _mp_fetch_occurrences(species, species_id, since_date):
'''Gets all relevant records for the given species from ALA, and pumps the
records into _mp_init.output_q.
If the function finished successfully, will put a len 3 tuple in the
output_q with (species, species_id, num_occurrences_found). If the
function fails, will put a len 1 tuple in the _mp_init.output_q with a
failure message string in it.
Adds a `species_id` attribute to each ala.Occurrence object set to
the argument given to this function.
Also adds a `classification` attribute to each ala.Occurrence object,
which is the classification converted from the ALA assertions. Better to
do it here on a separate thread, than do it on the main thread.'''
    _mp_init.log.setLevel(_mp_init.log_level)
try:
num_records = _mp_fetch_occurrences_inner(species,
species_id,
since_date)
_mp_init.output_q.put((species, species_id, num_records))
except Exception, e:
_mp_init.output_q.put((_mp_format_exception(e),))
#stops annoying "child process shutting down" messages
_mp_init.log.setLevel(logging.WARNING)
def _mp_fetch_occurrences_inner(species, species_id, since_date):
num_records = 0
for record in _mp_init.ala.occurrences_for_species(species.lsid, since_date):
record.species_id = species_id
record.classification = classification_for_occurrence(record)
_mp_init.output_q.put(record)
num_records += 1
return num_records
def _mp_fetch_occur_count(species, species_id):
    _mp_init.log.setLevel(_mp_init.log_level)
try:
count = _mp_init.ala.num_occurrences_for_lsid(species.lsid)
_mp_init.output_q.put((species_id, count))
except Exception, e:
_mp_init.output_q.put((_mp_format_exception(e),))
#stops annoying "child process shutting down" messages
_mp_init.log.setLevel(logging.WARNING)
| |
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign.
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class DisplayAppliancePage(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, doc_name=None, document_id=None, external_document_id=None, height=None, is_first_page=None, page_id=None, page_no=None, page_status=None, page_type=None, width=None):
"""
DisplayAppliancePage - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'doc_name': 'str',
'document_id': 'str',
'external_document_id': 'str',
'height': 'int',
'is_first_page': 'bool',
'page_id': 'str',
'page_no': 'int',
'page_status': 'str',
'page_type': 'str',
'width': 'int'
}
self.attribute_map = {
'doc_name': 'docName',
'document_id': 'documentId',
'external_document_id': 'externalDocumentId',
'height': 'height',
'is_first_page': 'isFirstPage',
'page_id': 'pageId',
'page_no': 'pageNo',
'page_status': 'pageStatus',
'page_type': 'pageType',
'width': 'width'
}
self._doc_name = doc_name
self._document_id = document_id
self._external_document_id = external_document_id
self._height = height
self._is_first_page = is_first_page
self._page_id = page_id
self._page_no = page_no
self._page_status = page_status
self._page_type = page_type
self._width = width
@property
def doc_name(self):
"""
Gets the doc_name of this DisplayAppliancePage.
:return: The doc_name of this DisplayAppliancePage.
:rtype: str
"""
return self._doc_name
@doc_name.setter
def doc_name(self, doc_name):
"""
Sets the doc_name of this DisplayAppliancePage.
:param doc_name: The doc_name of this DisplayAppliancePage.
:type: str
"""
self._doc_name = doc_name
@property
def document_id(self):
"""
Gets the document_id of this DisplayAppliancePage.
Specifies the document ID number that the tab is placed on. This must refer to an existing Document's ID attribute.
:return: The document_id of this DisplayAppliancePage.
:rtype: str
"""
return self._document_id
@document_id.setter
def document_id(self, document_id):
"""
Sets the document_id of this DisplayAppliancePage.
Specifies the document ID number that the tab is placed on. This must refer to an existing Document's ID attribute.
:param document_id: The document_id of this DisplayAppliancePage.
:type: str
"""
self._document_id = document_id
@property
def external_document_id(self):
"""
Gets the external_document_id of this DisplayAppliancePage.
:return: The external_document_id of this DisplayAppliancePage.
:rtype: str
"""
return self._external_document_id
@external_document_id.setter
def external_document_id(self, external_document_id):
"""
Sets the external_document_id of this DisplayAppliancePage.
:param external_document_id: The external_document_id of this DisplayAppliancePage.
:type: str
"""
self._external_document_id = external_document_id
@property
def height(self):
"""
Gets the height of this DisplayAppliancePage.
Height of the tab in pixels.
:return: The height of this DisplayAppliancePage.
:rtype: int
"""
return self._height
@height.setter
def height(self, height):
"""
Sets the height of this DisplayAppliancePage.
Height of the tab in pixels.
:param height: The height of this DisplayAppliancePage.
:type: int
"""
self._height = height
@property
def is_first_page(self):
"""
Gets the is_first_page of this DisplayAppliancePage.
:return: The is_first_page of this DisplayAppliancePage.
:rtype: bool
"""
return self._is_first_page
@is_first_page.setter
def is_first_page(self, is_first_page):
"""
Sets the is_first_page of this DisplayAppliancePage.
:param is_first_page: The is_first_page of this DisplayAppliancePage.
:type: bool
"""
self._is_first_page = is_first_page
@property
def page_id(self):
"""
Gets the page_id of this DisplayAppliancePage.
:return: The page_id of this DisplayAppliancePage.
:rtype: str
"""
return self._page_id
@page_id.setter
def page_id(self, page_id):
"""
Sets the page_id of this DisplayAppliancePage.
:param page_id: The page_id of this DisplayAppliancePage.
:type: str
"""
self._page_id = page_id
@property
def page_no(self):
"""
Gets the page_no of this DisplayAppliancePage.
:return: The page_no of this DisplayAppliancePage.
:rtype: int
"""
return self._page_no
@page_no.setter
def page_no(self, page_no):
"""
Sets the page_no of this DisplayAppliancePage.
:param page_no: The page_no of this DisplayAppliancePage.
:type: int
"""
self._page_no = page_no
@property
def page_status(self):
"""
Gets the page_status of this DisplayAppliancePage.
:return: The page_status of this DisplayAppliancePage.
:rtype: str
"""
return self._page_status
@page_status.setter
def page_status(self, page_status):
"""
Sets the page_status of this DisplayAppliancePage.
:param page_status: The page_status of this DisplayAppliancePage.
:type: str
"""
self._page_status = page_status
@property
def page_type(self):
"""
Gets the page_type of this DisplayAppliancePage.
:return: The page_type of this DisplayAppliancePage.
:rtype: str
"""
return self._page_type
@page_type.setter
def page_type(self, page_type):
"""
Sets the page_type of this DisplayAppliancePage.
:param page_type: The page_type of this DisplayAppliancePage.
:type: str
"""
self._page_type = page_type
@property
def width(self):
"""
Gets the width of this DisplayAppliancePage.
Width of the tab in pixels.
:return: The width of this DisplayAppliancePage.
:rtype: int
"""
return self._width
@width.setter
def width(self, width):
"""
Sets the width of this DisplayAppliancePage.
Width of the tab in pixels.
:param width: The width of this DisplayAppliancePage.
:type: int
"""
self._width = width
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
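# Usage sketch (hypothetical values): build a page model and inspect it.
#
#     page = DisplayAppliancePage(doc_name='contract.pdf', page_no=1,
#                                 height=792, width=612)
#     assert page.to_dict()['page_no'] == 1
#     print(page)  # pformat()-ed dict via __repr__/to_str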
| |
SwCDR DEFINITIONS
IMPLICIT TAGS ::=
BEGIN
EXPORTS
SwCDR;
SwCDR ::= CHOICE
{
origSvcCallRecord [0] OrigSvcCallRecord,
termSvcCallRecord [1] TermSvcCallRecord
}
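-- Illustrative value notation (not part of the exported module): a SwCDR
-- carrying an originating record would be written, in outline, as
--   cdr SwCDR ::= origSvcCallRecord : { callCorrelationId 1, ... }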
--OrigSvcCallRecord ::= SET
OrigSvcCallRecord ::= SEQUENCE
{
callCorrelationId [0] INTEGER ,
chargingIndicator [1] ChargingIndicator,
sequenceNo [2] INTEGER ,
callingParty [3] CallingNumber,
calledParty [4] CalledNumber,
connectedNumber [5] ConnectedNumber,
startDate [6] StartDate,
startTime [7] StartTime,
duration [8] CallDuration ,
-- bearerClass [] BearerClass ,
trafficType [9] TrafficType ,
qosFwd [10] QoSClass ,
qosBkwd [11] QoSClass ,
forwardPcrClp0 [12] CellRate ,
forwardPcrClp01 [13] CellRate ,
backwardPcrClp0 [14] CellRate ,
backwardPcrClp01 [15] CellRate ,
forwardScrClp0 [16] CellRate ,
forwardScrClp01 [17] CellRate ,
backwardScrClp0 [18] CellRate ,
backwardScrClp01 [19] CellRate ,
forwardMcrClp0 [20] CellRate ,
forwardMcrClp01 [21] CellRate ,
backwardMcrClp0 [22] CellRate ,
backwardMcrClp01 [23] CellRate ,
forwardMbsClp0 [24] CellRate ,
forwardMbsClp01 [25] CellRate ,
forwardBEI [26] INTEGER ,
backwardBEI [27] INTEGER ,
forwardTagging [28] INTEGER ,
backwardTagging [29] INTEGER ,
-- egressCellrate0 [] INTEGER,
-- egressCellrate01 [] INTEGER,
ingressCellrate0 [30] INTEGER ,
-- ingressCellrate01 [] INTEGER ,
ingressCellrate1 [31] INTEGER ,
connectionConfig [32] UserPlaneConnection OPTIONAL
-- causeForTerm [33] CauseForTerm OPTIONAL
}
--TermSvcCallRecord ::= SET
TermSvcCallRecord ::= SEQUENCE
{
callCorrelationId [0] INTEGER ,
chargingIndicator [1] ChargingIndicator,
sequenceNo [2] INTEGER ,
callingParty [3] CallingNumber,
calledParty [4] CalledNumber,
connectedNumber [5] ConnectedNumber,
startDate [6] StartDate,
startTime [7] StartTime,
duration [8] CallDuration ,
-- bearerClass [] BearerClass ,
trafficType [9] TrafficType ,
qosFwd [10] QoSClass ,
qosBkwd [11] QoSClass ,
forwardPcrClp0 [12] CellRate ,
forwardPcrClp01 [13] CellRate ,
backwardPcrClp0 [14] CellRate ,
backwardPcrClp01 [15] CellRate ,
forwardScrClp0 [16] CellRate ,
forwardScrClp01 [17] CellRate ,
backwardScrClp0 [18] CellRate ,
backwardScrClp01 [19] CellRate ,
forwardMcrClp0 [20] CellRate ,
forwardMcrClp01 [21] CellRate ,
backwardMcrClp0 [22] CellRate ,
backwardMcrClp01 [23] CellRate ,
forwardMbsClp0 [24] CellRate ,
forwardMbsClp01 [25] CellRate ,
forwardBEI [26] INTEGER ,
backwardBEI [27] INTEGER ,
forwardTagging [28] INTEGER ,
backwardTagging [29] INTEGER ,
-- egressCellrate0 [] INTEGER ,
-- egressCellrate01 [] INTEGER ,
ingressCellrate0 [30] INTEGER ,
-- ingressCellrate01 [] INTEGER ,
ingressCellrate1 [31] INTEGER ,
connectionConfig [32] UserPlaneConnection OPTIONAL
-- causeForTerm [33] CauseForTerm OPTIONAL
}
ChargingIndicator ::= INTEGER
{
origCallRecord (0),
termCallRecord (1)
}
CallingNumber ::= OCTET STRING (SIZE (12))
-- BCD encoded representation of the number.
-- Contains: TypeOfNumber, NumberingPlanInformation
-- and either an E.164 number or a NSAP style of number,
-- including a possible subaddress.
CalledNumber ::= OCTET STRING (SIZE (20))
-- BCD encoded representation of the number.
-- Contains: TypeOfNumber, NumberingPlanInformation,
-- PresentationIndicator, ScreeningIndicator
-- and either an E.164 number or a NSAP style of number,
-- including a possible subaddress.
ConnectedNumber ::= OCTET STRING (SIZE (12))
-- BCD encoded representation of the number.
-- Contains: TypeOfNumber, NumberingPlanInformation,
-- PresentationIndicator, ScreeningIndicator
-- and either an E.164 number or a NSAP style of number,
-- including a possible subaddress.
QoSClass ::= INTEGER
-- Explicit values ToBeDefined,
-- until then: value received in SETUP-msg
--BearerClass ::= INTEGER
--{
-- bcobA (0),
-- bcobC (1),
-- bcobX (2)
--}
TrafficType ::= INTEGER
{
noIndication (0),
abr (1),
cbr (2),
vbr (3),
vbrrt (4),
vbrnrt (5),
ubr (6)
}
--TimingRequirements ::= INTEGER
--{
-- noIndication (0),
-- endToEndRequired (1),
-- endToEndNotRequired (2)
--}
--ClippingSusceptibility ::= INTEGER
--{
-- notSusceptible (0),
-- susceptible (1)
--}
UserPlaneConnection ::= INTEGER
{
pointToPoint (0),
pointToMultipoint (1)
}
--AALParameters ::= INTEGER AAL Type only
--{
-- userDefined (0),
-- aal1 (1),
-- aal2 (2),
-- aal34 (3),
-- aal5 (5)
--}
CellRate ::= INTEGER
-- Value range not less than 2^24.
-- BurstSize ::= ToBeDefined
-- TaggingRequest ::= ToBeDefined
--Timestamp ::= OCTET STRING (SIZE (11))
-- The contents of this field is a compact form of
-- the UTCTime format, containing local time plus
-- an offset to universal time.
-- The compact format is YYMMDDhhmmssdddShhmm, where:
-- YY = year, 00-99, BCD encoded
-- MM = month, 01-12, BCD encoded
-- DD = day, 01-31, BCD encoded
-- hh = hour, 00-23, BCD encoded
-- mm = minute, 00-59, BCD encoded
-- ss = second, 00-59, BCD encoded
-- ddd = millisecond, 000-999, BCD encoded
-- and rightjustified as "0ddd"
-- S = sign, "+"/"-", ASCII encoded
StartDate ::= OCTET STRING (SIZE (8))
StartTime ::= OCTET STRING (SIZE (6))
CallDuration ::= INTEGER
-- Expressed as number of milliseconds
Cellrate ::= INTEGER
-- Value range 0-2^64
CauseForTerm ::= INTEGER
{
unsuccessfulCallAttempt (0),
abnormalTermination (1)
}
END
| |
# Copyright 2014 Andrey Danin
# Copyright 2014 Wallarm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import collectd
import logging
import msgpack
import os
import Queue
import requests
import time
import threading
import traceback
import urlparse
import yaml
from copy import copy
# NOTE: This version is grepped from the Makefile, so don't change the
# format of this line.
version = "0.0.2"
plugin_name = 'WallarmAPIWriter'
class WallarmApiWriter(object):
def __init__(self, plugin_name):
self.plugin_name = plugin_name
self.config = {
'api_conn_file': '/etc/wallarm/node.yaml',
'url_path': '/v1/objects/node/stat/create',
'types_db': ['/usr/share/collectd/types.db'],
'default_ports': {
'http': 80,
'https': 444,
},
'measr_avg_size': 50,
'send_timeout_secs': 4,
'flush_interval_secs': 10,
'sleep_tick_interval_secs': 0.1,
'max_msg_size_bytes': 524288,
'max_store_timeout': 900,
'queue_size_decreaser': 5,
'logging': {
'enabled': False,
'filename': '/tmp/wallarm_api_writer.log',
'level': 'debug',
}
}
self.default_api_config = {
'host': 'api.wallarm.com',
'ca_path': '/usr/share/wallarm-common/ca.pem',
'ca_verify': True,
'use_ssl': True,
}
self.types = {}
self.api_config = {}
self.api_url = ''
self.api_file_mtime = 0
self.measr_avg_size = None
self.last_try_time = 0
self.last_flush_time = 0
self.logger = None
def get_time(self):
"""
Return the current time as epoch seconds.
"""
return int(time.mktime(time.localtime()))
def setup_logging(self):
if ('logging' not in self.config or
not self.config['logging'].get('enabled')):
return
logconfig = self.config['logging']
logging.basicConfig(
filename=logconfig['filename'],
level=logging.getLevelName(logconfig['level'].upper()))
self.logger = logging.getLogger()
def log(self, level, msg):
if self.logger:
getattr(self.logger, level)(msg)
getattr(collectd, level)(msg)
def wallarm_api_writer_config(self, cfg_obj):
for child in cfg_obj.children:
val = child.values[0]
if child.key == 'NodeYamlFile':
self.config['api_conn_file'] = val
elif child.key == 'TypesDB':
self.config['types_db'] = child.values
elif child.key == 'MaxRequestSize':
self.config['max_msg_size_bytes'] = int(val)
elif child.key == 'DropOutdatedTimeout':
self.config['max_store_timeout'] = int(val)
elif child.key == 'FlushInterval':
self.config['flush_interval_secs'] = int(val)
elif child.key == 'FlushTimeout':
self.config['send_timeout_secs'] = int(val)
elif child.key == 'URLPath':
self.config['url_path'] = val
else:
self.log(
'warning',
'{0}: Unknown config key: {1}.'.format(
self.plugin_name,
child.key
)
)
self.setup_logging()
if 'api_conn_file' not in self.config:
msg = '{0}: No file with an API configuration provided'.format(
self.plugin_name
)
self.log('error', msg)
raise ValueError(msg)
self.log("info", "Got config: {}".format(self.config))
def wallarm_parse_types_file(self, path):
"""
Parse the types.db(5) file to determine metric types.
"""
with open(path, 'r') as f:
for line in f:
fields = line.split()
if len(fields) < 2:
continue
type_name = fields[0]
if type_name[0] == '#':
continue
v = []
for ds in fields[1:]:
ds = ds.rstrip(',')
ds_fields = ds.split(':')
if len(ds_fields) != 4:
self.log(
'warning',
'{0}: cannot parse data source {1}'
' on type {2}'.format(
self.plugin_name,
ds,
type_name
)
)
continue
v.append(ds_fields)
self.types[type_name] = zip(
*map(
lambda n: n[:2], v
)
)
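    # Illustrative: a types.db line such as
    #   if_octets  rx:DERIVE:0:U, tx:DERIVE:0:U
    # is stored as self.types['if_octets'] == [('rx', 'tx'), ('DERIVE', 'DERIVE')]
    # (column 0 holds the data-source names, column 1 the data-source types).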
def drop_creds(self):
self.api_config = {}
self.api_url = ''
def get_api_credentials(self):
"""
Read a settings YAML file with API credentials
"""
self.drop_creds()
try:
with open(self.config['api_conn_file']) as fo:
                api_creds = yaml.safe_load(fo)
            # TODO (adanin): catch YAML parse errors too.
except IOError as e:
self.log(
'error',
"{0}: Cannot get API configuration from file {1}: {2}".format(
self.plugin_name,
self.config['api_conn_file'],
str(e)
)
)
raise e
if 'uuid' not in api_creds or 'secret' not in api_creds:
msg = (
"{0}: There is no 'secret' or 'uuid' fields"
" in API configuration file".format(self.plugin_name)
)
self.log('error', msg)
raise ValueError(msg)
self.api_config = copy(self.default_api_config)
for key in 'uuid', 'secret':
self.api_config[key] = api_creds[key]
if 'api' not in api_creds:
return
for key in 'host', 'port', 'use_ssl', 'ca_path', 'ca_verify':
if key in api_creds['api']:
self.api_config[key] = api_creds['api'][key]
def create_api_url(self):
scheme = 'https' if self.api_config['use_ssl'] else 'http'
port = self.api_config.get(
'port',
self.config['default_ports'][scheme]
)
netloc = '{}:{}'.format(self.api_config['host'], port)
self.api_url = urlparse.urlunparse((
scheme,
netloc,
self.config['url_path'],
None,
None,
None
))
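    # Illustratively, with the defaults above (host api.wallarm.com, use_ssl
    # True, https default port 444) this builds:
    #   https://api.wallarm.com:444/v1/objects/node/stat/create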
def build_http_auth(self):
return {
'X-WallarmAPI-Node': self.api_config['uuid'],
'X-WallarmAPI-Secret': self.api_config['secret'],
}
def prepare_http_headers(self):
self.http_headers = {
'Content-Type': 'application/msgpack',
}
self.http_headers.update(self.build_http_auth())
def is_new_credentials(self):
try:
file_mtime = os.stat(self.config['api_conn_file']).st_mtime
if file_mtime != self.api_file_mtime:
self.api_file_mtime = file_mtime
return True
except OSError:
self.drop_creds()
return False
def update_credentials(self):
if self.is_new_credentials():
try:
self.get_api_credentials()
self.prepare_http_headers()
self.create_api_url()
except ValueError:
pass
def wallarm_write(self, value):
if value.type not in self.types:
self.log(
'warning',
'{0}: do not know how to handle type {1}. Do you have'
' all your types.db files configured?'.format(
self.plugin_name,
value.type,
)
)
return
v_type = self.types[value.type]
if len(v_type[0]) != len(value.values):
self.log(
'warning',
'{0}: differing number of values for type {1}'.format(
self.plugin_name,
value.type,
)
)
return
        measurement = {
            "values": value.values,
            # self.types stores the parsed types.db columns as (ds-names,
            # ds-types): index 0 holds the names and index 1 the types.
            "dstypes": v_type[1],
            "dsnames": v_type[0],
"time": value.time,
"interval": value.interval,
"plugin": value.plugin,
"plugin_instance": value.plugin_instance,
"type": value.type,
"type_instance": value.type_instance
}
self.main_queue.put(measurement)
def shutdown_callback(self):
# If send_thread is sending now, wait until finished and exit.
if self.send_lock.locked():
self.send_lock.acquire()
self.shutdown_event.set()
self.send_lock.release()
return
# Otherwise send data manually.
self.send_lock.acquire()
self.shutdown_event.set()
try:
self.send_loop()
except Exception:
msg = "{0}: Flushing queue before shutdown failed: {1}".format(
self.plugin_name,
traceback.format_exc()
)
self.log('error', msg)
self.send_lock.release()
def update_queue_size(self):
size = int(self.config['max_msg_size_bytes'] / self.measr_avg_size)
self.send_queue_size = size - self.config['queue_size_decreaser']
def get_payload(self):
# Fill up send_queue with new messages until full.
try:
for i in xrange(self.send_queue_size - len(self.send_queue)):
self.send_queue.append(self.main_queue.get_nowait())
self.main_queue.task_done()
except Queue.Empty:
pass
if not len(self.send_queue):
return '', 0
# self.log("info",
# "Trying to pack queue with {} messages.".format(
# len(self.send_queue)
# )
# )
# Pack messages and try to fit into limit.
msg = msgpack.packb({'data': self.send_queue})
msg_len = len(self.send_queue)
while len(msg) > self.config['max_msg_size_bytes']:
self.measr_avg_size = len(msg) / len(self.send_queue)
self.update_queue_size()
msg = msgpack.packb(
{'data': self.send_queue[:self.send_queue_size]}
)
msg_len = self.send_queue_size
# self.log(
# "info",
# "Packed {} messages with total size {} bytes. {} messages in"
# " the send_queue left.".format(msg_len, len(msg),
# len(self.send_queue))
# )
return msg, msg_len
def drop_old_messages(self):
if not len(self.send_queue):
return
time_delta = self.get_time() - self.send_queue[-1]['time']
if time_delta > self.config['max_store_timeout']:
self.log(
'info',
"{0}: Drop {1} outdated messages from queue".format(
self.plugin_name,
len(self.send_queue),
)
)
self.send_queue[:] = ()
def send_loop(self):
# Drop outdated messages from send_queue.
self.drop_old_messages()
payload, msg_len = self.get_payload()
self.update_credentials()
if not self.api_url:
return False
while payload:
if not self.send_data(payload):
return False
self.send_queue[:msg_len] = ()
self.last_flush_time = self.get_time()
payload, msg_len = self.get_payload()
return True
def send_data(self, payload):
"""
POST a collection of metrics to the API.
"""
if (self.api_config['use_ssl'] and self.api_config['ca_verify']
and self.api_config['ca_path']):
verify = self.api_config['ca_path']
else:
verify = None
try:
req = requests.post(
self.api_url,
verify=verify,
data=payload,
headers=self.http_headers,
timeout=self.config['send_timeout_secs'],
)
if req.status_code not in (200,):
self.log(
'warning',
"{0}: Cannot send data to the API: server return code {1} ({2})".format(
self.plugin_name,
req.status_code,
req.content
)
)
return False
except requests.exceptions.RequestException:
self.log(
'warning',
"{0}: Cannot send data to the API: {1}".format(
self.plugin_name,
traceback.format_exc(),
)
)
return False
return True
def send_watchdog(self):
send_result = True
while not self.shutdown_event.is_set():
# Sleep more if the last send operation fails.
if send_result:
time.sleep(self.config['sleep_tick_interval_secs'])
else:
time.sleep(self.config['flush_interval_secs'])
time_delta = self.get_time() - self.last_try_time
msg_count = len(self.send_queue) + self.main_queue.qsize()
if (time_delta < self.config['flush_interval_secs'] and
msg_count < self.send_queue_size):
continue
self.send_lock.acquire()
# If there was a send operation by shutdown_callback, do nothing.
if self.shutdown_event.is_set():
self.send_lock.release()
return
try:
send_result = self.send_loop()
except Exception:
msg = "{0}: Sender failed and will be restarted: {1}".format(
self.plugin_name,
traceback.format_exc()
)
self.log('error', msg)
send_result = False
self.send_lock.release()
self.last_try_time = self.get_time()
def wallarm_init(self):
for typedb_file in self.config['types_db']:
try:
self.wallarm_parse_types_file(typedb_file)
except IOError as e:
msg = "{0}: Unable to open TypesDB file '{1}': {2}.".format(
self.plugin_name,
typedb_file,
str(e)
)
self.log('warning', msg)
if not len(self.types):
msg = (
"{0}: Didn't find any valid type in TypesDB files: {1}".format(
self.plugin_name,
self.config['types_db'],
)
)
self.log('error', msg)
raise ValueError(msg)
self.last_try_time = self.get_time()
self.last_flush_time = self.get_time()
self.main_queue = Queue.Queue()
self.send_queue = []
self.measr_avg_size = self.config['measr_avg_size']
self.update_queue_size()
self.shutdown_event = threading.Event()
self.send_lock = threading.Lock()
self.send_thread = threading.Thread(target=self.send_watchdog)
self.send_thread.start()
collectd.register_write(self.wallarm_write)
collectd.register_shutdown(self.shutdown_callback)
plugin = WallarmApiWriter(plugin_name)
collectd.register_config(plugin.wallarm_api_writer_config)
collectd.register_init(plugin.wallarm_init)
| |
from contextlib import contextmanager
import mock
import platform
import pytest
import time
import unittest2
from uuid import uuid4
from pykafka import KafkaClient
from pykafka.simpleconsumer import OwnedPartition, OffsetType
from pykafka.test.utils import get_cluster, stop_cluster
from pykafka.utils.compat import range, iteritems
class TestSimpleConsumer(unittest2.TestCase):
maxDiff = None
USE_RDKAFKA = False
USE_GEVENT = False
@classmethod
def setUpClass(cls):
cls.kafka = get_cluster()
cls.topic_name = uuid4().hex.encode()
cls.kafka.create_topic(cls.topic_name, 3, 2)
cls.total_msgs = 1000
cls.client = KafkaClient(cls.kafka.brokers)
cls.prod = cls.client.topics[cls.topic_name].get_producer(
min_queued_messages=1
)
for i in range(cls.total_msgs):
cls.prod.produce('msg {i}'.format(i=i).encode())
cls.client = KafkaClient(cls.kafka.brokers)
@classmethod
def tearDownClass(cls):
stop_cluster(cls.kafka)
@contextmanager
def _get_simple_consumer(self, **kwargs):
topic = self.client.topics[self.topic_name]
consumer = topic.get_simple_consumer(
use_rdkafka=self.USE_RDKAFKA, **kwargs)
try:
yield consumer
finally:
consumer.stop()
def test_consume(self):
"""Test consuming all messages in topic"""
# This uses a fairly long timeout to allow the test to pass on an
# oversubscribed test cluster
with self._get_simple_consumer(consumer_timeout_ms=30000) as consumer:
count = 0
for msg in consumer:
self.assertIsNotNone(msg.value)
count += 1
if count == self.total_msgs:
# We don't want to wait for StopIteration, given the long
# timeout set above
break
            self.assertEqual(count, self.total_msgs)
@staticmethod
def _convert_offsets(offset_responses):
"""Helper function to translate Offset(Fetch)PartitionResponse
Calls like consumer.fetch_offsets() and earliest_available_offsets()
return lists of OffsetPartitionResponses. These hold the next offset
to be consumed, whereas consumer.held_offsets returns the latest
consumed offset. This translates them to facilitate comparisons.
"""
if isinstance(offset_responses, dict):
offset_responses = iteritems(offset_responses)
f1 = lambda off: OffsetType.EARLIEST if off == 0 else off - 1
f2 = lambda off: off[0] if isinstance(off, list) else off
return {partition_id: f1(f2(offset_response.offset))
for partition_id, offset_response in offset_responses}
def test_offset_commit(self):
"""Check fetched offsets match pre-commit internal state"""
with self._get_simple_consumer(
consumer_group=b'test_offset_commit') as consumer:
[consumer.consume() for _ in range(100)]
offsets_committed = consumer.held_offsets
consumer.commit_offsets()
offsets_fetched = self._convert_offsets(consumer.fetch_offsets())
            self.assertEqual(offsets_fetched, offsets_committed)
def test_offset_resume(self):
"""Check resumed internal state matches committed offsets"""
with self._get_simple_consumer(
consumer_group=b'test_offset_resume') as consumer:
[consumer.consume() for _ in range(100)]
offsets_committed = consumer.held_offsets
consumer.commit_offsets()
with self._get_simple_consumer(
consumer_group=b'test_offset_resume') as consumer:
            self.assertEqual(consumer.held_offsets, offsets_committed)
def test_reset_offset_on_start(self):
"""Try starting from LATEST and EARLIEST offsets"""
with self._get_simple_consumer(
auto_offset_reset=OffsetType.EARLIEST,
reset_offset_on_start=True) as consumer:
earliest_offs = self._convert_offsets(
consumer.topic.earliest_available_offsets())
            self.assertEqual(earliest_offs, consumer.held_offsets)
self.assertIsNotNone(consumer.consume())
with self._get_simple_consumer(
auto_offset_reset=OffsetType.LATEST,
reset_offset_on_start=True,
consumer_timeout_ms=500) as consumer:
latest_offs = self._convert_offsets(
consumer.topic.latest_available_offsets())
            self.assertEqual(latest_offs, consumer.held_offsets)
self.assertIsNone(consumer.consume(block=False))
        difference = sum((latest_offs[i] - earliest_offs[i])
                         if earliest_offs[i] >= 0
                         else (latest_offs[i] + 1) if latest_offs[i] >= 0
                         else 0
                         for i in latest_offs)
self.assertEqual(difference, self.total_msgs)
def test_reset_offsets(self):
"""Test resetting to user-provided offsets"""
with self._get_simple_consumer(
auto_offset_reset=OffsetType.EARLIEST) as consumer:
# Find us a non-empty partition "target_part"
part_id, latest_offset = next(
(p, res.offset[0])
for p, res in consumer.topic.latest_available_offsets().items()
if res.offset[0] > 0)
target_part = consumer.partitions[part_id]
# Set all other partitions to LATEST, to ensure that any consume()
# calls read from target_part
partition_offsets = {
p: OffsetType.LATEST for p in consumer.partitions.values()}
new_offset = latest_offset - 5
partition_offsets[target_part] = new_offset
consumer.reset_offsets(partition_offsets.items())
self.assertEqual(consumer.held_offsets[part_id], new_offset)
msg = consumer.consume()
self.assertEqual(msg.offset, new_offset + 1)
# Invalid offsets should get overwritten as per auto_offset_reset
partition_offsets[target_part] = latest_offset + 5 # invalid!
consumer.reset_offsets(partition_offsets.items())
# SimpleConsumer's fetcher thread will detect the invalid offset
# and reset it immediately. RdKafkaSimpleConsumer however will
# only get to write the valid offset upon a call to consume():
msg = consumer.consume()
expected_offset = target_part.earliest_available_offset()
self.assertEqual(msg.offset, expected_offset)
self.assertEqual(consumer.held_offsets[part_id], expected_offset)
def test_update_cluster(self):
"""Check that the consumer can initiate cluster updates"""
with self._get_simple_consumer() as consumer:
self.assertIsNotNone(consumer.consume())
for broker in self.client.brokers.values():
broker._connection.disconnect()
# The consumer fetcher thread should prompt broker reconnection
t_start = time.time()
timeout = 10.
try:
for broker in self.client.brokers.values():
while not broker._connection.connected:
time.sleep(.1)
self.assertTrue(time.time() - t_start < timeout,
msg="Broker reconnect failed.")
finally:
# Make sure further tests don't get confused
consumer._update()
# If the fetcher thread fell over during the cluster update
# process, we'd get an exception here:
self.assertIsNotNone(consumer.consume())
def test_consumer_lag(self):
"""Ensure that after consuming the entire topic, lag is 0"""
with self._get_simple_consumer(consumer_group=b"test_lag_group",
consumer_timeout_ms=1000) as consumer:
while True:
message = consumer.consume()
if message is None:
break
consumer.commit_offsets()
latest_offsets = {p_id: res.offset[0]
for p_id, res
in iteritems(consumer.topic.latest_available_offsets())}
current_offsets = {p_id: res.offset for p_id, res in consumer.fetch_offsets()}
self.assertEqual(current_offsets, latest_offsets)
@pytest.mark.skipif(platform.python_implementation() == "PyPy",
reason="Unresolved crashes")
class TestGEventSimpleConsumer(TestSimpleConsumer):
USE_GEVENT = True
class TestOwnedPartition(unittest2.TestCase):
def test_partition_saves_offset(self):
offset = 20
msgval = "test"
partition = mock.MagicMock()
op = OwnedPartition(partition)
op.next_offset = offset
message = mock.Mock()
message.value = msgval
message.offset = offset
op.enqueue_messages([message])
self.assertEqual(op.message_count, 1)
ret_message = op.consume()
self.assertEqual(op.last_offset_consumed, message.offset)
self.assertEqual(op.next_offset, message.offset + 1)
self.assertNotEqual(ret_message, None)
self.assertEqual(ret_message.value, msgval)
def test_partition_rejects_old_message(self):
last_offset = 400
op = OwnedPartition(None)
op.last_offset_consumed = last_offset
message = mock.Mock()
message.value = "test"
message.offset = 20
op.enqueue_messages([message])
self.assertEqual(op.message_count, 0)
op.consume()
self.assertEqual(op.last_offset_consumed, last_offset)
def test_partition_consume_empty_queue(self):
op = OwnedPartition(None)
message = op.consume()
self.assertEqual(message, None)
def test_partition_offset_commit_request(self):
topic = mock.Mock()
topic.name = "test_topic"
partition = mock.Mock()
partition.topic = topic
partition.id = 12345
op = OwnedPartition(partition)
op.last_offset_consumed = 200
request = op.build_offset_commit_request()
self.assertEqual(request.topic_name, topic.name)
self.assertEqual(request.partition_id, partition.id)
self.assertEqual(request.offset, op.last_offset_consumed + 1)
self.assertEqual(request.metadata, b'pykafka')
def test_partition_offset_fetch_request(self):
topic = mock.Mock()
topic.name = "test_topic"
partition = mock.Mock()
partition.topic = topic
partition.id = 12345
op = OwnedPartition(partition)
request = op.build_offset_fetch_request()
self.assertEqual(request.topic_name, topic.name)
self.assertEqual(request.partition_id, partition.id)
def test_partition_offset_counters(self):
res = mock.Mock()
res.offset = 400
op = OwnedPartition(None)
op.set_offset(res.offset)
self.assertEqual(op.last_offset_consumed, res.offset)
self.assertEqual(op.next_offset, res.offset + 1)
if __name__ == "__main__":
unittest2.main()
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Kevin Brebanov <https://github.com/kbrebanov>
# Based on pacman (Afterburn <http://github.com/afterburn>, Aaron Bull Schaefer <aaron@elasticdog.com>)
# and apt (Matthew Williams <matthew@flowroute.com>) modules.
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: apk
short_description: Manages apk packages
description:
- Manages I(apk) packages for Alpine Linux.
author: "Kevin Brebanov (@kbrebanov)"
version_added: "2.0"
options:
available:
description:
- During upgrade, reset versioned world dependencies and change logic to prefer replacing or downgrading packages (instead of holding them)
if the currently installed package is no longer available from any repository.
required: false
default: no
choices: [ "yes", "no" ]
version_added: "2.4"
name:
description:
- A package name, like C(foo), or multiple packages, like C(foo, bar).
required: false
default: null
repository:
description:
- A package repository or multiple repositories
required: false
default: null
version_added: "2.4"
state:
description:
- Indicates the desired package(s) state.
- C(present) ensures the package(s) is/are present.
- C(absent) ensures the package(s) is/are absent.
- C(latest) ensures the package(s) is/are present and the latest version(s).
required: false
default: present
choices: [ "present", "absent", "latest" ]
update_cache:
description:
      - Update repository indexes. Can be run with other steps or on its own.
required: false
default: no
choices: [ "yes", "no" ]
upgrade:
description:
- Upgrade all installed packages to their latest version.
required: false
default: no
choices: [ "yes", "no" ]
notes:
- '"name" and "upgrade" are mutually exclusive.'
'''
EXAMPLES = '''
# Update repositories and install "foo" package
- apk:
name: foo
update_cache: yes
# Update repositories and install "foo" and "bar" packages
- apk:
name: foo,bar
update_cache: yes
# Remove "foo" package
- apk:
name: foo
state: absent
# Remove "foo" and "bar" packages
- apk:
name: foo,bar
state: absent
# Install the package "foo"
- apk:
name: foo
state: present
# Install the packages "foo" and "bar"
- apk:
name: foo,bar
state: present
# Update repositories and update package "foo" to latest version
- apk:
name: foo
state: latest
update_cache: yes
# Update repositories and update packages "foo" and "bar" to latest versions
- apk:
name: foo,bar
state: latest
update_cache: yes
# Update all installed packages to the latest versions
- apk:
upgrade: yes
# Upgrade / replace / downgrade / uninstall all installed packages to the latest versions available
- apk:
available: yes
upgrade: yes
# Update repositories as a separate step
- apk:
update_cache: yes
# Install package from a specific repository
- apk:
name: foo
state: latest
update_cache: yes
repository: http://dl-3.alpinelinux.org/alpine/edge/main
'''
RETURN = '''
packages:
description: a list of packages that have been changed
returned: when packages have changed
type: list
sample: ['package', 'other-package']
'''
import re
# Import module snippets.
from ansible.module_utils.basic import AnsibleModule
def parse_for_packages(stdout):
packages = []
data = stdout.split('\n')
    regex = re.compile(r'^\(\d+/\d+\)\s+\S+\s+(\S+)')
for l in data:
p = regex.search(l)
if p:
packages.append(p.group(1))
return packages
def update_package_db(module, exit):
cmd = "%s update" % (APK_PATH)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="could not update package db", stdout=stdout, stderr=stderr)
elif exit:
module.exit_json(changed=True, msg='updated repository indexes', stdout=stdout, stderr=stderr)
else:
return True
def query_package(module, name):
cmd = "%s -v info --installed %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
return True
else:
return False
def query_latest(module, name):
cmd = "%s version %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
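    # A typical "apk version foo" output line looks like (illustrative):
    #   foo-1.2.3-r0 < 1.2.4-r0
    # where "<" (captured as group 2) means the installed version is older
    # than the repository's, i.e. the package is not at its latest version.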
search_pattern = r"(%s)-[\d\.\w]+-[\d\w]+\s+(.)\s+[\d\.\w]+-[\d\w]+\s+" % (re.escape(name))
match = re.search(search_pattern, stdout)
if match and match.group(2) == "<":
return False
return True
def query_virtual(module, name):
cmd = "%s -v info --description %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
search_pattern = r"^%s: virtual meta package" % (re.escape(name))
if re.search(search_pattern, stdout):
return True
return False
def get_dependencies(module, name):
cmd = "%s -v info --depends %s" % (APK_PATH, name)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
dependencies = stdout.split()
if len(dependencies) > 1:
return dependencies[1:]
else:
return []
def upgrade_packages(module, available):
if module.check_mode:
cmd = "%s upgrade --simulate" % (APK_PATH)
else:
cmd = "%s upgrade" % (APK_PATH)
if available:
cmd = "%s --available" % cmd
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
packagelist = parse_for_packages(stdout)
if rc != 0:
module.fail_json(msg="failed to upgrade packages", stdout=stdout, stderr=stderr, packages=packagelist)
if re.search(r'^OK', stdout):
module.exit_json(changed=False, msg="packages already upgraded", stdout=stdout, stderr=stderr, packages=packagelist)
module.exit_json(changed=True, msg="upgraded packages", stdout=stdout, stderr=stderr, packages=packagelist)
def install_packages(module, names, state):
upgrade = False
to_install = []
to_upgrade = []
for name in names:
# Check if virtual package
if query_virtual(module, name):
# Get virtual package dependencies
dependencies = get_dependencies(module, name)
for dependency in dependencies:
if state == 'latest' and not query_latest(module, dependency):
to_upgrade.append(dependency)
else:
if not query_package(module, name):
to_install.append(name)
elif state == 'latest' and not query_latest(module, name):
to_upgrade.append(name)
if to_upgrade:
upgrade = True
if not to_install and not upgrade:
module.exit_json(changed=False, msg="package(s) already installed")
packages = " ".join(to_install + to_upgrade)
if upgrade:
if module.check_mode:
cmd = "%s add --upgrade --simulate %s" % (APK_PATH, packages)
else:
cmd = "%s add --upgrade %s" % (APK_PATH, packages)
else:
if module.check_mode:
cmd = "%s add --simulate %s" % (APK_PATH, packages)
else:
cmd = "%s add %s" % (APK_PATH, packages)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
packagelist = parse_for_packages(stdout)
if rc != 0:
module.fail_json(msg="failed to install %s" % (packages), stdout=stdout, stderr=stderr, packages=packagelist)
module.exit_json(changed=True, msg="installed %s package(s)" % (packages), stdout=stdout, stderr=stderr, packages=packagelist)
def remove_packages(module, names):
installed = []
for name in names:
if query_package(module, name):
installed.append(name)
if not installed:
module.exit_json(changed=False, msg="package(s) already removed")
names = " ".join(installed)
if module.check_mode:
cmd = "%s del --purge --simulate %s" % (APK_PATH, names)
else:
cmd = "%s del --purge %s" % (APK_PATH, names)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
packagelist = parse_for_packages(stdout)
if rc != 0:
module.fail_json(msg="failed to remove %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist)
module.exit_json(changed=True, msg="removed %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist)
# ==========================================
# Main control flow.
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'installed', 'absent', 'removed', 'latest']),
name=dict(type='list'),
repository=dict(type='list'),
update_cache=dict(default='no', type='bool'),
upgrade=dict(default='no', type='bool'),
available=dict(default='no', type='bool'),
),
required_one_of=[['name', 'update_cache', 'upgrade']],
mutually_exclusive=[['name', 'upgrade']],
supports_check_mode=True
)
# Set LANG env since we parse stdout
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
global APK_PATH
APK_PATH = module.get_bin_path('apk', required=True)
p = module.params
# add repositories to the APK_PATH
if p['repository']:
for r in p['repository']:
APK_PATH = "%s --repository %s" % (APK_PATH, r)
# normalize the state parameter
if p['state'] in ['present', 'installed']:
p['state'] = 'present'
if p['state'] in ['absent', 'removed']:
p['state'] = 'absent'
if p['update_cache']:
update_package_db(module, not p['name'] and not p['upgrade'])
if p['upgrade']:
upgrade_packages(module, p['available'])
if p['state'] in ['present', 'latest']:
install_packages(module, p['name'], p['state'])
elif p['state'] == 'absent':
remove_packages(module, p['name'])
if __name__ == '__main__':
main()
| |
#
# Pyrex Top Level
#
import os, re, sys
if sys.version_info[:2] < (2, 3):
print >>sys.stderr, "Sorry, Pyrex requires Python 2.3 or later"
sys.exit(1)
from time import time
import Builtin
import Code
import Errors
import Parsing
import Version
from Errors import PyrexError, CompileError, error
from Scanning import PyrexScanner
from Symtab import BuiltinScope, DefinitionScope, ImplementationScope
from Pyrex.Utils import set, replace_suffix, modification_time, \
file_newer_than, castrate_file, map_suffix
from Filenames import cplus_suffix, pxd_suffixes, pyx_suffixes, \
package_init_files, pyx_to_c_suffix
verbose = 0
debug_timestamps = 0
module_name_pattern = re.compile(
r"[A-Za-z_][A-Za-z0-9_]*(\.[A-Za-z_][A-Za-z0-9_]*)*$")
class Context:
# This class encapsulates the context needed for compiling
# one or more Pyrex implementation files along with their
# associated and imported declaration files. It holds
# the root of the module import namespace and the list
# of directories to search for include files.
#
# modules {string : DefinitionScope}
# include_directories [string]
def __init__(self, include_directories):
self.modules = {"__builtin__" : Builtin.builtin_scope}
self.include_directories = include_directories
def find_module(self, module_name,
relative_to = None, pos = None, need_pxd = 1):
# Finds and returns the module scope corresponding to
# the given relative or absolute module name. If this
# is the first time the module has been requested, finds
        # the corresponding .pxd file and processes it.
# If relative_to is not None, it must be a module scope,
# and the module will first be searched for relative to
# that module, provided its name is not a dotted name.
debug_find_module = 0
if debug_find_module:
print "Context.find_module: module_name =", module_name, \
"relative_to =", relative_to, "pos =", pos, "need_pxd =", need_pxd
scope = None
pxd_pathname = None
if "." not in module_name and relative_to:
if debug_find_module:
print "...trying relative import"
scope = relative_to.lookup_submodule(module_name)
if not scope:
qualified_name = relative_to.qualify_name(module_name)
pxd_pathname = self.find_pxd_file(qualified_name, pos)
if pxd_pathname:
scope = relative_to.find_submodule(module_name)
if not scope:
if debug_find_module:
print "...trying absolute import"
scope = self
for name in module_name.split("."):
scope = scope.find_submodule(name)
if debug_find_module:
print "...scope =", scope
if not scope.pxd_file_loaded:
if debug_find_module:
print "...pxd not loaded"
scope.pxd_file_loaded = 1
if not pxd_pathname:
if debug_find_module:
print "...looking for pxd file"
pxd_pathname = self.find_pxd_file(module_name, pos)
if debug_find_module:
print "......found ", pxd_pathname
if not pxd_pathname and need_pxd:
error(pos, "Cannot find .pxd file for module '%s'" % module_name)
if pxd_pathname:
try:
if debug_find_module:
print "Context.find_module: Parsing", pxd_pathname
pxd_tree = self.parse(pxd_pathname, scope, pxd = 1)
pxd_tree.analyse_declarations(scope)
except CompileError:
pass
return scope
def find_pxd_file(self, qualified_name, pos):
# Search include path for the .pxd file corresponding to the
# given fully-qualified module name.
# Will find either a dotted filename or a file in a
# package directory. If a source file position is given,
# the directory containing the source file is searched first
# for a dotted filename, and its containing package root
# directory is searched first for a non-dotted filename.
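        # For example (illustrative): for module "foo.bar" this tries the
        # dotted filename "foo.bar.pxd" in each search directory, and also
        # "bar.pxd" inside a package directory "foo/".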
return self.search_package_directories(qualified_name, pxd_suffixes, pos)
def find_pyx_file(self, qualified_name, pos):
# Search include path for the .pyx file corresponding to the
# given fully-qualified module name, as for find_pxd_file().
return self.search_package_directories(qualified_name, pyx_suffixes, pos)
def search_package_directories(self, qualified_name, suffixes, pos):
dotted_filenames = [qualified_name + suffix for suffix in suffixes]
if pos:
here = os.path.dirname(pos[0])
for dotted_filename in dotted_filenames:
path = os.path.join(here, dotted_filename)
if os.path.exists(path):
return path
dirs = self.include_directories
if pos:
here = self.find_root_package_dir(pos[0])
dirs = [here] + dirs
names = qualified_name.split(".")
package_names = names[:-1]
module_name = names[-1]
filenames = [module_name + suffix for suffix in suffixes]
for root in dirs:
for dotted_filename in dotted_filenames:
path = os.path.join(root, dotted_filename)
if os.path.exists(path):
return path
dir = self.descend_to_package_dir(root, package_names)
if dir:
for filename in filenames:
path = os.path.join(dir, filename)
if os.path.exists(path):
return path
for init_filename in package_init_files:
path = os.path.join(dir, module_name, init_filename)
if os.path.exists(path):
return path
def find_root_package_dir(self, file_path):
# Given the full pathname of a source file, find the directory
# containing the top-level package that it ultimately belongs to.
dir = os.path.dirname(file_path)
while 1:
if not self.is_package_dir(dir):
return dir
parent = os.path.dirname(dir)
if parent == dir:
return dir
dir = parent
def descend_to_package_dir(self, root_dir, package_names):
# Starting from the given root directory, look for a nested
# succession of package directories. Returns the full pathname
# of the innermost one, or None.
dir = root_dir
for name in package_names:
dir = os.path.join(dir, name)
if self.is_package_dir(dir):
return dir
def is_package_dir(self, dir_path):
# Return true if the given directory is a package directory.
for filename in package_init_files:
path = os.path.join(dir_path, filename)
if os.path.exists(path):
return 1
def find_include_file(self, filename, pos):
# Search list of include directories for filename.
# Reports an error and returns None if not found.
path = self.search_include_directories(filename, pos)
if not path:
error(pos, "'%s' not found" % filename)
return path
def search_include_directories(self, filename, pos):
# Search the list of include directories for the given
# file name. If a source file position is given, first
# searches the directory containing that file. Returns
# None if not found, but does not report an error.
dirs = self.include_directories
if pos:
here_dir = os.path.dirname(pos[0])
dirs = [here_dir] + dirs
for dir in dirs:
path = os.path.join(dir, filename)
if os.path.exists(path):
return path
return None
def lookup_submodule(self, name):
# Look up a top-level module. Returns None if not found.
return self.modules.get(name, None)
def find_submodule(self, name):
# Find a top-level module, creating a new one if needed.
scope = self.lookup_submodule(name)
if not scope:
scope = DefinitionScope(name,
parent_module = None, context = self)
self.modules[name] = scope
return scope
def parse(self, source_filename, scope, pxd):
# Parse the given source file and return a parse tree.
f = open(source_filename, "rU")
s = PyrexScanner(f, source_filename, scope = scope, context = self)
try:
tree = Parsing.p_module(s, pxd)
finally:
f.close()
if Errors.num_errors > 0:
raise CompileError
return tree
def extract_module_name(self, path):
# Find fully_qualified module name from the full pathname
# of a source file.
dir, filename = os.path.split(path)
module_name, _ = os.path.splitext(filename)
if "." not in module_name:
if module_name == "__init__":
dir, module_name = os.path.split(dir)
names = [module_name]
while self.is_package_dir(dir):
parent, package_name = os.path.split(dir)
if parent == dir:
break
names.insert(0, package_name)
dir = parent
module_name = ".".join(names)
if not module_name_pattern.match(module_name):
raise CompileError((path, 0, 0),
"'%s' is not a valid module name" % module_name)
return module_name
def dep_file_out_of_date(self, source_path):
dep_path = replace_suffix(source_path, ".dep")
if not os.path.exists(dep_path):
return 1
dep_time = modification_time(dep_path)
return file_newer_than(source_path, dep_time)
def c_file_out_of_date(self, source_path):
if debug_timestamps:
print "Checking whether", source_path, "is out of date"
c_path = map_suffix(source_path, pyx_to_c_suffix, ".c")
if not os.path.exists(c_path):
if debug_timestamps:
print "...yes, c file doesn't exist"
return 1
c_time = modification_time(c_path)
if file_newer_than(source_path, c_time):
if debug_timestamps:
print "...yes, newer than c file"
return 1
pos = [source_path]
module_name = self.extract_module_name(source_path)
pxd_path = self.find_pxd_file(module_name, pos)
if pxd_path and file_newer_than(pxd_path, c_time):
if debug_timestamps:
print "...yes, pxd file newer than c file"
return 1
dep_path = replace_suffix(source_path, ".dep")
if not os.path.exists(dep_path):
if debug_timestamps:
print "...yes, dep file does not exist"
return 1
for kind, name in self.read_dependency_file(source_path):
if kind == "cimport":
dep_path = self.find_pxd_file(name, pos)
elif kind == "include":
dep_path = self.search_include_directories(name, pos)
else:
continue
if dep_path and file_newer_than(dep_path, c_time):
if debug_timestamps:
print "...yes,", dep_path, "newer than c file"
return 1
if debug_timestamps:
print "...no"
def find_cimported_module_names(self, source_path):
for kind, name in self.read_dependency_file(source_path):
if kind == "cimport":
yield name
def read_dependency_file(self, source_path):
dep_path = replace_suffix(source_path, ".dep")
if os.path.exists(dep_path):
f = open(dep_path, "rU")
for line in f.readlines():
chunks = line.strip().split(" ", 1)
if len(chunks) == 2:
yield chunks
f.close()
def compile(self, source, options = None):
# Compile a Pyrex implementation file in this context
# and return a CompilationResult.
if not options:
options = default_options
result = CompilationResult()
cwd = os.getcwd()
source = os.path.join(cwd, source)
if options.use_listing_file:
result.listing_file = replace_suffix(source, ".lis")
Errors.open_listing_file(result.listing_file,
echo_to_stderr = options.errors_to_stderr)
else:
Errors.open_listing_file(None)
if options.output_file:
result.c_file = os.path.join(cwd, options.output_file)
else:
if options.cplus:
result.c_file = replace_suffix(source, cplus_suffix)
else:
result.c_file = map_suffix(source, pyx_to_c_suffix, ".c")
module_name = self.extract_module_name(source)
initial_pos = (source, 1, 0)
def_scope = self.find_module(module_name, pos = initial_pos, need_pxd = 0)
imp_scope = ImplementationScope(def_scope)
errors_occurred = False
try:
tree = self.parse(source, imp_scope, pxd = 0)
tree.process_implementation(imp_scope, options, result)
except CompileError:
errors_occurred = True
Errors.close_listing_file()
result.num_errors = Errors.num_errors
if result.num_errors > 0:
errors_occurred = True
if errors_occurred and result.c_file:
try:
st = os.stat(source)
castrate_file(result.c_file, st)
except EnvironmentError:
pass
result.c_file = None
if result.c_file and not options.c_only and c_compile:
result.object_file = c_compile(result.c_file,
verbose_flag = options.show_version,
cplus = options.cplus)
if not options.obj_only and c_link:
result.extension_file = c_link(result.object_file,
extra_objects = options.objects,
verbose_flag = options.show_version,
cplus = options.cplus)
return result
#------------------------------------------------------------------------
#
# Main Python entry points
#
#------------------------------------------------------------------------
class CompilationOptions:
"""
Options to the Pyrex compiler:
show_version boolean Display version number
use_listing_file boolean Generate a .lis file
errors_to_stderr boolean Echo errors to stderr when using .lis
include_path [string] Directories to search for include files
output_file string Name of generated .c file
generate_pxi boolean Generate .pxi file for public declarations
recursive boolean Recursively find and compile dependencies
timestamps boolean Only compile changed source files. If None,
defaults to true when recursive is true.
verbose boolean Always print source names being compiled
quiet boolean Don't print source names in recursive mode
Following options are experimental and only used on MacOSX:
c_only boolean Stop after generating C file (default)
obj_only boolean Stop after compiling to .o file
objects [string] Extra .o files to link with
cplus boolean Compile as c++ code
"""
def __init__(self, defaults = None, c_compile = 0, c_link = 0, **kw):
self.include_path = []
self.objects = []
if defaults:
if isinstance(defaults, CompilationOptions):
defaults = defaults.__dict__
else:
defaults = default_options
self.__dict__.update(defaults)
self.__dict__.update(kw)
if c_compile:
self.c_only = 0
if c_link:
self.obj_only = 0
class CompilationResult:
"""
Results from the Pyrex compiler:
c_file string or None The generated C source file
h_file string or None The generated C header file
i_file string or None The generated .pxi file
api_file string or None The generated C API .h file
listing_file string or None File of error messages
object_file string or None Result of compiling the C file
extension_file string or None Result of linking the object file
num_errors integer Number of compilation errors
"""
def __init__(self):
self.c_file = None
self.h_file = None
self.i_file = None
self.api_file = None
self.listing_file = None
self.object_file = None
self.extension_file = None
class CompilationResultSet(dict):
"""
Results from compiling multiple Pyrex source files. A mapping
from source file paths to CompilationResult instances. Also
has the following attributes:
num_errors integer Total number of compilation errors
"""
num_errors = 0
def add(self, source, result):
self[source] = result
self.num_errors += result.num_errors
def compile_single(source, options):
"""
compile_single(source, options)
Compile the given Pyrex implementation file and return a CompilationResult.
Always compiles a single file; does not perform timestamp checking or
recursion.
"""
context = Context(options.include_path)
return context.compile(source, options)
def compile_multiple(sources, options):
"""
compile_multiple(sources, options)
Compiles the given sequence of Pyrex implementation files and returns
a CompilationResultSet. Performs timestamp checking and/or recursion
if these are specified in the options.
"""
sources = [os.path.abspath(source) for source in sources]
processed = set()
results = CompilationResultSet()
context = Context(options.include_path)
recursive = options.recursive
timestamps = options.timestamps
if timestamps is None:
timestamps = recursive
verbose = options.verbose or ((recursive or timestamps) and not options.quiet)
for source in sources:
if source not in processed:
if not timestamps or context.c_file_out_of_date(source):
if verbose:
print >>sys.stderr, "Compiling", source
result = context.compile(source, options)
results.add(source, result)
processed.add(source)
if recursive:
for module_name in context.find_cimported_module_names(source):
path = context.find_pyx_file(module_name, [source])
if path:
sources.append(path)
else:
print >>sys.stderr, \
"Cannot find .pyx file for cimported module '%s'" % module_name
return results
def compile(source, options = None, c_compile = 0, c_link = 0, **kwds):
"""
compile(source [, options], [, <option> = <value>]...)
Compile one or more Pyrex implementation files, with optional timestamp
    checking and recursing on dependencies. The source argument may be a string
or a sequence of strings. If it is a string and no recursion or timestamp
checking is requested, a CompilationResult is returned, otherwise a
CompilationResultSet is returned.
"""
options = CompilationOptions(defaults = options, c_compile = c_compile,
c_link = c_link, **kwds)
if isinstance(source, basestring) and not options.timestamps \
and not options.recursive:
return compile_single(source, options)
else:
return compile_multiple(source, options)
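# Example usage (illustrative; "example.pyx" is a placeholder source file):
#   result = compile("example.pyx")
#   if result.num_errors == 0:
#       print "Generated", result.c_file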
#------------------------------------------------------------------------
#
# Main command-line entry point
#
#------------------------------------------------------------------------
def main(command_line = 0):
args = sys.argv[1:]
any_failures = 0
if command_line:
from CmdLine import parse_command_line
options, sources = parse_command_line(args)
else:
options = CompilationOptions(default_options)
sources = args
if options.show_version:
print >>sys.stderr, "Pyrex version %s" % Version.version
try:
result = compile(sources, options)
if result.num_errors > 0:
any_failures = 1
except EnvironmentError, e:
print >>sys.stderr, e
any_failures = 1
if any_failures:
sys.exit(1)
#------------------------------------------------------------------------
#
# Set the default options depending on the platform
#
#------------------------------------------------------------------------
default_options = dict(
show_version = 0,
use_listing_file = 0,
errors_to_stderr = 1,
c_only = 1,
obj_only = 1,
cplus = 0,
output_file = None,
generate_pxi = 0,
recursive = 0,
timestamps = None,
verbose = 0,
quiet = 0)
if sys.platform == "mac":
from Pyrex.Mac.MacSystem import c_compile, c_link, CCompilerError
default_options['use_listing_file'] = 1
elif sys.platform == "darwin":
from Pyrex.Mac.DarwinSystem import c_compile, c_link, CCompilerError
else:
c_compile = None
c_link = None
| |
# Copyright (c) 2016-2021, The Bifrost Authors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Python2 compatibility
from __future__ import print_function
import sys
if sys.version_info < (3,):
range = xrange
import threading
try:
import queue
except ImportError:
import Queue as queue
import time
import signal
from copy import copy
from collections import defaultdict
try:
from contextlib import ExitStack
except ImportError:
from contextlib2 import ExitStack
import traceback
from bifrost import device, memory, core, affinity
from bifrost.ring2 import Ring, ring_view
from bifrost.temp_storage import TempStorage
from bifrost.proclog import ProcLog
from bifrost.ndarray import memset_array # TODO: This feels a bit hacky
from bifrost import telemetry
telemetry.track_module()
# Note: This must be called before any devices are initialized. It's also
# almost always desirable when running pipelines, so we do it here at
# module import time to make things easy.
device.set_devices_no_spin_cpu()
def izip(*iterables):
    while True:
        try:
            yield [next(it) for it in iterables]
        except StopIteration:
            return
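# Note: the try/except makes izip stop cleanly when the shortest input is
# exhausted (PEP 479: a StopIteration escaping a generator is a RuntimeError
# on Python 3.7+), e.g. list(izip([1, 2], "ab")) == [[1, 'a'], [2, 'b']].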
thread_local = threading.local()
thread_local.pipeline_stack = []
def get_default_pipeline():
return thread_local.pipeline_stack[-1]
thread_local.blockscope_stack = []
def get_current_block_scope():
if len(thread_local.blockscope_stack):
return thread_local.blockscope_stack[-1]
else:
return None
def block_scope(*args, **kwargs):
return BlockScope(*args, **kwargs)
class BlockScope(object):
instance_count = 0
def __init__(self,
name=None,
gulp_nframe=None,
buffer_nframe=None,
buffer_factor=None,
core=None,
gpu=None,
share_temp_storage=False,
fuse=False):
if name is None:
name = 'BlockScope_%i' % BlockScope.instance_count
BlockScope.instance_count += 1
self._name = name
self._gulp_nframe = gulp_nframe
self._buffer_nframe = buffer_nframe
self._buffer_factor = buffer_factor
self._core = core
self._gpu = gpu
self._share_temp_storage = share_temp_storage
self._temp_storage_ = {}
self._fused = fuse
if fuse:
#if self._buffer_factor is None:
# self._buffer_factor = 1.0
if self._share_temp_storage is None:
self._share_temp_storage = True
self._parent_scope = get_current_block_scope()
if self._parent_scope is not None:
self._parent_scope.children.append(self)
self.name = self._parent_scope.name + '/' + self.name
self._children = []
def __enter__(self):
thread_local.blockscope_stack.append(self)
def __exit__(self, type, value, tb):
popped = thread_local.blockscope_stack.pop()
if __debug__:
assert(popped is self)
def __getattr__(self, name):
        # Use child's value if set, otherwise defer to parent
if '_'+name not in self.__dict__:
raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__, name))
self_value = getattr(self, '_' + name)
if self_value is not None:
return self_value
else:
if self._parent_scope is not None:
return getattr(self._parent_scope, name)
else:
return None
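    # Illustrative fallback (assumed values): a Block created with core=None
    # inside a scope built via block_scope(core=3) resolves block.core to 3
    # through this parent lookup.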
def _get_temp_storage(self, space):
if space not in self._temp_storage_:
self._temp_storage_[space] = TempStorage(space)
return self._temp_storage_[space]
def _get_scope_hierarchy(self):
"""Returns list of BlockScopes from root ancestor to self"""
scope_hierarchy = []
parent = self._parent_scope
while parent is not None:
scope_hierarchy.append(parent)
parent = parent._parent_scope
        # Return a list (not an iterator) so it can be traversed repeatedly
        return list(reversed(scope_hierarchy))
def cache_scope_hierarchy(self):
self.scope_hierarchy = self._get_scope_hierarchy()
self.fused_ancestor = None
for ancestor in self.scope_hierarchy:
if ancestor._fused:
self.fused_ancestor = ancestor
break
def is_fused_with(self, other):
return (self.fused_ancestor is not None and
self.fused_ancestor is other.fused_ancestor)
def get_temp_storage(self, space):
# TODO: Cache the first share_temp_storage scope to avoid walking each time
for scope in self.scope_hierarchy:
if scope.share_temp_storage:
return scope._get_temp_storage(space)
return self._get_temp_storage(space)
def dot_graph(self, parent_graph=None):
from graphviz import Digraph
#graph_attr = {'label': self._name}
graph_attr = {}
if parent_graph is None:
g = Digraph('cluster_' + self._name, graph_attr=graph_attr)
else:
g = parent_graph.subgraph('cluster_' + self._name,
label=self._name)
for child in self._children:
if isinstance(child, Block):
block = child
label = block.name.split('/', 1)[1]
block_colors = defaultdict(lambda: 'white')
block_colors['CopyBlock'] = 'lightsteelblue'
block_type = block.__class__.__name__
fillcolor = block_colors[block_type]
g.node(block.name,
#label='%s: %s' % (block.type,block.name),
label=label,
shape='box',
style='filled',
fillcolor=fillcolor)
for oring in block.orings:
space_colors = {
'system': 'orange',
'cuda': 'limegreen',
'cuda_host': 'deepskyblue'
}
g.node(oring.name,
shape='ellipse',
style='filled',
fillcolor=space_colors[oring.space])
g.edge(block.name, oring.name)
for iring in block.irings:
g.edge(iring.name, block.name)
else:
#child.dot_graph(g)
g.subgraph(child.dot_graph())
return g
def try_join(thread, timeout=0.):
thread.join(timeout)
return not thread.is_alive()
# Utility function for joining a collection of threads with a timeout
def join_all(threads, timeout):
deadline = time.time() + timeout
alive_threads = list(threads)
while True:
alive_threads = [t for t in alive_threads if not try_join(t)]
available_time = max(deadline - time.time(), 0)
if (len(alive_threads) == 0 or
available_time == 0):
return alive_threads
alive_threads[0].join(available_time)
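# Example (illustrative): leftover = join_all(pipeline.threads, timeout=5.0)
# returns the threads that are still alive once the deadline expires.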
class PipelineInitError(Exception):
pass
class Pipeline(BlockScope):
instance_count = 0
def __init__(self, name=None, **kwargs):
if name is None:
name = 'Pipeline_%i' % Pipeline.instance_count
Pipeline.instance_count += 1
super(Pipeline, self).__init__(name=name, **kwargs)
self.blocks = []
self.shutdown_timeout = 5.
self.all_blocks_finished_initializing_event = threading.Event()
self.block_init_queue = queue.Queue()
def as_default(self):
return PipelineContext(self)
def synchronize_block_initializations(self):
# Wait for all blocks to finish initializing
uninitialized_blocks = set(self.blocks)
while len(uninitialized_blocks):
# Note: This will get stuck if a transform block has no input ring
block, init_succeeded = self.block_init_queue.get()
uninitialized_blocks.remove(block)
if not init_succeeded:
self.shutdown()
raise PipelineInitError(
"The following block failed to initialize: " + block.name)
# Tell blocks that they can begin data processing
self.all_blocks_finished_initializing_event.set()
def run(self):
# Launch blocks as threads
self.threads = [threading.Thread(target=block.run, name=block.name)
for block in self.blocks]
for thread in self.threads:
thread.daemon = True
thread.start()
self.synchronize_block_initializations()
# Wait for blocks to finish processing
for thread in self.threads:
# Note: Doing it this way allows signals to be caught here
while thread.is_alive():
thread.join(timeout=2**30)
def shutdown(self):
for block in self.blocks:
block.shutdown()
# Ensure all blocks can make progress
self.all_blocks_finished_initializing_event.set()
join_all(self.threads, timeout=self.shutdown_timeout)
for thread in self.threads:
if thread.is_alive():
print("WARNING: Thread %s did not shut down on time and will be killed" % thread.name)
def shutdown_on_signals(self, signals=None):
if signals is None:
signals = [signal.SIGHUP,
signal.SIGINT,
signal.SIGQUIT,
signal.SIGTERM,
signal.SIGTSTP]
for sig in signals:
signal.signal(sig, self._handle_signal_shutdown)
def _handle_signal_shutdown(self, signum, frame):
SIGNAL_NAMES = dict((k, v) for v, k in
reversed(sorted(signal.__dict__.items()))
if v.startswith('SIG') and
not v.startswith('SIG_'))
print("WARNING: Received signal %i %s, shutting down pipeline" % (signum, SIGNAL_NAMES[signum]))
self.shutdown()
def __enter__(self):
thread_local.pipeline_stack.append(self)
return self
def __exit__(self, type, value, tb):
popped = thread_local.pipeline_stack.pop()
if __debug__:
assert(popped is self)
# Create the default pipeline object
thread_local.pipeline_stack.append(Pipeline())
thread_local.blockscope_stack.append(get_default_pipeline())
def get_ring(block_or_ring):
try:
return block_or_ring.orings[0]
except AttributeError:
return block_or_ring
def block_view(block, header_transform):
"""View a block with modified output headers
Use this function to adjust the output headers of a ring
on-the-fly, effectively producing a new 'view' of the block.
Args:
block (Block): Input block.
header_transform (function): A function f(hdr) -> new_hdr.
Returns:
A new block that acts as the old block but modifies its sequence
headers on-the-fly.
"""
new_block = copy(block)
new_block.orings = [ring_view(oring, header_transform)
for oring in new_block.orings]
return new_block
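# Minimal usage sketch ("some_block" and "tag_header" are assumed names):
#   def tag_header(hdr):
#       hdr = dict(hdr)
#       hdr['name'] = hdr.get('name', '') + '.view'
#       return hdr
#   tagged = block_view(some_block, tag_header)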
class Block(BlockScope):
instance_counts = defaultdict(lambda: 0)
def __init__(self, irings,
name=None,
type_=None,
**kwargs):
self.type = type_ or self.__class__.__name__
self.name = name or '%s_%i' % (self.type, Block.instance_counts[self.type])
Block.instance_counts[self.type] += 1
super(Block, self).__init__(**kwargs)
self.pipeline = get_default_pipeline()
self.pipeline.blocks.append(self)
# Allow Block instances to be passed in place of rings
irings = [get_ring(iring) for iring in irings]
self.irings = irings
valid_inp_spaces = self._define_valid_input_spaces()
for i, (iring, valid_spaces) in enumerate(zip(irings, valid_inp_spaces)):
if not memory.space_accessible(iring.space, valid_spaces):
raise ValueError("Block %s input %i's space must be accessible from one of: %s" %
(self.name, i, str(valid_spaces)))
self.orings = [] # Update this in subclass constructors
self.shutdown_event = threading.Event()
self.bind_proclog = ProcLog(self.name + "/bind")
self.in_proclog = ProcLog(self.name + "/in")
rnames = {'nring': len(self.irings)}
for i, r in enumerate(self.irings):
rnames['ring%i' % i] = r.name
self.in_proclog.update(rnames)
self.init_trace = ''.join(traceback.format_stack()[:-1])
def shutdown(self):
self.shutdown_event.set()
def create_ring(self, *args, **kwargs):
return Ring(*args, owner=self, **kwargs)
def run(self):
#affinity.set_openmp_cores(cpus) # TODO
core = self.core
if core is not None:
affinity.set_core(core if isinstance(core, int) else core[0])
self.bind_proclog.update({'ncore': 1,
'core0': affinity.get_core()})
if self.gpu is not None:
device.set_device(self.gpu)
self.cache_scope_hierarchy()
with ExitStack() as oring_stack:
active_orings = self.begin_writing(oring_stack, self.orings)
try:
self.main(active_orings)
except Exception:
self.pipeline.block_init_queue.put((self, False))
sys.stderr.write("From block instantiated here:\n")
sys.stderr.write(self.init_trace)
raise
def num_outputs(self):
# TODO: This is a little hacky
return len(self.orings)
def begin_writing(self, exit_stack, orings):
return [exit_stack.enter_context(oring.begin_writing())
for oring in orings]
def begin_sequences(self, exit_stack, orings, oheaders,
igulp_nframes, istride_nframes):
# Note: The gulp_nframe that is set in the output header does not
# include the overlap (i.e., it's based on stride not gulp).
ostride_nframes = self._define_output_nframes(istride_nframes)
for ohdr, ostride_nframe in zip(oheaders, ostride_nframes):
ohdr['gulp_nframe'] = ostride_nframe
ogulp_nframes = self._define_output_nframes(igulp_nframes)
# Note: This always specifies buffer_factor=1 on the assumption that
# additional buffering is defined by the reader(s) rather
# than the writer.
obuf_nframes = [1 * ogulp_nframe for ogulp_nframe in ogulp_nframes]
oseqs = [exit_stack.enter_context(oring.begin_sequence(ohdr,
ogulp_nframe,
obuf_nframe))
for (oring, ohdr, ogulp_nframe, obuf_nframe)
in zip(orings, oheaders, ogulp_nframes, obuf_nframes)]
# Synchronize all blocks here to ensure no sequence race conditions
self.pipeline.block_init_queue.put((self, True))
self.pipeline.all_blocks_finished_initializing_event.wait()
ogulp_overlaps = [ogulp_nframe - ostride_nframe
for ogulp_nframe, ostride_nframe
in zip(ogulp_nframes, ostride_nframes)]
return oseqs, ogulp_overlaps
def reserve_spans(self, exit_stack, oseqs, igulp_nframes=[]):
ogulp_nframes = self._define_output_nframes(igulp_nframes)
return [exit_stack.enter_context(oseq.reserve(ogulp_nframe))
for (oseq, ogulp_nframe) in zip(oseqs, ogulp_nframes)]
def commit_spans(self, ospans, ostrides_actual, ogulp_overlaps):
# Allow returning None to indicate complete consumption
if ostrides_actual is None:
            ostrides_actual = [None] * len(ospans)
# Note: If ospan.nframe < ogulp_overlap, no frames will be committed
ostrides = [ostride if ostride is not None
else max(ospan.nframe - ogulp_overlap, 0)
for (ostride, ospan, ogulp_overlap)
in zip(ostrides_actual, ospans, ogulp_overlaps)]
for ospan, ostride in zip(ospans, ostrides):
ospan.commit(ostride)
def _define_output_nframes(self, input_nframes):
return self.define_output_nframes(input_nframes)
def define_output_nframes(self, input_nframes):
"""Return output nframe for each output, given input_nframes.
"""
raise NotImplementedError
def _define_valid_input_spaces(self):
return self.define_valid_input_spaces()
def define_valid_input_spaces(self):
"""Return set of valid spaces (or 'any') for each input"""
return ['any'] * len(self.irings)
class SourceBlock(Block):
def __init__(self, sourcenames, gulp_nframe, space=None, *args, **kwargs):
super(SourceBlock, self).__init__([], *args, gulp_nframe=gulp_nframe, **kwargs)
self.sourcenames = sourcenames
default_space = 'cuda_host' if core.cuda_enabled() else 'system'
if space is None:
space = default_space
self.orings = [self.create_ring(space=space)]
self._seq_count = 0
self.perf_proclog = ProcLog(self.name + "/perf")
self.out_proclog = ProcLog(self.name + "/out")
rnames = {'nring': len(self.orings)}
for i, r in enumerate(self.orings):
rnames['ring%i' % i] = r.name
self.out_proclog.update(rnames)
def main(self, orings):
for sourcename in self.sourcenames:
if self.shutdown_event.is_set():
break
with self.create_reader(sourcename) as ireader:
oheaders = self.on_sequence(ireader, sourcename)
for ohdr in oheaders:
if 'time_tag' not in ohdr:
ohdr['time_tag'] = self._seq_count
if 'name' not in ohdr:
ohdr['name'] = 'unnamed-sequence-%i' % self._seq_count
self._seq_count += 1
with ExitStack() as oseq_stack:
oseqs, ogulp_overlaps = self.begin_sequences(
oseq_stack, orings, oheaders,
igulp_nframes=[],
istride_nframes=[])
while not self.shutdown_event.is_set():
prev_time = time.time()
with ExitStack() as ospan_stack:
ospans = self.reserve_spans(ospan_stack, oseqs)
cur_time = time.time()
reserve_time = cur_time - prev_time
prev_time = cur_time
ostrides_actual = self.on_data(ireader, ospans)
device.stream_synchronize()
self.commit_spans(ospans, ostrides_actual, ogulp_overlaps)
# TODO: Is this an OK way to detect end-of-data?
if any([ostride == 0 for ostride in ostrides_actual]):
break
cur_time = time.time()
process_time = cur_time - prev_time
prev_time = cur_time
self.perf_proclog.update({
'acquire_time': -1,
'reserve_time': reserve_time,
'process_time': process_time})
def define_output_nframes(self, _):
"""Return output nframe for each output, given input_nframes.
"""
return [self.gulp_nframe] * self.num_outputs()
def define_valid_input_spaces(self):
"""Return set of valid spaces (or 'any') for each input"""
return []
def create_reader(self, sourcename):
"""Return an object to use for reading source data"""
# TODO: Should return a dummy reader object here?
raise NotImplementedError
def on_sequence(self, reader, sourcename):
"""Return header for each output"""
raise NotImplementedError
def on_data(self, reader, ospans):
"""Process data from from ispans to ospans and return the number of
frames to commit for each output."""
raise NotImplementedError
def _span_slice(soft_slice):
# Infers optional values in soft_slice (i.e., those that are None)
start = soft_slice.start or 0
return slice(start,
soft_slice.stop,
soft_slice.step or (soft_slice.stop - start))
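# Worked example: _span_slice(slice(None, 10, None)) -> slice(0, 10, 10);
# start defaults to 0 and step defaults to the full span length.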
class MultiTransformBlock(Block):
def __init__(self, irings_, guarantee=True, *args, **kwargs):
super(MultiTransformBlock, self).__init__(irings_, *args, **kwargs)
# Note: Must use self.irings rather than irings_ because they may
# actually be Block instances.
self.guarantee = guarantee
self.orings = [self.create_ring(space=iring.space)
for iring in self.irings]
self._seq_count = 0
self.perf_proclog = ProcLog(self.name + "/perf")
self.sequence_proclogs = [ProcLog(self.name + "/sequence%i" % i)
for i in range(len(self.irings))]
self.out_proclog = ProcLog(self.name + "/out")
rnames = {'nring': len(self.orings)}
for i, r in enumerate(self.orings):
rnames['ring%i' % i] = r.name
self.out_proclog.update(rnames)
def main(self, orings):
for iseqs in izip(*[iring.read(guarantee=self.guarantee)
for iring in self.irings]):
if self.shutdown_event.is_set():
break
for i, iseq in enumerate(iseqs):
self.sequence_proclogs[i].update(iseq.header)
oheaders = self._on_sequence(iseqs)
for ohdr in oheaders:
if 'time_tag' not in ohdr:
ohdr['time_tag'] = self._seq_count
self._seq_count += 1
igulp_nframes = [self.gulp_nframe or iseq.header['gulp_nframe']
for iseq in iseqs]
igulp_overlaps = self._define_input_overlap_nframe(iseqs)
istride_nframes = igulp_nframes[:]
igulp_nframes = [igulp_nframe + nframe_overlap
for igulp_nframe, nframe_overlap
in zip(igulp_nframes, igulp_overlaps)]
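# Illustrative example: with gulp_nframe=100 and a 10-frame overlap,
# each read spans 110 frames while the stride between reads stays 100.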
for iseq, igulp_nframe in zip(iseqs, igulp_nframes):
if self.buffer_factor is None:
src_block = iseq.ring.owner
if src_block is not None and self.is_fused_with(src_block):
buffer_factor = 1
else:
buffer_factor = None
else:
buffer_factor = self.buffer_factor
iseq.resize(gulp_nframe=igulp_nframe,
buf_nframe=self.buffer_nframe,
buffer_factor=buffer_factor)
# TODO: Ever need to specify starting offset?
iframe0s = [0 for _ in igulp_nframes]
force_skip = False
with ExitStack() as oseq_stack:
oseqs, ogulp_overlaps = self.begin_sequences(
oseq_stack, orings, oheaders,
igulp_nframes, istride_nframes)
if self.shutdown_event.is_set():
break
prev_time = time.time()
for ispans in izip(*[iseq.read(igulp_nframe,
istride_nframe,
iframe0)
for (iseq, igulp_nframe, istride_nframe, iframe0)
in zip(iseqs, igulp_nframes, istride_nframes, iframe0s)]):
if self.shutdown_event.is_set():
return
if any([ispan.nframe_skipped for ispan in ispans]):
# There were skipped (overwritten) frames
with ExitStack() as ospan_stack:
iskip_slices = [slice(iframe0,
iframe0 + ispan.nframe_skipped,
istride_nframe)
for iframe0, istride_nframe, ispan in
zip(iframe0s, istride_nframes, ispans)]
iskip_nframes = [ispan.nframe_skipped
for ispan in ispans]
# ***TODO: Need to loop over multiple ospans here,
# because iskip_nframes can be
# arbitrarily large!
ospans = self.reserve_spans(ospan_stack, oseqs, iskip_nframes)
ostrides_actual = self._on_skip(iskip_slices, ospans)
device.stream_synchronize()
self.commit_spans(ospans, ostrides_actual, ogulp_overlaps)
if all([ispan.nframe == 0 for ispan in ispans]):
# No data to see here, move right along
continue
cur_time = time.time()
acquire_time = cur_time - prev_time
prev_time = cur_time
with ExitStack() as ospan_stack:
igulp_nframes = [ispan.nframe for ispan in ispans]
ospans = self.reserve_spans(ospan_stack, oseqs, igulp_nframes)
cur_time = time.time()
reserve_time = cur_time - prev_time
prev_time = cur_time
if not force_skip:
# *TODO: See if can fuse together multiple on_data calls here before
# calling stream_synchronize().
# Consider passing .data instead of rings here
ostrides_actual = self._on_data(ispans, ospans)
device.stream_synchronize()
any_frames_overwritten = any([ispan.nframe_overwritten
for ispan in ispans])
if force_skip or any_frames_overwritten:
# Note: To allow interrupted pipelines to catch up,
# we force-skip an additional gulp whenever
# a span is overwritten during on_data.
force_skip = any_frames_overwritten
iskip_slices = [slice(ispan.frame_offset,
ispan.frame_offset + ispan.nframe_overwritten,
istride_nframe)
for ispan, istride_nframe
in zip(ispans, istride_nframes)]
ostrides_actual = self._on_skip(iskip_slices, ospans)
device.stream_synchronize()
self.commit_spans(ospans, ostrides_actual, ogulp_overlaps)
cur_time = time.time()
process_time = cur_time - prev_time
prev_time = cur_time
self.perf_proclog.update({
'acquire_time': acquire_time,
'reserve_time': reserve_time,
'process_time': process_time})
# **TODO: This will not be called if an exception is raised
# Need to call it from a context manager somehow
self._on_sequence_end(iseqs)
def _on_sequence(self, iseqs):
return self.on_sequence(iseqs)
def _on_sequence_end(self, iseqs):
return self.on_sequence_end(iseqs)
def _on_data(self, ispans, ospans):
return self.on_data(ispans, ospans)
def _on_skip(self, islices, ospans):
return self.on_skip(islices, ospans)
def _define_input_overlap_nframe(self, iseqs):
return self.define_input_overlap_nframe(iseqs)
def define_input_overlap_nframe(self, iseqs):
"""Return no. input frames that should overlap between successive spans
for each input sequence.
"""
return [0] * len(self.irings)
def define_output_nframes(self, input_nframes):
"""Return output nframe for each output, given input_nframes.
"""
return input_nframes
def on_sequence(self, iseqs):
"""Return: oheaders (one per output)
"""
raise NotImplementedError
def on_sequence_end(self, iseqs):
"""Do any necessary cleanup"""
pass
def on_data(self, ispans, ospans):
"""Process data from from ispans to ospans and return the number of
frames to commit for each output (or None to commit complete spans)."""
raise NotImplementedError
def on_skip(self, islices, ospans):
"""Handle skipped frames"""
raise NotImplementedError
class TransformBlock(MultiTransformBlock):
def __init__(self, iring, *args, **kwargs):
super(TransformBlock, self).__init__([iring], *args, **kwargs)
self.iring = self.irings[0]
def _define_valid_input_spaces(self):
spaces = self.define_valid_input_spaces()
return [spaces]
def define_valid_input_spaces(self):
"""Return set of valid spaces (or 'any') for the input"""
return 'any'
def _define_input_overlap_nframe(self, iseqs):
return [self.define_input_overlap_nframe(iseqs[0])]
def define_input_overlap_nframe(self, iseq):
"""Return no. input frames that should overlap between successive spans.
"""
return 0
def _define_output_nframes(self, input_nframes):
output_nframe = self.define_output_nframes(input_nframes[0])
return [output_nframe]
def define_output_nframes(self, input_nframe):
"""Return number of frames that will be produced given input_nframe
"""
return input_nframe
def _on_sequence(self, iseqs):
return [self.on_sequence(iseqs[0])]
def on_sequence(self, iseq):
"""Return oheader"""
raise NotImplementedError
def _on_sequence_end(self, iseqs):
return [self.on_sequence_end(iseqs[0])]
def on_sequence_end(self, iseq):
"""Do any necessary cleanup"""
pass
def _on_data(self, ispans, ospans):
nframe_commit = self.on_data(ispans[0], ospans[0])
return [nframe_commit]
def on_data(self, ispan, ospan):
"""Return the number of output frames to commit, or None to commit all
"""
raise NotImplementedError
def _on_skip(self, islices, ospans):
return [self.on_skip(islices[0], ospans[0])]
def on_skip(self, islice, ospan):
"""Handle skipped frames"""
# Note: This zeros the whole gulp, even though only part of the gulp
# may have been overwritten.
memset_array(ospan.data, 0)
#for i in range(0, ispan.nframe_skipped, igulp_nframe):
# inframe = min(igulp_nframe, inskipped - i)
# onframe = self._define_output_nframes(inframe)
# with oseq.reserve(onframe) as ospan:
# bf.ndarray.memset_array(ospan.data, 0)
# TODO: Need something like on_sequence_end to allow closing open files etc.
class SinkBlock(MultiTransformBlock):
def __init__(self, iring, *args, **kwargs):
super(SinkBlock, self).__init__([iring], *args, **kwargs)
self.orings = []
self.iring = self.irings[0]
def _define_valid_input_spaces(self):
spaces = self.define_valid_input_spaces()
return [spaces]
def define_valid_input_spaces(self):
"""Return set of valid spaces (or 'any') for the input"""
return 'any'
def _define_input_overlap_nframe(self, iseqs):
return [self.define_input_overlap_nframe(iseqs[0])]
def define_input_overlap_nframe(self, iseq):
"""Return no. input frames that should overlap between successive spans.
"""
return 0
def _define_output_nframes(self, input_nframes):
return []
def _on_sequence(self, iseqs):
self.on_sequence(iseqs[0])
return []
def on_sequence(self, iseq):
"""Return islice or None to use simple striding"""
raise NotImplementedError
def _on_sequence_end(self, iseqs):
return [self.on_sequence_end(iseqs[0])]
def on_sequence_end(self, iseq):
"""Do any necessary cleanup"""
pass
def _on_data(self, ispans, ospans):
self.on_data(ispans[0])
return []
def on_data(self, ispan):
"""Return nothing"""
raise NotImplementedError
| |
# ext/declarative/clsregistry.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""Routines to handle the string class registry used by declarative.
This system allows specification of classes and expressions used in
:func:`_orm.relationship` using strings.
"""
import weakref
from . import attributes
from . import interfaces
from .descriptor_props import SynonymProperty
from .properties import ColumnProperty
from .util import class_mapper
from .. import exc
from .. import inspection
from .. import util
from ..sql.schema import _get_table_key
# strong references to registries which we place in
# the _decl_class_registry, which is usually weak referencing.
# the internal registries here link to classes with weakrefs and remove
# themselves when all references to contained classes are removed.
_registries = set()
def add_class(classname, cls, decl_class_registry):
"""Add a class to the _decl_class_registry associated with the
given declarative class.
"""
if classname in decl_class_registry:
# class already exists.
existing = decl_class_registry[classname]
if not isinstance(existing, _MultipleClassMarker):
existing = decl_class_registry[classname] = _MultipleClassMarker(
[cls, existing]
)
else:
decl_class_registry[classname] = cls
try:
root_module = decl_class_registry["_sa_module_registry"]
except KeyError:
decl_class_registry[
"_sa_module_registry"
] = root_module = _ModuleMarker("_sa_module_registry", None)
tokens = cls.__module__.split(".")
# build up a tree like this:
# modulename: myapp.snacks.nuts
#
# myapp->snack->nuts->(classes)
# snack->nuts->(classes)
# nuts->(classes)
#
# this allows partial token paths to be used.
while tokens:
token = tokens.pop(0)
module = root_module.get_module(token)
for token in tokens:
module = module.get_module(token)
module.add_class(classname, cls)
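# Illustrative sketch (hypothetical class): after add_class("Nut", Nut, reg)
# for a Nut class defined in myapp.snacks.nuts, the module registry resolves
# the partial paths "myapp.snacks.nuts.Nut", "snacks.nuts.Nut" and
# "nuts.Nut" to the same class.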
def remove_class(classname, cls, decl_class_registry):
if classname in decl_class_registry:
existing = decl_class_registry[classname]
if isinstance(existing, _MultipleClassMarker):
existing.remove_item(cls)
else:
del decl_class_registry[classname]
try:
root_module = decl_class_registry["_sa_module_registry"]
except KeyError:
return
tokens = cls.__module__.split(".")
while tokens:
token = tokens.pop(0)
module = root_module.get_module(token)
for token in tokens:
module = module.get_module(token)
module.remove_class(classname, cls)
def _key_is_empty(key, decl_class_registry, test):
"""test if a key is empty of a certain object.
used for unit tests against the registry to see if garbage collection
is working.
"test" is a callable that will be passed an object should return True
if the given object is the one we were looking for.
We can't pass the actual object itself b.c. this is for testing garbage
collection; the caller will have to have removed references to the
object itself.
"""
if key not in decl_class_registry:
return True
thing = decl_class_registry[key]
if isinstance(thing, _MultipleClassMarker):
for sub_thing in thing.contents:
if test(sub_thing):
return False
else:
return not test(thing)
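# Usage sketch (names hypothetical): after deleting all references to MyClass
# and forcing gc.collect(), a test might assert
#   _key_is_empty("MyClass", registry, lambda cls: cls.__name__ == "MyClass")
# to verify that the weakly-referenced entry was cleaned up.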
class _MultipleClassMarker(object):
"""refers to multiple classes of the same name
within _decl_class_registry.
"""
__slots__ = "on_remove", "contents", "__weakref__"
def __init__(self, classes, on_remove=None):
self.on_remove = on_remove
self.contents = set(
[weakref.ref(item, self._remove_item) for item in classes]
)
_registries.add(self)
def remove_item(self, cls):
self._remove_item(weakref.ref(cls))
def __iter__(self):
return (ref() for ref in self.contents)
def attempt_get(self, path, key):
if len(self.contents) > 1:
raise exc.InvalidRequestError(
'Multiple classes found for path "%s" '
"in the registry of this declarative "
"base. Please use a fully module-qualified path."
% (".".join(path + [key]))
)
else:
ref = list(self.contents)[0]
cls = ref()
if cls is None:
raise NameError(key)
return cls
def _remove_item(self, ref):
self.contents.discard(ref)
if not self.contents:
_registries.discard(self)
if self.on_remove:
self.on_remove()
def add_item(self, item):
# protect against class registration race condition against
# asynchronous garbage collection calling _remove_item,
# [ticket:3208]
modules = set(
[
cls.__module__
for cls in [ref() for ref in self.contents]
if cls is not None
]
)
if item.__module__ in modules:
util.warn(
"This declarative base already contains a class with the "
"same class name and module name as %s.%s, and will "
"be replaced in the string-lookup table."
% (item.__module__, item.__name__)
)
self.contents.add(weakref.ref(item, self._remove_item))
class _ModuleMarker(object):
"""Refers to a module name within
_decl_class_registry.
"""
__slots__ = "parent", "name", "contents", "mod_ns", "path", "__weakref__"
def __init__(self, name, parent):
self.parent = parent
self.name = name
self.contents = {}
self.mod_ns = _ModNS(self)
if self.parent:
self.path = self.parent.path + [self.name]
else:
self.path = []
_registries.add(self)
def __contains__(self, name):
return name in self.contents
def __getitem__(self, name):
return self.contents[name]
def _remove_item(self, name):
self.contents.pop(name, None)
if not self.contents and self.parent is not None:
self.parent._remove_item(self.name)
_registries.discard(self)
def resolve_attr(self, key):
return getattr(self.mod_ns, key)
def get_module(self, name):
if name not in self.contents:
marker = _ModuleMarker(name, self)
self.contents[name] = marker
else:
marker = self.contents[name]
return marker
def add_class(self, name, cls):
if name in self.contents:
existing = self.contents[name]
existing.add_item(cls)
else:
existing = self.contents[name] = _MultipleClassMarker(
[cls], on_remove=lambda: self._remove_item(name)
)
def remove_class(self, name, cls):
if name in self.contents:
existing = self.contents[name]
existing.remove_item(cls)
class _ModNS(object):
__slots__ = ("__parent",)
def __init__(self, parent):
self.__parent = parent
def __getattr__(self, key):
try:
value = self.__parent.contents[key]
except KeyError:
pass
else:
if value is not None:
if isinstance(value, _ModuleMarker):
return value.mod_ns
else:
assert isinstance(value, _MultipleClassMarker)
return value.attempt_get(self.__parent.path, key)
raise AttributeError(
"Module %r has no mapped classes "
"registered under the name %r" % (self.__parent.name, key)
)
class _GetColumns(object):
__slots__ = ("cls",)
def __init__(self, cls):
self.cls = cls
def __getattr__(self, key):
mp = class_mapper(self.cls, configure=False)
if mp:
if key not in mp.all_orm_descriptors:
raise AttributeError(
"Class %r does not have a mapped column named %r"
% (self.cls, key)
)
desc = mp.all_orm_descriptors[key]
if desc.extension_type is interfaces.NOT_EXTENSION:
prop = desc.property
if isinstance(prop, SynonymProperty):
key = prop.name
elif not isinstance(prop, ColumnProperty):
raise exc.InvalidRequestError(
"Property %r is not an instance of"
" ColumnProperty (i.e. does not correspond"
" directly to a Column)." % key
)
return getattr(self.cls, key)
inspection._inspects(_GetColumns)(
lambda target: inspection.inspect(target.cls)
)
class _GetTable(object):
__slots__ = "key", "metadata"
def __init__(self, key, metadata):
self.key = key
self.metadata = metadata
def __getattr__(self, key):
return self.metadata.tables[_get_table_key(key, self.key)]
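# Illustrative example (hypothetical names): _GetTable("myschema", metadata).foo
# returns metadata.tables["myschema.foo"], i.e. the schema-qualified Table.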
def _determine_container(key, value):
if isinstance(value, _MultipleClassMarker):
value = value.attempt_get([], key)
return _GetColumns(value)
class _class_resolver(object):
__slots__ = (
"cls",
"prop",
"arg",
"fallback",
"_dict",
"_resolvers",
"favor_tables",
)
def __init__(self, cls, prop, fallback, arg, favor_tables=False):
self.cls = cls
self.prop = prop
self.arg = arg
self.fallback = fallback
self._dict = util.PopulateDict(self._access_cls)
self._resolvers = ()
self.favor_tables = favor_tables
def _access_cls(self, key):
cls = self.cls
manager = attributes.manager_of_class(cls)
decl_base = manager.registry
decl_class_registry = decl_base._class_registry
metadata = decl_base.metadata
if self.favor_tables:
if key in metadata.tables:
return metadata.tables[key]
elif key in metadata._schemas:
return _GetTable(key, cls.metadata)
if key in decl_class_registry:
return _determine_container(key, decl_class_registry[key])
if not self.favor_tables:
if key in metadata.tables:
return metadata.tables[key]
elif key in metadata._schemas:
return _GetTable(key, cls.metadata)
if (
"_sa_module_registry" in decl_class_registry
and key in decl_class_registry["_sa_module_registry"]
):
registry = decl_class_registry["_sa_module_registry"]
return registry.resolve_attr(key)
elif self._resolvers:
for resolv in self._resolvers:
value = resolv(key)
if value is not None:
return value
return self.fallback[key]
def _raise_for_name(self, name, err):
util.raise_(
exc.InvalidRequestError(
"When initializing mapper %s, expression %r failed to "
"locate a name (%r). If this is a class name, consider "
"adding this relationship() to the %r class after "
"both dependent classes have been defined."
% (self.prop.parent, self.arg, name, self.cls)
),
from_=err,
)
def _resolve_name(self):
name = self.arg
d = self._dict
rval = None
try:
for token in name.split("."):
if rval is None:
rval = d[token]
else:
rval = getattr(rval, token)
except KeyError as err:
self._raise_for_name(name, err)
except NameError as n:
self._raise_for_name(n.args[0], n)
else:
if isinstance(rval, _GetColumns):
return rval.cls
else:
return rval
def __call__(self):
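# Evaluate the string argument as a Python expression; unknown names are
# resolved through self._dict (a PopulateDict) via _access_cls, which
# searches the class registry, metadata tables and finally the sqlalchemy
# fallback namespace.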
try:
x = eval(self.arg, globals(), self._dict)
if isinstance(x, _GetColumns):
return x.cls
else:
return x
except NameError as n:
self._raise_for_name(n.args[0], n)
_fallback_dict = None
def _resolver(cls, prop):
global _fallback_dict
if _fallback_dict is None:
import sqlalchemy
from sqlalchemy.orm import foreign, remote
_fallback_dict = util.immutabledict(sqlalchemy.__dict__).union(
{"foreign": foreign, "remote": remote}
)
def resolve_arg(arg, favor_tables=False):
return _class_resolver(
cls, prop, _fallback_dict, arg, favor_tables=favor_tables
)
def resolve_name(arg):
return _class_resolver(cls, prop, _fallback_dict, arg)._resolve_name
return resolve_name, resolve_arg
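# Usage sketch (illustrative, names hypothetical):
#   resolve_name, resolve_arg = _resolver(User, address_rel_prop)
#   resolve_arg("Address.user_id == foreign(User.id)")() evaluates the
#   string with mapped classes, tables, and the sqlalchemy/foreign/remote
#   names in scope, returning the resolved expression.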
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for parser module."""
import re
import textwrap
import gast
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import errors
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import pretty_printer
from tensorflow.python.platform import test
class ParserTest(test.TestCase):
def assertAstMatches(self, actual_node, expected_node_src, expr=True):
if expr:
# Ensure multi-line expressions parse.
expected_node = gast.parse('({})'.format(expected_node_src)).body[0]
expected_node = expected_node.value
else:
expected_node = gast.parse(expected_node_src).body[0]
msg = 'AST did not match expected:\n{}\nActual:\n{}'.format(
pretty_printer.fmt(expected_node),
pretty_printer.fmt(actual_node))
self.assertTrue(ast_util.matches(actual_node, expected_node), msg)
def test_parse_entity(self):
def f(x):
return x + 1
node, _ = parser.parse_entity(f, future_features=())
self.assertEqual('f', node.name)
def test_parse_lambda(self):
l = lambda x: x + 1
expected_node_src = 'lambda x: (x + 1)'
node, source = parser.parse_entity(l, future_features=())
self.assertAstMatches(node, source)
self.assertAstMatches(node, expected_node_src)
def test_parse_lambda_prefix_cleanup(self):
lambda_lam = lambda x: x + 1
expected_node_src = 'lambda x: (x + 1)'
node, source = parser.parse_entity(lambda_lam, future_features=())
self.assertAstMatches(node, source)
self.assertAstMatches(node, expected_node_src)
def test_parse_lambda_resolution_by_location(self):
_ = lambda x: x + 1
l = lambda x: x + 1
_ = lambda x: x + 1
expected_node_src = 'lambda x: (x + 1)'
node, source = parser.parse_entity(l, future_features=())
self.assertAstMatches(node, source)
self.assertAstMatches(node, expected_node_src)
self.assertEqual(source, 'lambda x: x + 1')
def test_parse_lambda_resolution_by_signature(self):
l = lambda x: lambda x, y: x + y
node, source = parser.parse_entity(l, future_features=())
expected_node_src = 'lambda x: (lambda x, y: (x + y))'
self.assertAstMatches(node, source)
self.assertAstMatches(node, expected_node_src)
self.assertEqual(source, 'lambda x: lambda x, y: x + y')
node, source = parser.parse_entity(l(0), future_features=())
expected_node_src = 'lambda x, y: (x + y)'
self.assertAstMatches(node, source)
self.assertAstMatches(node, expected_node_src)
self.assertEqual(source, 'lambda x, y: x + y')
def test_parse_lambda_resolution_ambiguous(self):
l = lambda x: lambda x: 2 * x
expected_exception_text = re.compile(r'found multiple definitions'
r'.+'
r'\(?lambda x: \(?lambda x'
r'.+'
r'\(?lambda x: \(?2', re.DOTALL)
with self.assertRaisesRegex(
errors.UnsupportedLanguageElementError,
expected_exception_text):
parser.parse_entity(l, future_features=())
with self.assertRaisesRegex(
errors.UnsupportedLanguageElementError,
expected_exception_text):
parser.parse_entity(l(0), future_features=())
def assertMatchesWithPotentialGarbage(self, source, expected, garbage):
# In runtimes which don't track end_col_number, the source contains the
# entire line, which in turn may have garbage from the surrounding context.
self.assertIn(source, (expected, expected + garbage))
def test_parse_lambda_multiline(self):
l = (
lambda x: lambda y: x + y # pylint:disable=g-long-lambda
- 1)
node, source = parser.parse_entity(l, future_features=())
expected_node_src = 'lambda x: (lambda y: ((x + y) - 1))'
self.assertAstMatches(node, expected_node_src)
self.assertMatchesWithPotentialGarbage(
source, ('lambda x: lambda y: x + y # pylint:disable=g-long-lambda\n'
' - 1'), ')')
node, source = parser.parse_entity(l(0), future_features=())
expected_node_src = 'lambda y: ((x + y) - 1)'
self.assertAstMatches(node, expected_node_src)
self.assertMatchesWithPotentialGarbage(
source, ('lambda y: x + y # pylint:disable=g-long-lambda\n'
' - 1'), ')')
def test_parse_lambda_in_expression(self):
l = (
lambda x: lambda y: x + y + 1,
lambda x: lambda y: x + y + 2,
)
node, source = parser.parse_entity(l[0], future_features=())
expected_node_src = 'lambda x: (lambda y: ((x + y) + 1))'
self.assertAstMatches(node, expected_node_src)
self.assertMatchesWithPotentialGarbage(
source, 'lambda x: lambda y: x + y + 1', ',')
node, source = parser.parse_entity(l[0](0), future_features=())
expected_node_src = 'lambda y: ((x + y) + 1)'
self.assertAstMatches(node, expected_node_src)
self.assertMatchesWithPotentialGarbage(
source, 'lambda y: x + y + 1', ',')
node, source = parser.parse_entity(l[1], future_features=())
expected_node_src = 'lambda x: (lambda y: ((x + y) + 2))'
self.assertAstMatches(node, expected_node_src)
self.assertMatchesWithPotentialGarbage(source,
'lambda x: lambda y: x + y + 2', ',')
node, source = parser.parse_entity(l[1](0), future_features=())
expected_node_src = 'lambda y: ((x + y) + 2)'
self.assertAstMatches(node, expected_node_src)
self.assertMatchesWithPotentialGarbage(source, 'lambda y: x + y + 2', ',')
def test_parse_lambda_complex_body(self):
l = lambda x: ( # pylint:disable=g-long-lambda
x.y(
[],
x.z,
(),
x[0:2],
),
x.u,
'abc',
1,
)
node, source = parser.parse_entity(l, future_features=())
expected_node_src = "lambda x: (x.y([], x.z, (), x[0:2]), x.u, 'abc', 1)"
self.assertAstMatches(node, expected_node_src)
base_source = ('lambda x: ( # pylint:disable=g-long-lambda\n'
' x.y(\n'
' [],\n'
' x.z,\n'
' (),\n'
' x[0:2],\n'
' ),\n'
' x.u,\n'
' \'abc\',\n'
' 1,')
# The complete source includes the trailing parenthesis. But that is only
# detected in runtimes which correctly track end_lineno for ASTs.
self.assertMatchesWithPotentialGarbage(source, base_source, '\n )')
def test_parse_lambda_function_call_definition(self):
def do_parse_and_test(lam, **unused_kwargs):
node, source = parser.parse_entity(lam, future_features=())
expected_node_src = 'lambda x: x'
self.assertAstMatches(node, expected_node_src)
self.assertMatchesWithPotentialGarbage(
source, 'lambda x: x', ', named_arg=1)')
do_parse_and_test( # Intentional line break
lambda x: x, named_arg=1)
def test_parse_entity_print_function(self):
def f(x):
print(x)
node, _ = parser.parse_entity(f, future_features=('print_function',))
self.assertEqual('f', node.name)
def test_parse_comments(self):
def f():
# unindented comment
pass
node, _ = parser.parse_entity(f, future_features=())
self.assertEqual('f', node.name)
def test_parse_multiline_strings(self):
def f():
print("""
multiline
string""")
node, _ = parser.parse_entity(f, future_features=())
self.assertEqual('f', node.name)
def _eval_code(self, code, name):
globs = {}
exec(code, globs) # pylint:disable=exec-used
return globs[name]
def test_dedent_block_basic(self):
code = """
def f(x):
if x > 0:
return -x
return x
"""
f = self._eval_code(parser.dedent_block(code), 'f')
self.assertEqual(f(1), -1)
self.assertEqual(f(-1), -1)
def test_dedent_block_comments_out_of_line(self):
code = """
###
def f(x):
###
if x > 0:
###
return -x
###
###
return x
###
"""
f = self._eval_code(parser.dedent_block(code), 'f')
self.assertEqual(f(1), -1)
self.assertEqual(f(-1), -1)
def test_dedent_block_multiline_string(self):
code = """
def f():
'''
Docstring.
'''
return '''
1
2
3'''
"""
f = self._eval_code(parser.dedent_block(code), 'f')
self.assertEqual(f.__doc__, '\n Docstring.\n ')
self.assertEqual(f(), '\n 1\n 2\n 3')
def test_dedent_block_multiline_expression(self):
code = """
def f():
return (1,
2,
3)
"""
f = self._eval_code(parser.dedent_block(code), 'f')
self.assertEqual(f(), (1, 2, 3))
def test_dedent_block_continuation(self):
code = r"""
def f():
a = \
1
return a
"""
f = self._eval_code(parser.dedent_block(code), 'f')
self.assertEqual(f(), 1)
def test_dedent_block_continuation_in_string(self):
code = r"""
def f():
a = "a \
b"
return a
"""
f = self._eval_code(parser.dedent_block(code), 'f')
self.assertEqual(f(), 'a b')
def test_parse_expression(self):
node = parser.parse_expression('a.b')
self.assertEqual('a', node.value.id)
self.assertEqual('b', node.attr)
def test_unparse(self):
node = gast.If(
test=gast.Constant(1, kind=None),
body=[
gast.Assign(
targets=[
gast.Name(
'a',
ctx=gast.Store(),
annotation=None,
type_comment=None)
],
value=gast.Name(
'b', ctx=gast.Load(), annotation=None, type_comment=None))
],
orelse=[
gast.Assign(
targets=[
gast.Name(
'a',
ctx=gast.Store(),
annotation=None,
type_comment=None)
],
value=gast.Constant('c', kind=None))
])
source = parser.unparse(node, indentation=' ')
self.assertEqual(
textwrap.dedent("""
# coding=utf-8
if 1:
a = b
else:
a = 'c'
""").strip(), source.strip())
def test_ext_slice_roundtrip(self):
def ext_slice(n):
return n[:, :], n[0, :], n[:, 0]
node, _ = parser.parse_entity(ext_slice, future_features=())
source = parser.unparse(node)
self.assertAstMatches(node, source, expr=False)
if __name__ == '__main__':
test.main()
| |
from pycalphad import Database, variables as v
from pycalphad.plot.binary.compsets import BinaryCompset, CompsetPair
from pycalphad.plot.binary.map import map_binary
from pycalphad.plot.binary.zpf_boundary_sets import TwoPhaseRegion, ZPFBoundarySets
from pycalphad.tests.datasets import *
ALFE_DBF = Database(ALFE_TDB)
def test_binary_mapping():
"""
Binary mapping should return a ZPFBoundarySets object
"""
my_phases = ['LIQUID', 'FCC_A1', 'HCP_A3', 'AL5FE2',
'AL2FE', 'AL13FE4', 'AL5FE4']
comps = ['AL', 'FE', 'VA']
conds = {v.T: (1200, 1300, 50), v.P: 101325, v.X('AL'): (0, 1, 0.2)}
zpf_boundaries = map_binary(ALFE_DBF, comps, my_phases, conds)
num_boundaries = len(zpf_boundaries.all_compsets)
assert num_boundaries > 0
# calling map_binary again can add more boundaries
map_binary(ALFE_DBF, comps, my_phases, conds, boundary_sets=zpf_boundaries)
assert len(zpf_boundaries.all_compsets) == 2*num_boundaries
def test_two_phase_region_usage():
"""A new pair of compsets at a slightly higher temperature should be in the region and can be added"""
compsets_298 = CompsetPair([
BinaryCompset('P1', 298.15, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P2', 298.15, 'B', 0.8, [0.2, 0.8]),
])
compsets_300 = CompsetPair([
BinaryCompset('P1', 300, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P2', 300, 'B', 0.8, [0.2, 0.8]),
])
tpr = TwoPhaseRegion(compsets_298) # Initial compsets for P1 and P2 at 298 K
assert tpr.compsets_belong_in_region(compsets_300)
tpr.add_compsets(compsets_300)
assert len(tpr.compsets) == 2
def test_two_phase_region_outside_temperature_tolerance_does_not_belong():
"""A CompsetPair with very different temperature should not belong in the TwoPhaseRegion"""
compsets_300 = CompsetPair([
BinaryCompset('P1', 300, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P2', 300, 'B', 0.8, [0.2, 0.8]),
])
compsets_500 = CompsetPair([
BinaryCompset('P1', 500, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P2', 500, 'B', 0.8, [0.2, 0.8]),
])
tpr = TwoPhaseRegion(compsets_300) # Initial compsets for P1 and P2 at 300 K
assert tpr.compsets_belong_in_region(compsets_500) is False
def test_two_phase_region_expands_as_compsets_are_added():
"""A CompsetPair with very different temperature should not belong in the TwoPhaseRegion"""
compsets_300 = CompsetPair([
BinaryCompset('P1', 300, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P2', 300, 'B', 0.8, [0.2, 0.8]),
])
compsets_305 = CompsetPair([
BinaryCompset('P1', 305, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P2', 305, 'B', 0.8, [0.2, 0.8]),
])
compsets_312 = CompsetPair([
BinaryCompset('P1', 312, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P2', 312, 'B', 0.8, [0.2, 0.8]),
])
tpr = TwoPhaseRegion(compsets_300) # Initial compsets for P1 and P2 at 300 K
# compsets don't belong because they are outside the temperature tolerance (10 K)
assert tpr.compsets_belong_in_region(compsets_312) is False
assert tpr.compsets_belong_in_region(compsets_305)
tpr.add_compsets(compsets_305)
# The 312 K compsets can be added now that the 305 K compsets bring the region within the 10 K tolerance.
assert tpr.compsets_belong_in_region(compsets_312)
def test_two_phase_region_new_phases_does_not_belong():
"""A new pair of compsets with different phases should not be in the TwoPhaseRegion"""
compsets_298 = CompsetPair([
BinaryCompset('P1', 298.15, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P2', 298.15, 'B', 0.8, [0.2, 0.8]),
])
compsets_300_diff_phases = CompsetPair([
BinaryCompset('P2', 300, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P3', 300, 'B', 0.8, [0.2, 0.8]),
])
tpr = TwoPhaseRegion(compsets_298) # Initial compsets for P1 and P2 at 298 K
assert tpr.compsets_belong_in_region(compsets_300_diff_phases) is False
def test_adding_compsets_to_zpf_boundary_sets():
"""Test that new composition sets can be added to ZPFBoundarySets successfully."""
compsets_298 = CompsetPair([
BinaryCompset('P1', 298.15, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P2', 298.15, 'B', 0.8, [0.2, 0.8]),
])
compsets_300 = CompsetPair([
BinaryCompset('P1', 300, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P2', 300, 'B', 0.8, [0.2, 0.8]),
])
compsets_300_diff_phases = CompsetPair([
BinaryCompset('P2', 300, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P3', 300, 'B', 0.8, [0.2, 0.8]),
])
zpfbs = ZPFBoundarySets(['A', 'B'], v.X('B'))
assert zpfbs.components == ['A', 'B']
assert len(zpfbs.two_phase_regions) == 0
assert len(zpfbs.all_compsets) == 0
zpfbs.add_compsets(compsets_298)
assert len(zpfbs.all_compsets) == 1
assert len(zpfbs.two_phase_regions) == 1
zpfbs.add_compsets(compsets_300) # same region, different temperature
assert len(zpfbs.all_compsets) == 2
assert len(zpfbs.two_phase_regions) == 1
zpfbs.add_compsets(compsets_300_diff_phases) # new region, different phases
assert len(zpfbs.all_compsets) == 3
assert len(zpfbs.two_phase_regions) == 2
def test_rebuilding_zpf_boundary_sets_regions():
"""Test that three regions generated by ZPFBoundarySets can correctly be rebuilt to two regions"""
compsets_298 = CompsetPair([
BinaryCompset('P1', 298.15, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P2', 298.15, 'B', 0.8, [0.2, 0.8]),
])
compsets_310 = CompsetPair([
BinaryCompset('P1', 310, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P2', 310, 'B', 0.8, [0.2, 0.8]),
])
compsets_300_diff_phases = CompsetPair([
BinaryCompset('P2', 300, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P3', 300, 'B', 0.8, [0.2, 0.8]),
])
zpfbs = ZPFBoundarySets(['A', 'B'], v.X('B'))
# Initial compsets
zpfbs.add_compsets(compsets_298)
assert len(zpfbs.all_compsets) == 1
assert len(zpfbs.two_phase_regions) == 1
# Compsets added create a new region because phases changed
zpfbs.add_compsets(compsets_300_diff_phases)
assert len(zpfbs.all_compsets) == 2
assert len(zpfbs.two_phase_regions) == 2
# Compsets added create a new region because the temperature is out of tolerance
zpfbs.add_compsets(compsets_310)
assert len(zpfbs.all_compsets) == 3
assert len(zpfbs.two_phase_regions) == 3
# Rebuilding the regions with a larger tolerance should create two regions with one and two compsets.
zpfbs.rebuild_two_phase_regions(Ttol=20)
assert len(zpfbs.all_compsets) == 3
assert len(zpfbs.two_phase_regions) == 2
assert sorted([len(tpr.compsets) for tpr in zpfbs.two_phase_regions]) == [1, 2]
def test_zpf_boundary_sets_line_plot():
"""Test creating scatter plot LineCollections works"""
compsets_298 = CompsetPair([
BinaryCompset('P1', 298.15, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P2', 298.15, 'B', 0.8, [0.2, 0.8]),
])
compsets_300_diff_phases = CompsetPair([
BinaryCompset('P2', 300, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P3', 300, 'B', 0.8, [0.2, 0.8]),
])
zpfbs = ZPFBoundarySets(['A', 'B'], v.X('B'))
zpfbs.add_compsets(compsets_298)
zpfbs.add_compsets(compsets_300_diff_phases)
boundaries, tielines, legend = zpfbs.get_line_plot_boundaries()
assert len(boundaries._paths) > 0
assert len(tielines._paths) > 0
def test_zpf_boundary_set_scatter_plot():
"""Test creating scatter plot LineCollections works"""
compsets_298 = CompsetPair([
BinaryCompset('P1', 298.15, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P2', 298.15, 'B', 0.8, [0.2, 0.8]),
])
compsets_300_diff_phases = CompsetPair([
BinaryCompset('P2', 300, 'B', 0.5, [0.5, 0.5]),
BinaryCompset('P3', 300, 'B', 0.8, [0.2, 0.8]),
])
zpfbs = ZPFBoundarySets(['A', 'B'], v.X('B'))
zpfbs.add_compsets(compsets_298)
zpfbs.add_compsets(compsets_300_diff_phases)
boundaries, tielines, legend = zpfbs.get_scatter_plot_boundaries()
x, y, col = boundaries['x'], boundaries['y'], boundaries['c']
assert len(x) > 0
assert len(x) == len(y)
assert len(x) == len(col)
assert len(tielines._paths) > 0
| |
#!/usr/bin/env ambari-python-wrap
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Python imports
import imp
import os
import traceback
import re
import socket
import fnmatch
from resource_management.core.logger import Logger
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
STACKS_DIR = os.path.join(SCRIPT_DIR, '../../../stacks/')
PARENT_FILE = os.path.join(STACKS_DIR, 'service_advisor.py')
try:
with open(PARENT_FILE, 'rb') as fp:
service_advisor = imp.load_module('service_advisor', fp, PARENT_FILE, ('.py', 'rb', imp.PY_SOURCE))
except Exception as e:
traceback.print_exc()
print "Failed to load parent"
class KafkaServiceAdvisor(service_advisor.ServiceAdvisor):
def __init__(self, *args, **kwargs):
self.as_super = super(KafkaServiceAdvisor, self)
self.as_super.__init__(*args, **kwargs)
# Always call these methods
self.modifyMastersWithMultipleInstances()
self.modifyCardinalitiesDict()
self.modifyHeapSizeProperties()
self.modifyNotValuableComponents()
self.modifyComponentsNotPreferableOnServer()
self.modifyComponentLayoutSchemes()
def modifyMastersWithMultipleInstances(self):
"""
Modify the set of masters with multiple instances.
Must be overridden in child class.
"""
# Nothing to do
pass
def modifyCardinalitiesDict(self):
"""
Modify the dictionary of cardinalities.
Must be overridden in child class.
"""
# Nothing to do
pass
def modifyHeapSizeProperties(self):
"""
Modify the dictionary of heap size properties.
Must be overridden in child class.
"""
pass
def modifyNotValuableComponents(self):
"""
Modify the set of components whose host assignment is based on other services.
Must be overridden in child class.
"""
# Nothing to do
pass
def modifyComponentsNotPreferableOnServer(self):
"""
Modify the set of components that are not preferable on the server.
Must be overridden in child class.
"""
# Nothing to do
pass
def modifyComponentLayoutSchemes(self):
"""
Modify layout scheme dictionaries for components.
The scheme dictionary basically maps the number of hosts to
host index where component should exist.
Must be overridden in child class.
"""
# Nothing to do
pass
def getServiceComponentLayoutValidations(self, services, hosts):
"""
Get a list of errors.
Must be overridden in child class.
"""
return []
def getServiceConfigurationRecommendations(self, configurations, clusterData, services, hosts):
"""
Entry point.
Must be overridden in child class.
"""
#Logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
# (self.__class__.__name__, inspect.stack()[0][3]))
recommender = KafkaRecommender()
recommender.recommendKafkaConfigurationsFromHDP22(configurations, clusterData, services, hosts)
recommender.recommendKAFKAConfigurationsFromHDP23(configurations, clusterData, services, hosts)
recommender.recommendKAFKAConfigurationsFromHDP26(configurations, clusterData, services, hosts)
def getServiceConfigurationsValidationItems(self, configurations, recommendedDefaults, services, hosts):
"""
Entry point.
Validate configurations for the service. Return a list of errors.
The code for this function should be the same for each Service Advisor.
"""
#Logger.info("Class: %s, Method: %s. Validating Configurations." %
# (self.__class__.__name__, inspect.stack()[0][3]))
validator = KafkaValidator()
# Calls the methods of the validator using arguments,
# method(siteProperties, siteRecommendations, configurations, services, hosts)
return validator.validateListOfConfigUsingMethod(configurations, recommendedDefaults, services, hosts, validator.validators)
class KafkaRecommender(service_advisor.ServiceAdvisor):
"""
Kafka Recommender suggests properties when adding the service for the first time or modifying configs via the UI.
"""
def __init__(self, *args, **kwargs):
self.as_super = super(KafkaRecommender, self)
self.as_super.__init__(*args, **kwargs)
def recommendKafkaConfigurationsFromHDP22(self, configurations, clusterData, services, hosts):
kafka_mounts = [
("log.dirs", "KAFKA_BROKER", "/kafka-logs", "multi")
]
self.updateMountProperties("kafka-broker", kafka_mounts, configurations, services, hosts)
def recommendKAFKAConfigurationsFromHDP23(self, configurations, clusterData, services, hosts):
servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
kafka_broker = self.getServicesSiteProperties(services, "kafka-broker")
security_enabled = self.isSecurityEnabled(services)
putKafkaBrokerProperty = self.putProperty(configurations, "kafka-broker", services)
putKafkaLog4jProperty = self.putProperty(configurations, "kafka-log4j", services)
putKafkaBrokerAttributes = self.putPropertyAttribute(configurations, "kafka-broker")
if security_enabled:
kafka_env = self.getServicesSiteProperties(services, "kafka-env")
kafka_user = kafka_env.get('kafka_user') if kafka_env is not None else None
if kafka_user is not None:
kafka_super_users = kafka_broker.get('super.users') if kafka_broker is not None else None
# kafka_super_users is expected to be formatted as: User:user1;User:user2
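# Illustrative example: an existing value 'User:admin;User:alice' with
# kafka_user 'kafka' rebuilds to 'User:kafka;User:admin;User:alice'
# (set iteration, so the order may vary).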
if kafka_super_users is not None and kafka_super_users != '':
# Parse kafka_super_users to get a set of unique user names and rebuild the property value
user_names = set()
user_names.add(kafka_user)
for match in re.findall('User:([^;]*)', kafka_super_users):
user_names.add(match)
kafka_super_users = 'User:' + ";User:".join(user_names)
else:
kafka_super_users = 'User:' + kafka_user
putKafkaBrokerProperty("super.users", kafka_super_users)
putKafkaBrokerProperty("principal.to.local.class", "kafka.security.auth.KerberosPrincipalToLocal")
putKafkaBrokerProperty("security.inter.broker.protocol", "PLAINTEXTSASL")
putKafkaBrokerProperty("zookeeper.set.acl", "true")
else: # not security_enabled
# remove unneeded properties
putKafkaBrokerAttributes('super.users', 'delete', 'true')
putKafkaBrokerAttributes('principal.to.local.class', 'delete', 'true')
putKafkaBrokerAttributes('security.inter.broker.protocol', 'delete', 'true')
# Update ranger-kafka-plugin-properties/ranger-kafka-plugin-enabled to match ranger-env/ranger-kafka-plugin-enabled
if "ranger-env" in services["configurations"] \
and "ranger-kafka-plugin-properties" in services["configurations"] \
and "ranger-kafka-plugin-enabled" in services["configurations"]["ranger-env"]["properties"]:
putKafkaRangerPluginProperty = self.putProperty(configurations, "ranger-kafka-plugin-properties", services)
ranger_kafka_plugin_enabled = services["configurations"]["ranger-env"]["properties"]["ranger-kafka-plugin-enabled"]
putKafkaRangerPluginProperty("ranger-kafka-plugin-enabled", ranger_kafka_plugin_enabled)
ranger_plugin_enabled = False
# Only if the RANGER service is installed....
if "RANGER" in servicesList:
# If ranger-kafka-plugin-properties/ranger-kafka-plugin-enabled is set,
# determine whether the Ranger/Kafka plug-in is enabled or not
if 'ranger-kafka-plugin-properties' in configurations and \
'ranger-kafka-plugin-enabled' in configurations['ranger-kafka-plugin-properties']['properties']:
ranger_plugin_enabled = configurations['ranger-kafka-plugin-properties']['properties']['ranger-kafka-plugin-enabled'].lower() == 'yes'
# If ranger-kafka-plugin-properties/ranger-kafka-plugin-enabled was not changed,
# determine whether the Ranger/Kafka plug-in is enabled or not
elif 'ranger-kafka-plugin-properties' in services['configurations'] and \
'ranger-kafka-plugin-enabled' in services['configurations']['ranger-kafka-plugin-properties']['properties']:
ranger_plugin_enabled = services['configurations']['ranger-kafka-plugin-properties']['properties']['ranger-kafka-plugin-enabled'].lower() == 'yes'
# Determine the value for kafka-broker/authorizer.class.name
if ranger_plugin_enabled:
# If the Ranger plugin for Kafka is enabled, set authorizer.class.name to
# "org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer" whether Kerberos is
# enabled or not.
putKafkaBrokerProperty("authorizer.class.name", 'org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer')
elif security_enabled:
putKafkaBrokerProperty("authorizer.class.name", 'kafka.security.auth.SimpleAclAuthorizer')
else:
putKafkaBrokerAttributes('authorizer.class.name', 'delete', 'true')
#If AMS is part of Services, use the KafkaTimelineMetricsReporter for metric reporting. Default is ''.
if "AMBARI_METRICS" in servicesList:
putKafkaBrokerProperty('kafka.metrics.reporters', 'org.apache.hadoop.metrics2.sink.kafka.KafkaTimelineMetricsReporter')
if ranger_plugin_enabled:
kafkaLog4jRangerLines = [{
"name": "log4j.appender.rangerAppender",
"value": "org.apache.log4j.DailyRollingFileAppender"
},
{
"name": "log4j.appender.rangerAppender.DatePattern",
"value": "'.'yyyy-MM-dd-HH"
},
{
"name": "log4j.appender.rangerAppender.File",
"value": "${kafka.logs.dir}/ranger_kafka.log"
},
{
"name": "log4j.appender.rangerAppender.layout",
"value": "org.apache.log4j.PatternLayout"
},
{
"name": "log4j.appender.rangerAppender.layout.ConversionPattern",
"value": "%d{ISO8601} %p [%t] %C{6} (%F:%L) - %m%n"
},
{
"name": "log4j.logger.org.apache.ranger",
"value": "INFO, rangerAppender"
}]
# change kafka-log4j when ranger plugin is installed
if 'kafka-log4j' in services['configurations'] and 'content' in services['configurations']['kafka-log4j']['properties']:
kafkaLog4jContent = services['configurations']['kafka-log4j']['properties']['content']
for item in range(len(kafkaLog4jRangerLines)):
if kafkaLog4jRangerLines[item]["name"] not in kafkaLog4jContent:
kafkaLog4jContent+= '\n' + kafkaLog4jRangerLines[item]["name"] + '=' + kafkaLog4jRangerLines[item]["value"]
putKafkaLog4jProperty("content",kafkaLog4jContent)
zookeeper_host_port = self.getZKHostPortString(services)
if zookeeper_host_port:
putRangerKafkaPluginProperty = self.putProperty(configurations, 'ranger-kafka-plugin-properties', services)
putRangerKafkaPluginProperty('zookeeper.connect', zookeeper_host_port)
def recommendKAFKAConfigurationsFromHDP26(self, configurations, clusterData, services, hosts):
if 'kafka-env' in services['configurations'] and 'kafka_user' in services['configurations']['kafka-env']['properties']:
kafka_user = services['configurations']['kafka-env']['properties']['kafka_user']
else:
kafka_user = "kafka"
if 'ranger-kafka-plugin-properties' in configurations and 'ranger-kafka-plugin-enabled' in configurations['ranger-kafka-plugin-properties']['properties']:
ranger_kafka_plugin_enabled = (configurations['ranger-kafka-plugin-properties']['properties']['ranger-kafka-plugin-enabled'].lower() == 'yes')
elif 'ranger-kafka-plugin-properties' in services['configurations'] and 'ranger-kafka-plugin-enabled' in services['configurations']['ranger-kafka-plugin-properties']['properties']:
ranger_kafka_plugin_enabled = (services['configurations']['ranger-kafka-plugin-properties']['properties']['ranger-kafka-plugin-enabled'].lower() == 'yes')
else:
ranger_kafka_plugin_enabled = False
if ranger_kafka_plugin_enabled and 'ranger-kafka-plugin-properties' in services['configurations'] and 'REPOSITORY_CONFIG_USERNAME' in services['configurations']['ranger-kafka-plugin-properties']['properties']:
self.logger.info("Setting Kafka Repo user for Ranger.")
putRangerKafkaPluginProperty = self.putProperty(configurations, "ranger-kafka-plugin-properties", services)
putRangerKafkaPluginProperty("REPOSITORY_CONFIG_USERNAME",kafka_user)
else:
self.logger.info("Not setting Kafka Repo user for Ranger.")
class KafkaValidator(service_advisor.ServiceAdvisor):
"""
Kafka Validator checks the correctness of properties whenever the service is first added or the user attempts to
change configs via the UI.
"""
def __init__(self, *args, **kwargs):
self.as_super = super(KafkaValidator, self)
self.as_super.__init__(*args, **kwargs)
self.validators = [("ranger-kafka-plugin-properties", self.validateKafkaRangerPluginConfigurationsFromHDP22),
("kafka-broker", self.validateKAFKAConfigurationsFromHDP23)]
def validateKafkaRangerPluginConfigurationsFromHDP22(self, properties, recommendedDefaults, configurations, services, hosts):
validationItems = []
ranger_plugin_properties = self.getSiteProperties(configurations, "ranger-kafka-plugin-properties")
ranger_plugin_enabled = ranger_plugin_properties['ranger-kafka-plugin-enabled'] if ranger_plugin_properties else 'No'
servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
security_enabled = self.isSecurityEnabled(services)
if 'RANGER' in servicesList and ranger_plugin_enabled.lower() == 'yes':
# ranger-hdfs-plugin must be enabled in ranger-env
ranger_env = self.getServicesSiteProperties(services, 'ranger-env')
if not ranger_env or not 'ranger-kafka-plugin-enabled' in ranger_env or \
ranger_env['ranger-kafka-plugin-enabled'].lower() != 'yes':
validationItems.append({"config-name": 'ranger-kafka-plugin-enabled',
"item": self.getWarnItem(
"ranger-kafka-plugin-properties/ranger-kafka-plugin-enabled must correspond ranger-env/ranger-kafka-plugin-enabled")})
if ("RANGER" in servicesList) and (ranger_plugin_enabled.lower() == 'yes') and not security_enabled:
validationItems.append({"config-name": "ranger-kafka-plugin-enabled",
"item": self.getWarnItem(
"Ranger Kafka plugin should not be enabled in non-kerberos environment.")})
return self.toConfigurationValidationProblems(validationItems, "ranger-kafka-plugin-properties")
def validateKAFKAConfigurationsFromHDP23(self, properties, recommendedDefaults, configurations, services, hosts):
kafka_broker = properties
validationItems = []
servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
#Adding Ranger Plugin logic here
ranger_plugin_properties = self.getSiteProperties(configurations, "ranger-kafka-plugin-properties")
ranger_plugin_enabled = ranger_plugin_properties['ranger-kafka-plugin-enabled'] if ranger_plugin_properties else 'No'
prop_name = 'authorizer.class.name'
prop_val = "org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer"
if ("RANGER" in servicesList) and (ranger_plugin_enabled.lower() == 'Yes'.lower()):
if kafka_broker[prop_name] != prop_val:
validationItems.append({"config-name": prop_name,
"item": self.getWarnItem(
"If Ranger Kafka Plugin is enabled." \
"{0} needs to be set to {1}".format(prop_name,prop_val))})
if 'KERBEROS' in servicesList and 'security.inter.broker.protocol' in properties:
interBrokerValue = properties['security.inter.broker.protocol']
prop_name = 'listeners'
prop_value = properties[prop_name]
if interBrokerValue and interBrokerValue not in prop_value:
validationItems.append({"config-name": "listeners",
"item": self.getWarnItem("If kerberos is enabled " \
"{0} need to contain {1} as one of " \
"the protocol".format(prop_name, interBrokerValue))})
return self.toConfigurationValidationProblems(validationItems, "kafka-broker")
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import cgi
import os
import re
import uuid
from collections import defaultdict, namedtuple
from pants.base.build_environment import get_buildroot
from pants.base.mustache import MustacheRenderer
from pants.base.workunit import WorkUnit
from pants.reporting.linkify import linkify
from pants.reporting.report import Report
from pants.reporting.reporter import Reporter
from pants.reporting.reporting_utils import items_to_report_element
from pants.util.dirutil import safe_mkdir
class HtmlReporter(Reporter):
"""HTML reporting to files.
The files are intended to be served by the ReportingServer,
not accessed directly from the filesystem.
"""
# HTML reporting settings.
# html_dir: Where the report files go.
# template_dir: Where to find mustache templates.
Settings = namedtuple('Settings', Reporter.Settings._fields + ('html_dir', 'template_dir'))
def __init__(self, run_tracker, settings):
Reporter.__init__(self, run_tracker, settings)
# The main report, and associated tool outputs, go under this dir.
self._html_dir = settings.html_dir
# We render HTML from mustache templates.
self._renderer = MustacheRenderer(settings.template_dir, __name__)
# We serve files relative to the build root.
self._buildroot = get_buildroot()
self._html_path_base = os.path.relpath(self._html_dir, self._buildroot)
# We write the main report body to this file object.
self._report_file = None
# We redirect stdout, stderr etc. of tool invocations to these files.
self._output_files = defaultdict(dict) # workunit_id -> {path -> fileobj}.
def report_path(self):
"""The path to the main report file."""
return os.path.join(self._html_dir, 'build.html')
def open(self):
"""Implementation of Reporter callback."""
safe_mkdir(os.path.dirname(self._html_dir))
self._report_file = open(self.report_path(), 'w')
def close(self):
"""Implementation of Reporter callback."""
self._report_file.close()
# Make sure everything's closed.
for files in self._output_files.values():
for f in files.values():
f.close()
def start_workunit(self, workunit):
"""Implementation of Reporter callback."""
# We use these properties of the workunit to decide how to render information about it.
is_bootstrap = workunit.has_label(WorkUnit.BOOTSTRAP)
is_tool = workunit.has_label(WorkUnit.TOOL)
is_multitool = workunit.has_label(WorkUnit.MULTITOOL)
is_test = workunit.has_label(WorkUnit.TEST)
# Get useful properties from the workunit.
workunit_dict = workunit.to_dict()
if workunit_dict['cmd']:
workunit_dict['cmd'] = linkify(self._buildroot, workunit_dict['cmd'].replace('$', '\\\\$'))
# Create the template arguments.
args = { 'indent': len(workunit.ancestors()) * 10,
'html_path_base': self._html_path_base,
'workunit': workunit_dict,
'header_text': workunit.name,
'initially_open': is_test or not (is_bootstrap or is_tool or is_multitool),
'is_tool': is_tool,
'is_multitool': is_multitool }
args.update({ 'collapsible': lambda x: self._renderer.render_callable('collapsible', x, args) })
# Render the workunit's div.
s = self._renderer.render_name('workunit_start', args)
if is_tool:
# This workunit is a tool invocation, so render the appropriate content.
# We use the same args, slightly modified.
del args['initially_open']
if is_test:
# Have test framework stdout open by default, but not that of other tools.
# This is an arbitrary choice, but one that turns out to be useful to users in practice.
args['stdout_initially_open'] = True
s += self._renderer.render_name('tool_invocation_start', args)
# ... and we're done.
self._emit(s)
# CSS classes from pants.css that we use to style the header text to reflect the outcome.
_outcome_css_classes = ['aborted', 'failure', 'warning', 'success', 'unknown']
def end_workunit(self, workunit):
"""Implementation of Reporter callback."""
# Create the template arguments.
duration = workunit.duration()
timing = '%.3f' % duration
unaccounted_time = None
# Background work may be idle a lot, no point in reporting that as unaccounted.
if self.is_under_main_root(workunit):
unaccounted_time_secs = workunit.unaccounted_time()
if unaccounted_time_secs >= 1 and unaccounted_time_secs > 0.05 * duration:
unaccounted_time = '%.3f' % unaccounted_time_secs
args = { 'workunit': workunit.to_dict(),
'status': workunit.choose(*HtmlReporter._outcome_css_classes),
'timing': timing,
'unaccounted_time': unaccounted_time,
'aborted': workunit.outcome() == WorkUnit.ABORTED }
s = ''
if workunit.has_label(WorkUnit.TOOL):
s += self._renderer.render_name('tool_invocation_end', args)
s += self._renderer.render_name('workunit_end', args)
self._emit(s)
# Update the timings.
def render_timings(timings):
timings_dict = timings.get_all()
for item in timings_dict:
item['timing_string'] = '%.3f' % item['timing']
args = {
'timings': timings_dict
}
return self._renderer.render_name('aggregated_timings', args)
self._overwrite('cumulative_timings', render_timings(self.run_tracker.cumulative_timings))
self._overwrite('self_timings', render_timings(self.run_tracker.self_timings))
# Update the artifact cache stats.
def render_cache_stats(artifact_cache_stats):
def fix_detail_id(e, _id):
return e if isinstance(e, basestring) else e + (_id, )
msg_elements = []
for cache_name, stat in artifact_cache_stats.stats_per_cache.items():
msg_elements.extend([
cache_name + ' artifact cache: ',
# Explicitly set the detail ids, so their displayed/hidden state survives a refresh.
fix_detail_id(items_to_report_element(stat.hit_targets, 'hit'), 'cache-hit-details'),
', ',
fix_detail_id(items_to_report_element(stat.miss_targets, 'miss'), 'cache-miss-details'),
'.'
])
if not msg_elements:
msg_elements = ['No artifact cache use.']
return self._render_message(*msg_elements)
self._overwrite('artifact_cache_stats',
render_cache_stats(self.run_tracker.artifact_cache_stats))
for f in self._output_files[workunit.id].values():
f.close()
def handle_output(self, workunit, label, s):
"""Implementation of Reporter callback."""
if os.path.exists(self._html_dir): # Make sure we're not immediately after a clean-all.
path = os.path.join(self._html_dir, '%s.%s' % (workunit.id, label))
output_files = self._output_files[workunit.id]
if path not in output_files:
f = open(path, 'w')
output_files[path] = f
else:
f = output_files[path]
f.write(self._htmlify_text(s).encode('utf-8'))
# We must flush in the same thread as the write.
f.flush()
_log_level_css_map = {
Report.FATAL: 'fatal',
Report.ERROR: 'error',
Report.WARN: 'warn',
Report.INFO: 'info',
Report.DEBUG: 'debug'
}
def do_handle_log(self, workunit, level, *msg_elements):
"""Implementation of Reporter callback."""
content = '<span class="%s">%s</span>' % \
(HtmlReporter._log_level_css_map[level], self._render_message(*msg_elements))
# Generate some javascript that appends the content to the workunit's div.
args = {
'content_id': uuid.uuid4(), # Identifies this content.
'workunit_id': workunit.id, # The workunit this reporting content belongs to.
'content': content, # The content to append.
}
s = self._renderer.render_name('append_to_workunit', args)
# Emit that javascript to the main report body.
self._emit(s)
def _render_message(self, *msg_elements):
elements = []
detail_ids = []
for element in msg_elements:
# Each element can be a message or a (message, detail) pair, as received by handle_log().
#
# However, as an internal implementation detail, we also allow an element to be a tuple
# (message, detail[, detail_id[, detail_initially_visible]])
#
# - If the detail exists, clicking on the text will toggle display of the detail and close
# all other details in this message.
# - If detail_initially_visible is True, the detail will be displayed by default.
#
# Toggling is managed via detail_ids: when clicking on a detail, it closes all details
# in this message with detail_ids different than that of the one being clicked on.
# We allow detail_id to be explicitly specified, so that the open/closed state can be
# preserved through refreshes. For example, when looking at the artifact cache stats,
# if "hits" are open and "misses" are closed, we want to remember that even after
# the cache stats are updated and the message re-rendered.
if isinstance(element, basestring):
element = [element]
defaults = ('', None, None, False)
# Map assumes None for missing values, so this will pick the default for those.
(text, detail, detail_id, detail_initially_visible) = \
map(lambda x, y: x or y, element, defaults)
element_args = {'text': self._htmlify_text(text) }
if detail is not None:
detail_id = detail_id or uuid.uuid4()
detail_ids.append(detail_id)
element_args.update({
'detail': self._htmlify_text(detail),
'detail_initially_visible': detail_initially_visible,
'detail-id': detail_id
})
elements.append(element_args)
args = { 'elements': elements,
'all-detail-ids': detail_ids }
return self._renderer.render_name('message', args)
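# Illustrative element shapes accepted above (a sketch, not exhaustive):
#   'plain text'                            -> rendered as-is
#   ('3 hits', 'a\nb')                      -> clickable text, random detail id
#   ('3 hits', 'a\nb', 'cache-hit-details') -> clickable text, stable detail id
# A stable detail id is what lets the open/closed state survive a refresh.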
def _emit(self, s):
"""Append content to the main report file."""
if os.path.exists(self._html_dir): # Make sure we're not immediately after a clean-all.
self._report_file.write(s)
self._report_file.flush() # We must flush in the same thread as the write.
def _overwrite(self, filename, s):
"""Overwrite a file with the specified contents."""
if os.path.exists(self._html_dir): # Make sure we're not immediately after a clean-all.
with open(os.path.join(self._html_dir, filename), 'w') as f:
f.write(s)
def _htmlify_text(self, s):
"""Make text HTML-friendly."""
colored = self._handle_ansi_color_codes(cgi.escape(s.decode('utf-8')))
return linkify(self._buildroot, colored).replace('\n', '<br/>')
_ANSI_COLOR_CODE_RE = re.compile(r'\033\[((?:\d|;)*)m')
def _handle_ansi_color_codes(self, s):
"""Replace ansi escape sequences with spans of appropriately named css classes."""
parts = HtmlReporter._ANSI_COLOR_CODE_RE.split(s)
ret = []
span_depth = 0
# Note that len(parts) is always odd: text, code, text, code, ..., text.
for i in range(0, len(parts), 2):
ret.append(parts[i])
if i + 1 < len(parts):
for code in parts[i + 1].split(';'):
if code == '0':  # Reset. (The regex split yields strings, not ints.)
while span_depth > 0:
ret.append('</span>')
span_depth -= 1
else:
ret.append('<span class="ansi-%s">' % code)
span_depth += 1
while span_depth > 0:
ret.append('</span>')
span_depth -= 1
return ''.join(ret)
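# Example of the transformation above (a sketch): given the already-escaped
# input 'plain \033[31mred\033[0m plain', the regex split yields
# ['plain ', '31', 'red', '0', ' plain'] and the method returns
# 'plain <span class="ansi-31">red</span> plain'.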
| |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Produces configured shell abstractions.
This module knows how to produce a configured shell abstraction based on
shell_config.ShellConfig.
"""
import os.path
import sys
import urlparse
from devtoolslib.android_shell import AndroidShell
from devtoolslib.linux_shell import LinuxShell
from devtoolslib.shell_config import ShellConfigurationException
# When spinning up servers for local origins, we want to use predictable ports
# so that caching works between subsequent runs with the same command line.
_LOCAL_ORIGIN_PORT = 31840
_MAPPINGS_BASE_PORT = 31841
def _is_web_url(dest):
return bool(urlparse.urlparse(dest).scheme)
def _host_local_url_destination(shell, dest_file, port):
"""Starts a local server to host |dest_file|.
Returns:
Url of the hosted file.
"""
directory = os.path.dirname(dest_file)
if not os.path.exists(directory):
raise ValueError('local path passed as --map-url destination '
'does not exist')
server_url = shell.serve_local_directory(directory, port)
return server_url + os.path.relpath(dest_file, directory)
def _host_local_origin_destination(shell, dest_dir, port):
"""Starts a local server to host |dest_dir|.
Returns:
Url of the hosted directory.
"""
return shell.serve_local_directory(dest_dir, port)
def _rewrite(mapping, host_destination_function, shell, port):
"""Takes a mapping given as <src>=<dest> and rewrites the <dest> part to be
hosted locally using the given function if <dest> is not a web url.
"""
parts = mapping.split('=')
if len(parts) != 2:
raise ValueError('each mapping value should be in format '
'"<url>=<url-or-local-path>"')
if _is_web_url(parts[1]):
# The destination is a web url, do nothing.
return mapping
src = parts[0]
dest = host_destination_function(shell, parts[1], port)
return src + '=' + dest
def _apply_mappings(shell, original_arguments, map_urls, map_origins):
"""Applies mappings for specified urls and origins. For each local path
specified as destination a local server will be spawned and the mapping will
be rewritten accordingly.
Args:
shell: The shell that is being configured.
original_arguments: Current list of shell arguments.
map_urls: List of url mappings, each in the form of
<url>=<url-or-local-path>.
map_origins: List of origin mappings, each in the form of
<origin>=<url-or-local-path>.
Returns:
The updated argument list.
"""
next_port = _MAPPINGS_BASE_PORT
args = original_arguments
if map_urls:
# Sort the mappings to preserve caching regardless of argument order.
for map_url in sorted(map_urls):
mapping = _rewrite(map_url, _host_local_url_destination, shell, next_port)
next_port += 1
# All url mappings need to be coalesced into one shell argument.
args = append_to_argument(args, '--url-mappings=', mapping)
if map_origins:
for map_origin in sorted(map_origins):
mapping = _rewrite(map_origin, _host_local_origin_destination, shell,
next_port)
next_port += 1
# Origin mappings are specified as separate, repeated shell arguments.
args.append('--map-origin=' + mapping)
return args
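# Example (a sketch; host and port are illustrative): with
# map_origins=['https://example.org/=/tmp/app'] and a shell whose
# serve_local_directory returns 'http://127.0.0.1:31841/', the returned list
# gains '--map-origin=https://example.org/=http://127.0.0.1:31841/'.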
def configure_local_origin(shell, local_dir, fixed_port=True):
"""Sets up a local http server to serve files in |local_dir| along with
device port forwarding if needed.
Returns:
The list of arguments to be appended to the shell argument list.
"""
origin_url = shell.serve_local_directory(
local_dir, _LOCAL_ORIGIN_PORT if fixed_port else 0)
return ["--origin=" + origin_url]
def append_to_argument(arguments, key, value, delimiter=","):
"""Looks for an argument of the form "key=val1,val2" within |arguments| and
appends |value| to it.
If the argument is not present in |arguments| it is added.
Args:
arguments: List of arguments for the shell.
key: Identifier of the argument, including the equal sign, eg.
"--content-handlers=".
value: The value to be appended, after |delimiter|, to the argument.
delimiter: The string used to separate values within the argument.
Returns:
The updated argument list.
"""
assert key and key.endswith('=')
assert value
for i, argument in enumerate(arguments):
if not argument.startswith(key):
continue
arguments[i] = argument + delimiter + value
break
else:
arguments.append(key + value)
return arguments
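# Example of the coalescing behavior (a sketch):
#   append_to_argument(['--foo'], '--content-handlers=', 'a/b,url1')
#     -> ['--foo', '--content-handlers=a/b,url1']
#   append_to_argument(['--content-handlers=a/b,url1'], '--content-handlers=', 'c/d,url2')
#     -> ['--content-handlers=a/b,url1,c/d,url2']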
def _configure_dev_server(shell, shell_args, dev_server_config):
"""Sets up a dev server on the host according to |dev_server_config|.
Args:
shell: The shell that is being configured.
shell_args: Current list of shell arguments.
dev_server_config: Instance of shell_config.DevServerConfig describing the
dev server to be set up.
Returns:
The updated argument list.
"""
server_url = shell.serve_local_directories(dev_server_config.mappings)
shell_args.append('--map-origin=%s=%s' % (dev_server_config.host, server_url))
print "Configured %s locally to serve:" % (dev_server_config.host)
for mapping_prefix, mapping_path in dev_server_config.mappings:
print " /%s -> %s" % (mapping_prefix, mapping_path)
return shell_args
def get_shell(shell_config, shell_args):
"""
Produces a shell abstraction configured according to |shell_config|.
Args:
shell_config: Instance of shell_config.ShellConfig.
shell_args: Additional raw shell arguments to be passed to the shell. We
need to take these into account as some parameters need to appear only
once on the argument list (e.g. url-mappings) so we need to coalesce any
overrides and the existing value into just one argument.
Returns:
A tuple of (shell, shell_args). |shell| is the configured shell abstraction,
|shell_args| is updated list of shell arguments.
Raises:
ShellConfigurationException if the shell abstraction could not be configured.
"""
if shell_config.android:
verbose_pipe = sys.stdout if shell_config.verbose else None
shell = AndroidShell(shell_config.adb_path, shell_config.target_device,
logcat_tags=shell_config.logcat_tags,
verbose_pipe=verbose_pipe)
device_status, error = shell.check_device()
if not device_status:
raise ShellConfigurationException('Device check failed: ' + error)
if shell_config.shell_path:
shell.install_apk(shell_config.shell_path)
else:
if not shell_config.shell_path:
raise ShellConfigurationException('Cannot run without a shell binary. '
'Please pass --shell-path.')
shell = LinuxShell(shell_config.shell_path)
if shell_config.use_osmesa:
shell_args.append('--args-for=mojo:native_viewport_service --use-osmesa')
shell_args = _apply_mappings(shell, shell_args, shell_config.map_url_list,
shell_config.map_origin_list)
if shell_config.origin:
if _is_web_url(shell_config.origin):
shell_args.append('--origin=' + shell_config.origin)
else:
shell_args.extend(configure_local_origin(shell, shell_config.origin,
fixed_port=True))
if shell_config.content_handlers:
for (mime_type,
content_handler_url) in shell_config.content_handlers.iteritems():
shell_args = append_to_argument(shell_args, '--content-handlers=',
'%s,%s' % (mime_type,
content_handler_url))
for dev_server_config in shell_config.dev_servers:
shell_args = _configure_dev_server(shell, shell_args, dev_server_config)
return shell, shell_args
| |
from __future__ import absolute_import
import re
from functools import partial
from typing import Any, Callable, Dict, Text
from django.http import HttpRequest, HttpResponse
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success
from zerver.models import Client, UserProfile
from zerver.decorator import api_key_only_webhook_view, REQ, has_request_variables
from zerver.lib.webhooks.git import get_issue_event_message, SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE,\
get_pull_request_event_message, SUBJECT_WITH_BRANCH_TEMPLATE,\
get_push_commits_event_message, CONTENT_MESSAGE_TEMPLATE,\
get_commits_comment_action_message, get_push_tag_event_message
class UnknownEventType(Exception):
pass
def get_opened_or_update_pull_request_body(payload):
# type: (Dict[str, Any]) -> Text
pull_request = payload['pull_request']
action = payload['action']
if action == 'synchronize':
action = 'updated'
assignee = None
if pull_request.get('assignee'):
assignee = pull_request['assignee']['login']
return get_pull_request_event_message(
get_sender_name(payload),
action,
pull_request['html_url'],
target_branch=pull_request['head']['ref'],
base_branch=pull_request['base']['ref'],
message=pull_request['body'],
assignee=assignee
)
def get_closed_pull_request_body(payload):
# type: (Dict[str, Any]) -> Text
pull_request = payload['pull_request']
action = 'merged' if pull_request['merged'] else 'closed without merge'
return get_pull_request_event_message(
get_sender_name(payload),
action,
pull_request['html_url'],
)
def get_membership_body(payload):
# type: (Dict[str, Any]) -> Text
action = payload['action']
member = payload['member']
scope = payload['scope']
scope_object = payload[scope]
return u"{} {} [{}]({}) to {} {}".format(
get_sender_name(payload),
action,
member['login'],
member['html_url'],
scope_object['name'],
scope
)
def get_member_body(payload):
# type: (Dict[str, Any]) -> Text
return u"{} {} [{}]({}) to [{}]({})".format(
get_sender_name(payload),
payload['action'],
payload['member']['login'],
payload['member']['html_url'],
get_repository_name(payload),
payload['repository']['html_url']
)
def get_issue_body(payload):
# type: (Dict[str, Any]) -> Text
action = payload['action']
issue = payload['issue']
assignee = issue['assignee']
return get_issue_event_message(
get_sender_name(payload),
action,
issue['html_url'],
issue['number'],
issue['body'],
assignee=assignee['login'] if assignee else None
)
def get_issue_comment_body(payload):
# type: (Dict[str, Any]) -> Text
action = payload['action']
comment = payload['comment']
issue = payload['issue']
if action == 'created':
action = '[commented]'
else:
action = '{} a [comment]'.format(action)
action += '({}) on'.format(comment['html_url'])
return get_issue_event_message(
get_sender_name(payload),
action,
issue['html_url'],
issue['number'],
comment['body'],
)
def get_fork_body(payload):
# type: (Dict[str, Any]) -> Text
forkee = payload['forkee']
return u"{} forked [{}]({})".format(
get_sender_name(payload),
forkee['name'],
forkee['html_url']
)
def get_deployment_body(payload):
# type: (Dict[str, Any]) -> Text
return u'{} created a new deployment'.format(
get_sender_name(payload),
)
def get_change_deployment_status_body(payload):
# type: (Dict[str, Any]) -> Text
return u'Deployment changed status to {}'.format(
payload['deployment_status']['state'],
)
def get_create_or_delete_body(payload, action):
# type: (Dict[str, Any], Text) -> Text
ref_type = payload['ref_type']
return u'{} {} {} {}'.format(
get_sender_name(payload),
action,
ref_type,
payload['ref']
).rstrip()
def get_commit_comment_body(payload):
# type: (Dict[str, Any]) -> Text
comment = payload['comment']
comment_url = comment['html_url']
commit_url = comment_url.split('#', 1)[0]
action = u'[commented]({})'.format(comment_url)
return get_commits_comment_action_message(
get_sender_name(payload),
action,
commit_url,
comment.get('commit_id'),
comment['body'],
)
def get_push_tags_body(payload):
# type: (Dict[str, Any]) -> Text
return get_push_tag_event_message(
get_sender_name(payload),
get_tag_name_from_ref(payload['ref']),
action='pushed' if payload.get('created') else 'removed'
)
def get_push_commits_body(payload):
# type: (Dict[str, Any]) -> Text
commits_data = [{
'sha': commit['id'],
'url': commit['url'],
'message': commit['message']
} for commit in payload['commits']]
return get_push_commits_event_message(
get_sender_name(payload),
payload['compare'],
get_branch_name_from_ref(payload['ref']),
commits_data
)
def get_public_body(payload):
# type: (Dict[str, Any]) -> Text
return u"{} made [the repository]({}) public".format(
get_sender_name(payload),
payload['repository']['html_url'],
)
def get_wiki_pages_body(payload):
# type: (Dict[str, Any]) -> Text
wiki_page_info_template = u"* {action} [{title}]({url})\n"
wiki_info = u''
for page in payload['pages']:
wiki_info += wiki_page_info_template.format(
action=page['action'],
title=page['title'],
url=page['html_url'],
)
return u"{}:\n{}".format(get_sender_name(payload), wiki_info.rstrip())
def get_watch_body(payload):
# type: (Dict[str, Any]) -> Text
return u"{} starred [the repository]({})".format(
get_sender_name(payload),
payload['repository']['html_url']
)
def get_repository_body(payload):
# type: (Dict[str, Any]) -> Text
return u"{} {} [the repository]({})".format(
get_sender_name(payload),
payload.get('action'),
payload['repository']['html_url']
)
def get_add_team_body(payload):
# type: (Dict[str, Any]) -> Text
return u"[The repository]({}) was added to team {}".format(
payload['repository']['html_url'],
payload['team']['name']
)
def get_release_body(payload):
# type: (Dict[str, Any]) -> Text
return u"{} published [the release]({})".format(
get_sender_name(payload),
payload['release']['html_url'],
)
def get_page_build_body(payload):
# type: (Dict[str, Any]) -> Text
build = payload['build']
action = build['status']
if action is None:  # A JSON null status decodes to None.
action = u'has yet to be built'
elif action == 'building':
action = u'is being built'
elif action == 'errored':
action = u'is errored{}'.format(
CONTENT_MESSAGE_TEMPLATE.format(message=build['error']['message'])
)
else:
action = u'is {}'.format(action)
return u"Github Pages build, trigerred by {}, {}".format(
payload['build']['pusher']['login'],
action
)
def get_status_body(payload):
# type: (Dict[str, Any]) -> Text
if payload['target_url']:
status = '[{}]({})'.format(
payload['state'],
payload['target_url']
)
else:
status = payload['state']
return u"[{}]({}) changed it's status to {}".format(
payload['sha'][:7], # TODO
payload['commit']['html_url'],
status
)
def get_pull_request_review_body(payload):
# type: (Dict[str, Any]) -> Text
return get_pull_request_event_message(
get_sender_name(payload),
'submitted',
payload['review']['html_url'],
type='PR Review'
)
def get_pull_request_review_comment_body(payload):
# type: (Dict[str, Any]) -> Text
action = payload['action']
message = None
if action == 'created':
message = payload['comment']['body']
return get_pull_request_event_message(
get_sender_name(payload),
action,
payload['comment']['html_url'],
message=message,
type='PR Review Comment'
)
def get_repository_name(payload):
# type: (Dict[str, Any]) -> Text
return payload['repository']['name']
def get_sender_name(payload):
# type: (Dict[str, Any]) -> Text
return payload['sender']['login']
def get_branch_name_from_ref(ref_string):
# type: (Text) -> Text
return re.sub(r'^refs/heads/', '', ref_string)
def get_tag_name_from_ref(ref_string):
# type: (Text) -> Text
return re.sub(r'^refs/tags/', '', ref_string)
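# Examples (a sketch):
#   get_branch_name_from_ref('refs/heads/master') -> 'master'
#   get_tag_name_from_ref('refs/tags/v1.0.0')     -> 'v1.0.0'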
def is_commit_push_event(payload):
# type: (Dict[str, Any]) -> bool
return bool(re.match(r'^refs/heads/', payload['ref']))
def get_subject_based_on_type(payload, event):
# type: (Dict[str, Any], Text) -> Text
if 'pull_request' in event:
return SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=get_repository_name(payload),
type='PR',
id=payload['pull_request']['number'],
title=payload['pull_request']['title']
)
elif event.startswith('issue'):
return SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=get_repository_name(payload),
type='Issue',
id=payload['issue']['number'],
title=payload['issue']['title']
)
elif event.startswith('deployment'):
return u"{} / Deployment on {}".format(
get_repository_name(payload),
payload['deployment']['environment']
)
elif event == 'membership':
return u"{} organization".format(payload['organization']['login'])
elif event == 'push_commits':
return SUBJECT_WITH_BRANCH_TEMPLATE.format(
repo=get_repository_name(payload),
branch=get_branch_name_from_ref(payload['ref'])
)
elif event == 'gollum':
return SUBJECT_WITH_BRANCH_TEMPLATE.format(
repo=get_repository_name(payload),
branch='Wiki Pages'
)
return get_repository_name(payload)
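# Example (a sketch, assuming SUBJECT_WITH_BRANCH_TEMPLATE renders as
# '<repo> / <branch>'): a push to 'refs/heads/master' of repo 'zulip'
# yields the subject 'zulip / master'.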
EVENT_FUNCTION_MAPPER = {
'team_add': get_add_team_body,
'commit_comment': get_commit_comment_body,
'closed_pull_request': get_closed_pull_request_body,
'create': partial(get_create_or_delete_body, action='created'),
'delete': partial(get_create_or_delete_body, action='deleted'),
'deployment': get_deployment_body,
'deployment_status': get_change_deployment_status_body,
'fork': get_fork_body,
'gollum': get_wiki_pages_body,
'issue_comment': get_issue_comment_body,
'issue': get_issue_body,
'member': get_member_body,
'membership': get_membership_body,
'opened_or_update_pull_request': get_opened_or_update_pull_request_body,
'page_build': get_page_build_body,
'public': get_public_body,
'pull_request_review': get_pull_request_review_body,
'pull_request_review_comment': get_pull_request_review_comment_body,
'push_commits': get_push_commits_body,
'push_tags': get_push_tags_body,
'release': get_release_body,
'repository': get_repository_body,
'status': get_status_body,
'watch': get_watch_body,
}
@api_key_only_webhook_view('Github')
@has_request_variables
def api_github_webhook(
request, user_profile, client,
payload=REQ(argument_type='body'), stream=REQ(default='github')):
# type: (HttpRequest, UserProfile, Client, Dict[str, Any], Text) -> HttpResponse
event = get_event(request, payload)
subject = get_subject_based_on_type(payload, event)
body = get_body_function_based_on_type(event)(payload)
check_send_message(user_profile, client, 'stream', [stream], subject, body)
return json_success()
def get_event(request, payload):
# type: (HttpRequest, Dict[str, Any]) -> str
event = request.META['HTTP_X_GITHUB_EVENT']
if event == 'pull_request':
action = payload['action']
if action == 'opened' or action == 'synchronize':
return 'opened_or_update_pull_request'
if action == 'closed':
return 'closed_pull_request'
raise UnknownEventType(u'Event pull_request with {} action is unsupported'.format(action))
if event == 'push':
if is_commit_push_event(payload):
return "push_commits"
else:
return "push_tags"
elif event in list(EVENT_FUNCTION_MAPPER.keys()):
return event
raise UnknownEventType(u'Event {} is unknown and cannot be handled'.format(event))
def get_body_function_based_on_type(event):
# type: (str) -> Any
return EVENT_FUNCTION_MAPPER.get(event)
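# Illustrative dispatch flow (a sketch with hypothetical payload values):
# a request carrying the header 'X-GitHub-Event: push' whose payload has
# ref='refs/heads/master' resolves to the event 'push_commits', and the message
# body is then produced by EVENT_FUNCTION_MAPPER['push_commits'](payload).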
| |
import flask
from flask import Flask, render_template, request
import json
import time
app = Flask(__name__)
form_post_count = 0
# Ensure application exceptions are raised.
app.debug = True
class AppError(Exception):
pass
@app.route("/")
def hello():
resp = flask.make_response("""Hello world! <a href="with_html">Relative</a>""")
resp.set_cookie("capybara", "root cookie")
return resp
@app.route("/foo")
def foo():
return "Another World"
@app.route("/redirect", methods=["GET", "POST"])
def redirect():
return flask.redirect("/redirect_again")
@app.route("/redirect_again")
def redirect_again():
return flask.redirect("/landed")
@app.route("/redirect_307", methods=["POST"])
def redirect_307():
return flask.redirect("/landed", code=307)
@app.route("/redirect_308", methods=["POST"])
def redirect_308():
return flask.redirect("/landed", code=308)
@app.route("/referrer_base")
def referrer_base():
return """
<a href="/get_referrer">direct link</a>
<a href="/redirect_to_get_referrer">link via redirect</a>
<form action="/get_referrer" method="get"><input type="submit"></form>
"""
@app.route("/redirect_to_get_referrer")
def redirect_to_get_referrer():
return flask.redirect("/get_referrer")
@app.route("/get_referrer")
def get_referrer():
return "No referrer" if request.referrer is None else "Got referrer: {0}".format(request.referrer)
@app.route("/host")
def host():
return "Current host is {0}://{1}".format(request.scheme, request.host)
@app.route("/redirect/<int:times>/times")
def redirect_n_times(times):
if times == 0:
return "redirection complete"
else:
return flask.redirect("/redirect/{0}/times".format(times - 1))
@app.route("/landed", methods=["GET"])
def landed():
return "You landed"
@app.route("/landed", methods=["POST"])
def post_landed():
return "You post landed: {}".format(request.form.get('form[data]'))
@app.route("/with-quotes")
def with_quotes():
return "\"No,\" he said, \"you can't do that.\""
@app.route("/form/get", methods=["GET"])
@app.route("/relative", methods=["POST"])
@app.route("/form", methods=["POST"])
def results():
global form_post_count
form_post_count += 1
data = request.args.copy()
data.update(request.form)
data.update({"post_count": form_post_count})
return """<pre id="results">""" + json.dumps(data.to_dict(flat=False)) + """</pre>"""
@app.route("/favicon.ico")
def favicon():
return ""
@app.route("/delete", methods=["DELETE"])
def deleted():
return "The requested object was deleted"
@app.route("/delete", methods=["GET"])
def not_deleted():
return "Not deleted"
@app.route("/redirect_back")
def redirect_back():
return flask.redirect(request.referrer)
@app.route("/redirect_secure")
def redirect_secure():
return flask.redirect("http://{0}/host".format(request.host))
@app.route("/slow_response")
def slow_response():
time.sleep(2)
return "Finally!"
@app.route("/set_cookie")
def set_cookie():
cookie_value = "test_cookie"
resp = flask.make_response("Cookie set to {0}".format(cookie_value))
resp.set_cookie("capybara", cookie_value)
return resp
@app.route("/get_cookie")
def get_cookie():
return request.cookies.get("capybara", "")
@app.route("/get_header")
def get_header():
return request.headers.get("Foo", "")
@app.route("/get_header_via_redirect")
def get_header_via_redirect():
return flask.redirect("/get_header")
@app.route("/error")
def error():
raise AppError()
@app.route("/import_error")
def import_error():
raise ImportError("Simulated ImportError")
@app.route("/with_html")
def with_html():
return render_template("with_html.html")
@app.route("/with_simple_html")
def with_simple_html():
return render_template("with_simple_html.html")
@app.route("/<name>")
def view(name):
return render_template("{}.html".format(name))
@app.route("/upload_empty", methods=["POST"])
def upload_empty():
f = request.files.get("form[file]")
if not f:
return "Successfully ignored empty file field."
else:
return "Something went wrong."
@app.route("/upload", methods=["POST"])
def upload():
document = request.files.get("form[document]")
if document and document.filename:
buf = []
buf.append("Content-type: {0}".format(document.mimetype))
buf.append("File content: {0}".format(document.read()))
return " | ".join(buf)
else:
return "No file uploaded"
@app.route("/upload_multiple", methods=["POST"])
def upload_multiple():
documents = request.files.getlist("form[multiple_documents][]")
documents = [doc for doc in documents if doc.filename]
if len(documents):
buf = [str(len(documents))]
for document in documents:
buf.append("Content-type: {0}".format(document.mimetype))
buf.append("File content: {0}".format(document.read()))
return " | ".join(buf)
else:
return "No files uploaded"
if __name__ == "__main__":
app.run()
| |
import collections
import enum
import functools
from datetime import datetime
import discord
from discord.ext import commands
from more_itertools import one
from ..utils import db, time
from ..utils.examples import static_example
from ..utils.formats import multi_replace
from ..utils.misc import nice_time, ordinal
class ServerMessages(db.Table, table_name='server_messages'):
guild_id = db.Column(db.BigInt)
is_welcome = db.Column(db.Boolean)
channel_id = db.Column(db.BigInt, nullable=False)
message = db.Column(db.Text, nullable=True)
delete_after = db.Column(db.SmallInt, default=0)
enabled = db.Column(db.Boolean, default=False)
_DEFAULT_CHANNEL_CHANGE_URL = ('https://github.com/discordapp/discord-api-docs/blob/master/docs/'
'Change_Log.md#breaking-change-default-channels')
fields = 'guild_id is_welcome channel_id message delete_after enabled'.split()
ServerMessage = collections.namedtuple('ServerMessage', fields)
ServerMessage.__new__.__defaults__ = (None, ) * len(fields)
del fields
_server_message_check = functools.partial(commands.has_permissions, manage_guild=True)
class ServerMessageType(enum.Enum):
leave = False
welcome = True
def __str__(self):
return self.name
@property
def action(self):
return _lookup[self][0]
@property
def past_tense(self):
return _lookup[self][1]
@property
def command_name(self):
return _lookup[self][2]
@property
def toggle_text(self):
return _lookup[self][3]
_lookup = {
ServerMessageType.leave: ('leaves', 'left', 'bye', 'mourn the loss of members ;-;'),
ServerMessageType.welcome: ('joins', 'joined', 'welcome', 'welcome all new members to the server! ^o^')
}
@static_example
def special_message(message):
return message if '{user}' in message else f'{{user}}{message}'
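# Examples (a sketch; assumes static_example leaves the callable usable as-is):
#   special_message('Welcome, {user}!') -> 'Welcome, {user}!'  (placeholder kept)
#   special_message(' joined us!')      -> '{user} joined us!' (placeholder prepended)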
class WelcomeMessages:
"""Commands related to welcome and leave messages."""
# TODO: Put this in a config module.
def __init__(self, bot):
self.bot = bot
# ------------ config helper functions --------------------
async def _get_server_config(self, guild_id, thing, *, connection=None):
connection = connection or self.bot.pool
query = "SELECT * FROM server_messages WHERE guild_id = $1 AND is_welcome = $2"
row = await connection.fetchrow(query, guild_id, thing.value)
return ServerMessage(**row) if row else None
async def _update_server_config(self, ctx, thing, **kwarg):
column, value = one(kwarg.items())
query = f"""INSERT INTO server_messages (guild_id, is_welcome, {column})
VALUES ($1, $2, $3)
ON CONFLICT (guild_id, is_welcome)
DO UPDATE SET {column} = $3
"""
await ctx.db.execute(query, ctx.guild.id, thing.value, value)
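# Example (a sketch): _update_server_config(ctx, ServerMessageType.welcome,
# enabled=True) upserts the single row keyed on (guild_id, is_welcome), so
# repeated calls update the per-guild welcome config instead of inserting
# duplicates. (ON CONFLICT assumes a unique constraint on that pair, which
# isn't shown in the table definition above.)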
async def _show_server_config(self, ctx, thing):
config = await self._get_server_config(ctx.guild.id, thing, connection=ctx.db)
if not config:
commands = sorted(ctx.command.commands, key=str)
message = ("Um... you haven't even set this at all...\n"
f"Please use one of the {len(commands)} subcommands to get started.")
embed = discord.Embed(colour=0xf44336, description=message)
for c in commands:
embed.add_field(name=f'{ctx.prefix} {c}', value=c.short_doc)
return await ctx.send(embed=embed)
colour, prefix = (0x4CAF50, 'en') if config.enabled else (0xf44336, 'dis')
message = (f'**Message:**\n{config.message}'
if config.message else
f"Set one using `{thing.command_name} message`.")
embed = (discord.Embed(colour=colour, description=message)
.set_author(name=f'{thing.name.title()} Status: {prefix}abled')
)
ch_id = config.channel_id
if ch_id == -1:
ch_field = f"Set a channel using `{thing.command_name} channel channel`."
else:
channel = ctx.bot.get_channel(ch_id)
if channel:
ch_field = channel.mention
else:
ch_field = (
"Deleted.\nSet a new one using\n"
f"`{ctx.clean_prefix}{thing.command_name} channel your_channel`"
)
embed.add_field(name='Channel', value=ch_field, inline=False)
if config.delete_after > 0:
embed.add_field(
name='Message will be deleted after',
value=time.duration_units(config.delete_after),
inline=False
)
await ctx.send(embed=embed)
async def _toggle_config(self, ctx, do_thing, *, thing):
if do_thing is None:
await self._show_server_config(ctx, thing)
else:
await self._update_server_config(ctx, thing, enabled=do_thing)
to_say = (f"Yay I will {thing.toggle_text}" if do_thing else
"Oki I'll just sit in my corner then :~")
await ctx.send(to_say)
async def _message_config(self, ctx, message, *, thing):
if message:
await self._update_server_config(ctx, thing, message=message)
await ctx.send(f"{thing.name.title()} message has been set to *{message}*")
else:
config = await self._get_server_config(ctx.guild.id, thing, connection=ctx.db)
to_say = (f"I will say {config.message} to the user."
if (config and config.message) else
"I won't say anything...")
await ctx.send(to_say)
async def _channel_config(self, ctx, channel, *, thing):
if channel:
await self._update_server_config(ctx, thing, channel_id=channel.id)
await ctx.send(f'Ok, {channel.mention} it is then!')
else:
config = await self._get_server_config(ctx.guild.id, thing, connection=ctx.db)
channel = self.bot.get_channel(getattr(config, 'channel_id', None))
if channel:
message = f"I'm gonna say the {thing} message in {channel.mention}"
else:
message = ("I don't have a channel at the moment, "
f"set one with `{ctx.prefix}{ctx.command} my_channel`")
await ctx.send(message)
async def _delete_after_config(self, ctx, duration, *, thing):
if duration is None:
config = await self._get_server_config(ctx.guild.id, thing, connection=ctx.db)
duration = config.delete_after if config else 0
message = (f"I won't delete the {thing} message." if duration < 0 else
f"I will delete the {thing} message after {time.duration_units(duration)}.")
await ctx.send(message)
else:
await self._update_server_config(ctx, thing, delete_after=duration)
message = (f"Ok, I'm deleting the {thing} message after {time.duration_units(duration)}"
if duration > 0 else
f"Ok, I won't delete the {thing} message.")
await ctx.send(message)
# --------------------- commands -----------------------
def _do_command(*, thing):
_toggle_help = f"""
Sets whether or not I announce when someone {thing.action} the server.
Specifying with no arguments will toggle it.
"""
_channel_help = f"""
Sets the channel where I will {thing}.
If no arguments are given, it shows the current channel.
This **must** be specified due to the fact that default channels
are no longer a thing. ([see here]({_DEFAULT_CHANNEL_CHANGE_URL}))
If this isn't specified, or the channel was deleted, the message
will not show.
"""
_delete_after_help = f"""
Sets the time it takes for {thing} messages to be auto-deleted.
Passing it with no arguments will return the current duration.
A number less than or equal to 0 will disable automatic deletion.
"""
_message_help = f"""
Sets the bot's message when a member {thing.action} this server.
The following special formats can be in the message:
`{{user}}` = The member that {thing.past_tense}. If one isn't placed,
it's placed at the beginning of the message.
`{{uid}}` = The ID of the member that {thing.past_tense}.
`{{server}}` = The name of the server.
`{{count}}` = How many members are in the server now.
`{{countord}}` = Like `{{count}}`, but as an ordinal
(e.g. instead of `5` it becomes `5th`).
`{{time}}` = The date and time when the member {thing.past_tense}.
"""
@commands.group(name=thing.command_name, help=_toggle_help, invoke_without_command=True)
@_server_message_check()
async def group(self, ctx, enable: bool = None):
await self._toggle_config(ctx, enable, thing=thing)
@group.command(name='message', help=_message_help)
@_server_message_check()
async def group_message(self, ctx, *, message: special_message):
await self._message_config(ctx, message, thing=thing)
@group.command(name='channel', help=_channel_help)
@_server_message_check()
async def group_channel(self, ctx, *, channel: discord.TextChannel):
await self._channel_config(ctx, channel, thing=thing)
@group.command(name='delete', help=_delete_after_help)
@_server_message_check()
async def group_delete(self, ctx, *, duration: int):
await self._delete_after_config(ctx, duration, thing=thing)
return group, group_message, group_channel, group_delete
welcome, welcome_message, welcome_channel, welcome_delete = _do_command(
thing=ServerMessageType.welcome,
)
bye, bye_message, bye_channel, bye_delete = _do_command(
thing=ServerMessageType.leave,
)
# ----------------- events ------------------------
async def _maybe_do_message(self, member, thing, time):
guild = member.guild
config = await self._get_server_config(guild.id, thing)
if not (config and config.enabled):
return
channel_id = config.channel_id
channel = self.bot.get_channel(channel_id)
if channel is None:
return
message = config.message
if not message:
return
member_count = guild.member_count
replacements = {
'{user}': member.mention,
'{uid}': str(member.id),
'{server}': str(guild),
'{count}': str(member_count),
'{countord}': ordinal(member_count),
# TODO: Should I use %c...?
'{time}': nice_time(time)
}
delete_after = config.delete_after
if delete_after <= 0:
delete_after = None
# Not using str.format because that will raise KeyError on anything surrounded in {}
message = multi_replace(message, replacements)
await channel.send(message, delete_after=delete_after)
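# Sketch of the substitution relied on above (an assumption about
# ..utils.formats.multi_replace, not its actual source): replace every key of
# |replacements| found in the message, leaving any other {...} intact.
#
#   import re
#   def _multi_replace_sketch(text, replacements):
#       keys = sorted(replacements, key=len, reverse=True)  # longest first
#       pattern = re.compile('|'.join(re.escape(k) for k in keys))
#       return pattern.sub(lambda m: replacements[m.group(0)], text)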
async def on_member_join(self, member):
await self._maybe_do_message(member, ServerMessageType.welcome, member.joined_at)
# XXX: Reduce the repetition between these two event handlers.
async def on_member_remove(self, member):
await self._maybe_do_message(member, ServerMessageType.leave, datetime.utcnow())
def setup(bot):
bot.add_cog(WelcomeMessages(bot))
| |
# -*- coding: utf-8 -*-
import re
import struct
import msgpack
from threading import Lock
import tarantool
def unpack_long_long(value):
return struct.unpack("<q", value)[0]
def unpack_long(value):
return struct.unpack("<l", value)[0]
class Task(object):
"""
Tarantool queue task wrapper.
.. warning::
Don't instantiate it with your bare hands
"""
def __init__(self, queue, space=0, task_id=0,
tube="", status="", raw_data=None):
self.task_id = task_id
self.tube = tube
self.status = status
self.raw_data = raw_data
self.space = space
self.queue = queue
self.modified = False
def ack(self):
"""
Confirm completion of a task, marking it as complete in the queue.
:rtype: boolean
"""
self.modified = True
return self.queue._ack(self.task_id)
def release(self, **kwargs):
"""
Return a task back to the queue: the task is not executed.
:param ttl: new time to live
:param delay: new delay for task
:type ttl: int
:type delay: int
:rtype: `Task` instance
"""
self.modified = True
return self.queue._release(self.task_id, **kwargs)
def delete(self):
"""
Delete a task from the queue (regardless of task state or status).
:rtype: boolean
"""
self.modified = True
return self.queue._delete(self.task_id)
def requeue(self):
"""
Return a task to the queue, the task is not executed.
Puts the task at the end of the queue, so that it's
executed only after all existing tasks in the queue are
executed.
:rtype: boolean
"""
self.modified = True
return self.queue._requeue(self.task_id)
def done(self, data):
"""
Mark a task as complete (done), but don't delete it.
Replaces task data with the supplied data.
:param data: Data for pushing into queue
:rtype: boolean
"""
self.modified = True
the_tuple = self.tnt.call("queue.done", (
str(self.queue.space),
str(task_id),
self.tube.serialize(data))
)
return the_tuple.return_code == 0
def bury(self):
"""
Mark a task as buried. This special status excludes the
task from the active list, until it's dug up. This function
is useful when several attempts to execute a task lead to a
failure. Buried tasks can be monitored by the queue owner,
and treated specially.
:rtype: boolean
"""
self.modified = True
return self.queue._bury(self.task_id)
def dig(self):
"""
'Dig up' a buried task, after checking that the task is buried.
The task status is changed to ready.
:rtype: boolean
"""
self.modified = True
return self.queue._dig(self.task_id)
def meta(self):
"""
Return unpacked task metadata.
:rtype: dict with metainformation or None
"""
return self.queue._meta(self.task_id)
def touch(self):
"""
Prolong living time for taken task with this id.
:rtype: boolean
"""
return self.queue._touch(self.task_id)
@property
def data(self):
if not self.raw_data:
return None
if not hasattr(self, '_decoded_data'):
self._decoded_data = self.queue.tube(
self.tube).deserialize(self.raw_data)
return self._decoded_data
def __str__(self):
args = (
self.task_id, self.tube, self.status, self.space
)
return "Task (id: {0}, tube:{1}, status: {2}, space:{3})".format(*args)
def __del__(self):
if self.status == 'taken' and not self.modified:
self.release()
@classmethod
def from_tuple(cls, queue, the_tuple):
if the_tuple is None:
return
if the_tuple.rowcount < 1:
raise Queue.ZeroTupleException('error creating task')
row = the_tuple[0]
return cls(
queue,
space=queue.space,
task_id=row[0],
tube=row[1],
status=row[2],
raw_data=row[3],
)
class Tube(object):
"""
Tarantool queue tube wrapper. Pinned to space and tube, but unlike Queue
it has predefined delay, ttl, ttr, and pri.
.. warning::
Don't instantiate it with your bare hands
"""
def __init__(self, queue, name, **kwargs):
self.queue = queue
self.name = name
self.opt = {
'delay': 0,
'ttl': 0,
'ttr': 0,
'pri': 0,
'tube': name
}
self.opt.update(kwargs)
self._serialize = None
self._deserialize = None
#----------------
@property
def serialize(self):
"""
Serialize function: must be Callable or None. Sets None when deleted
"""
if self._serialize is None:
return self.queue.serialize
return self._serialize
@serialize.setter
def serialize(self, func):
if not (hasattr(func, '__call__') or func is None):
raise TypeError("func must be Callable "
"or None, but not " + str(type(func)))
self._serialize = func
@serialize.deleter
def serialize(self):
self._serialize = None
#----------------
@property
def deserialize(self):
"""
Deserialize function: must be Callable or None. Sets None when deleted
"""
if self._deserialize is None:
return self.queue.deserialize
return self._deserialize
@deserialize.setter
def deserialize(self, func):
if not (hasattr(func, '__call__') or func is None):
raise TypeError("func must be Callable "
"or None, but not " + str(type(func)))
self._deserialize = func
@deserialize.deleter
def deserialize(self):
self._deserialize = None
#----------------
def update_options(self, **kwargs):
"""
Update options for current tube (such as ttl, ttr, pri and delay)
"""
self.opt.update(kwargs)
def put(self, data, **kwargs):
"""
Enqueue a task. Returns a `Task` instance representing the new task.
The list of fields with task data ('...') is optional.
If urgent is set to True then the task will get the highest priority.
:param data: Data for pushing into queue
:param urgent: make task urgent (Not necessary, False by default)
:param delay: new delay for task (Not necessary, Default of Tube object)
:param ttl: new time to live (Not necessary, Default of Tube object)
:param ttr: time to release (Not necessary, Default of Tube object)
:param tube: name of Tube (Not necessary, Default of Tube object)
:param pri: priority (Not necessary, Default of Tube object)
:type ttl: int
:type delay: int
:type ttr: int
:type tube: string
:type urgent: boolean
:rtype: `Task` instance
"""
opt = dict(self.opt, **kwargs)
method = "queue.tube.%s.put" % self.name
if "urgent" in kwargs and kwargs["urgent"]:
opt["delay"] = 0
method = "queue.%s.urgent" % self.name
the_tuple = self.queue.tnt.call(method, (
str(self.queue.space),
str(opt["tube"]),
str(opt["delay"]),
str(opt["ttl"]),
str(opt["ttr"]),
str(opt["pri"]),
self.serialize(data))
)
return Task.from_tuple(self.queue, the_tuple)
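# Illustrative usage (a sketch; assumes a reachable Tarantool server with the
# queue Lua module loaded on space 0):
#
#   queue = Queue('localhost', 33013, space=0)
#   tube = queue.tube('email', ttl=3600)
#   task = tube.put({'to': 'user@example.com'}, pri=1)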
def urgent(self, data=None, **kwargs):
"""
Same as :meth:`Tube.put() <tarantool_queue.Tube.put>` put, but set highest priority for this task.
"""
kwargs['urgent'] = True
return self.put(data, **dict(self.opt, **kwargs))
def take(self, timeout=0):
"""
If there are tasks in the queue ready for execution,
take the highest-priority task. Otherwise, wait for a
ready task to appear in the queue, and, as soon as it appears,
mark it as taken and return to the consumer. If there is a
timeout, and the task doesn't appear until the timeout expires,
return 'None'. If timeout is None, wait indefinitely until
a task appears.
:param timeout: timeout to wait.
:type timeout: int or None
:rtype: `Task` instance or None
"""
return self.queue._take(self.opt['tube'], timeout)
def kick(self, count=None):
"""
'Dig up' count tasks in a queue. If count is not given, digs up
just one buried task.
:rtype: boolean
"""
return self.queue._kick(self.opt['tube'], count)
def statistics(self):
"""
See :meth:`Queue.statistics() <tarantool_queue.Queue.statistics>` for more information.
"""
return self.queue.statistics(tube=self.opt['tube'])
class Queue(object):
"""
Tarantool queue wrapper. Surely pinned to space. May create tubes.
By default it uses msgpack for serialization, but you may redefine
serialize and deserialize methods.
You must use Queue only for creating Tubes.
For more usage, please, look into tests.
Usage:
>>> from tntqueue import Queue
>>> queue = Queue()
>>> tube1 = queue.create_tube('holy_grail', ttl=100, delay=5)
# Put task into the queue
>>> tube1.put([1, 2, 3])
# Put task into the beginning of the queue (highest priority)
>>> tube1.urgent([2, 3, 4])
>>> tube1.get() # We get task and automatically release it
>>> task1 = tube1.take()
>>> task2 = tube1.take()
>>> print(task1.data)
[2, 3, 4]
>>> print(task2.data)
[1, 2, 3]
>>> del task2
>>> del task1
>>> print(tube1.take().data)
[1, 2, 3]
# Take task and Ack it
>>> tube1.take().ack()
True
"""
DataBaseError = tarantool.DatabaseError
NetworkError = tarantool.NetworkError
class BadConfigException(Exception):
pass
class ZeroTupleException(Exception):
pass
@staticmethod
def basic_serialize(data):
return msgpack.packb(data)
@staticmethod
def basic_deserialize(data):
return msgpack.unpackb(data)
def __init__(self, host="localhost", port=33013, space=0):
if not(host and port):
raise Queue.BadConfigException("host and port params "
"must be not empty")
if not isinstance(port, int):
raise Queue.BadConfigException("port must be int")
if not isinstance(space, int):
raise Queue.BadConfigException("space must be int")
self.host = host
self.port = port
self.space = space
# self.schema = schema
self._instance_lock = Lock()
self.tubes = {}
self._serialize = self.basic_serialize
self._deserialize = self.basic_deserialize
#----------------
@property
def serialize(self):
"""
Serialize function: must be Callable or None. Resets to basic_serialize
when deleted.
"""
if self._serialize is None:
return self.basic_serialize
return self._serialize
@serialize.setter
def serialize(self, func):
if not (hasattr(func, '__call__') or func is None):
raise TypeError("func must be Callable "
"or None, but not " + str(type(func)))
self._serialize = func
@serialize.deleter
def serialize(self):
self._serialize = self.basic_serialize
#----------------
@property
def deserialize(self):
"""
Deserialize function: must be Callable or None. Resets to basic_deserialize
when deleted.
"""
if self._deserialize is None:
return self.basic_deserialize
return self._deserialize
@deserialize.setter
def deserialize(self, func):
if not (hasattr(func, '__call__') or func is None):
raise TypeError("func must be Callable "
"or None, but not " + str(type(func)))
self._deserialize = func
@deserialize.deleter
def deserialize(self):
self._deserialize = self.basic_deserialize
#----------------
@property
def tnt(self):
with self._instance_lock:
if not hasattr(self, '_tnt'):
self._tnt = tarantool.connect(self.host, self.port)
return self._tnt
def _take(self, tube, timeout=0):
args = [str(self.space), str(tube)]
if timeout is not None:
args.append(str(timeout))
the_tuple = self.tnt.call("queue.take", tuple(args))
if the_tuple.rowcount == 0:
return None
return Task.from_tuple(self, the_tuple)
def _ack(self, task_id):
args = (str(self.space), task_id)
the_tuple = self.tnt.call("queue.ack", args)
return the_tuple.return_code == 0
def _release(self, task_id, delay=0, ttl=0):
the_tuple = self.tnt.call("queue.release", (
str(self.space),
str(task_id),
str(delay),
str(ttl)
))
return Task.from_tuple(self, the_tuple)
def _requeue(self, task_id):
args = (str(self.space), task_id)
the_tuple = self.tnt.call("queue.requeue", args)
return the_tuple.return_code == 0
def _bury(self, task_id):
args = (str(self.space), task_id)
the_tuple = self.tnt.call("queue.bury", args)
return the_tuple.return_code == 0
def _delete(self, task_id):
args = (str(self.space), task_id)
the_tuple = self.tnt.call("queue.delete", args)
return the_tuple.return_code == 0
def _meta(self, task_id):
args = (str(self.space), task_id)
the_tuple = self.tnt.call("queue.meta", args)
if the_tuple.rowcount:
row = list(the_tuple[0])
for index in [3, 7, 8, 9, 10, 11, 12]:
row[index] = unpack_long_long(row[index])
for index in [6]:
row[index] = unpack_long(row[index])
keys = [
'task_id', 'tube', 'status', 'event', 'ipri',
'pri', 'cid', 'created', 'ttl', 'ttr', 'cbury',
'ctaken', 'now'
]
return dict(zip(keys, row))
return None
def peek(self, task_id):
"""
Return a task by task id.
:param task_id: UUID of task in HEX
:type task_id: string
:rtype: `Task` instance
"""
args = (str(self.space), task_id)
the_tuple = self.tnt.call("queue.peek", args)
return Task.from_tuple(self, the_tuple)
def _dig(self, task_id):
args = (str(self.space), task_id)
the_tuple = self.tnt.call("queue.dig", args)
return the_tuple.return_code == 0
def _kick(self, tube, count=None):
args = [str(self.space), str(tube)]
if count:
args.append(str(count))
the_tuple = self.tnt.call("queue.kick", tuple(args))
return the_tuple.return_code == 0
def statistics(self, tube=None):
"""
Return queue module statistics accumulated since server start.
Output format: if tube is not None, then output is a dictionary with
stats of the given tube. If tube is None, then output is a dict of
{tube_name: tube stats, ...}
e.g.:
>>> tube.statistics()
# or queue.statistics('tube0')
# or queue.statistics(tube.opt['tube'])
{'ack': '233',
'meta': '35',
'put': '153',
'release': '198',
'take': '431',
'take_timeout': '320',
'tasks': {'buried': '0',
'delayed': '0',
'done': '0',
'ready': '0',
'taken': '0',
'total': '0'},
'urgent': '80'}
or
>>> queue.statistics()
{'tube0': {'ack': '233',
'meta': '35',
'put': '153',
'release': '198',
'take': '431',
'take_timeout': '320',
'tasks': {'buried': '0',
'delayed': '0',
'done': '0',
'ready': '0',
'taken': '0',
'total': '0'},
'urgent': '80'}}
:param tube: Name of tube
:type tube: string or None
:rtype: dict with statistics
"""
args = (str(self.space),)
args = args if tube is None else args + (tube,)
stat = self.tnt.call("queue.statistics", args)
ans = {}
if stat.rowcount > 0:
for k, v in dict(zip(stat[0][0::2], stat[0][1::2])).iteritems():
k_t = re.split(r'space([^.]*)\.(.*)\.([^.]*)', k)[1:3]
task = False
if int(k_t[0]) != self.space:
continue
if k_t[-1] in ('total', 'ready', 'delayed', 'taken', 'buried', 'done'):
k_t = map((lambda x: x[::-1]), k_t[1]
[::-1].split('.', 1))[::-1] + k_t[2:3]
task = True
if not (k_t[1] in ans):
ans[k_t[1]] = {'tasks': {}}
if task:
ans[k_t[1]]['tasks'][k_t[-1]] = v
else:
ans[k_t[1]][k_t[-1]] = v
return ans[tube] if tube else ans
def _touch(self, task_id):
args = (str(self.space), task_id)
the_tuple = self.tnt.call("queue.touch", tuple(args))
return the_tuple.return_code == 0
def tube(self, name, **kwargs):
"""
Create Tube object, if not created before, and set kwargs.
If existed, return existed Tube.
:param name: name of Tube
:param delay: default delay for Tube tasks (Not necessary, will be 0)
:param ttl: default TTL for Tube tasks (Not necessary, will be 0)
:param ttr: default TTR for Tube tasks (Not necessary, will be 0)
:param pri: default priority for Tube tasks (Not necessary)
:type name: string
:type ttl: int
:type delay: int
:type ttr: int
:type pri: int
:rtype: `Tube` instance
"""
if name in self.tubes:
tube = self.tubes[name]
tube.update_options(**kwargs)
else:
tube = Tube(self, name, **kwargs)
self.tubes[name] = tube
return tube
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Parameterized unit tests for quantizing a Tensorflow graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.quantize.python import fold_batch_norms
from tensorflow.contrib.quantize.python import quantize
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import googletest
batch_norm = layers.batch_norm
conv2d = layers.conv2d
fully_connected = layers.fully_connected
separable_conv2d = layers.separable_conv2d
class QuantizeTest(test_util.TensorFlowTestCase):
def _RunWithoutBatchNormTestOverParameters(self, test_fn):
# TODO(suharshs): Use parameterized test once OSS TF supports it.
parameters_list = [
# (activation, activation_op_name, with_bypass, delay)
(nn_ops.relu6, 'Relu6', False, None),
(nn_ops.relu, 'Relu', False, None),
(array_ops.identity, 'Identity', False, None),
(nn_ops.relu6, 'Relu6', False, 5000),
(nn_ops.relu, 'Relu', False, 5000),
(array_ops.identity, 'Identity', False, 5000),
(nn_ops.relu6, 'Relu6', True, None),
(nn_ops.relu, 'Relu', True, None),
(array_ops.identity, 'Identity', True, None),
(nn_ops.relu6, 'Relu6', True, 5000),
(nn_ops.relu, 'Relu', True, 5000),
(array_ops.identity, 'Identity', True, 5000),
]
for params in parameters_list:
# Test everything with resource variables and normal variables.
test_fn(params[0], params[1], params[2], params[3], False)
test_fn(params[0], params[1], params[2], params[3], True)
def _AssertCorrectQuantizedGraphWithoutBatchNorm(
self, graph, scope, layer, activation_op_name, with_bypass, delay,
use_resource):
quantization_node_name = 'FakeQuantWithMinMaxVars'
weights_quant = graph.get_operation_by_name(scope + '/weights_quant/' +
quantization_node_name)
self.assertEqual(weights_quant.type, quantization_node_name)
# Assemble the expected inputs.
if use_resource:
expected_inputs = [
scope + '/weights_quant/FakeQuantWithMinMaxVars/ReadVariableOp',
scope + '/weights_quant/FakeQuantWithMinMaxVars/ReadVariableOp_1',
]
if layer == 'DepthwiseConv2dNative':
expected_inputs.append(scope + '/depthwise/ReadVariableOp')
else:
expected_inputs.append(scope + '/' + layer + '/ReadVariableOp')
else:
expected_inputs = [
scope + '/weights_quant/AssignMinLast',
scope + '/weights_quant/AssignMaxLast',
]
if layer == 'DepthwiseConv2dNative':
expected_inputs.append(scope + '/depthwise_weights/read')
else:
expected_inputs.append(scope + '/weights/read')
self._AssertInputOpsAre(weights_quant, expected_inputs)
if delay and delay > 0:
output_op_name = scope + '/weights_quant/delayed_quant/Switch_1'
else:
if layer == 'DepthwiseConv2dNative':
output_op_name = scope + '/depthwise'
else:
output_op_name = scope + '/' + layer
self._AssertOutputGoesToOps(weights_quant, graph, [output_op_name])
if with_bypass:
conv_quant = graph.get_operation_by_name(scope + '/conv_quant/' +
quantization_node_name)
self.assertEqual(conv_quant.type, quantization_node_name)
if use_resource:
expected_inputs = [
scope + '/conv_quant/FakeQuantWithMinMaxVars/ReadVariableOp',
scope + '/conv_quant/FakeQuantWithMinMaxVars/ReadVariableOp_1',
scope + '/BiasAdd',
]
else:
expected_inputs = [
scope + '/conv_quant/AssignMinEma',
scope + '/conv_quant/AssignMaxEma', scope + '/BiasAdd'
]
self._AssertInputOpsAre(conv_quant, expected_inputs)
output_op_name = (scope + '/conv_quant/delayed_quant/Switch_1'
if delay else 'test/Add')
self._AssertOutputGoesToOps(conv_quant, graph, [output_op_name])
act_quant = graph.get_operation_by_name('test/act_quant/' +
quantization_node_name)
self.assertEqual(act_quant.type, quantization_node_name)
if use_resource:
expected_inputs = [
'test/act_quant/FakeQuantWithMinMaxVars/ReadVariableOp',
'test/act_quant/FakeQuantWithMinMaxVars/ReadVariableOp_1',
'test/' + activation_op_name,
]
else:
expected_inputs = [
'test/act_quant/AssignMinEma', 'test/act_quant/AssignMaxEma',
'test/' + activation_op_name
]
self._AssertInputOpsAre(act_quant, expected_inputs)
output_op_name = ('test/act_quant/delayed_quant/Switch_1'
if delay else 'control_dependency')
self._AssertOutputGoesToOps(act_quant, graph, [output_op_name])
self._AssertIdempotent(graph)
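# Sketch of the weights-path pattern the assertions above encode
# (non-resource variables, no quant delay):
#   <scope>/weights/read -> <scope>/weights_quant/FakeQuantWithMinMaxVars
#   (min/max fed by AssignMinLast/AssignMaxLast) -> <scope>/<layer>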
def testQuantize_Conv2dWithoutBatchNorm(self):
self._RunWithoutBatchNormTestOverParameters(
self._TestQuantize_Conv2dWithoutBatchNorm)
def _TestQuantize_Conv2dWithoutBatchNorm(self, activation, activation_op_name,
with_bypass, delay, use_resource):
"""Tests quantization: inputs -> Conv2d no batch norm -> Activation.
Args:
activation: Callable that returns an Operation, a factory method for the
Activation.
activation_op_name: String, name of the Activation operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Activation.
delay: Int (optional), delay in number of steps until quantization starts.
use_resource: Bool, when true uses resource variables.
"""
graph = ops.Graph()
with graph.as_default():
variable_scope.get_variable_scope().set_use_resource(use_resource)
batch_size, height, width, depth = 5, 128, 128, 3
inputs = array_ops.zeros((batch_size, height, width, depth))
stride = 1 if with_bypass else 2
out_depth = 3 if with_bypass else 32
activation_fn = None if with_bypass else activation
scope = 'test/test2' if with_bypass else 'test'
node = conv2d(
inputs,
out_depth, [5, 5],
stride=stride,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=activation_fn,
scope=scope)
if with_bypass:
node = math_ops.add(inputs, node, name='test/Add')
node = activation(node, name='test/' + activation_op_name)
update_barrier = control_flow_ops.no_op(name='update_barrier')
with ops.control_dependencies([update_barrier]):
array_ops.identity(node, name='control_dependency')
quantize.Quantize(graph, True, quant_delay=delay)
self._AssertCorrectQuantizedGraphWithoutBatchNorm(
graph, scope, 'Conv2D', activation_op_name, with_bypass, delay,
use_resource)
def testQuantize_FCWithoutBatchNorm(self):
self._RunWithoutBatchNormTestOverParameters(
self._TestQuantize_FCWithoutBatchNorm)
def _TestQuantize_FCWithoutBatchNorm(self, activation, activation_op_name,
with_bypass, delay, use_resource):
"""Tests quantization: inputs -> FC no batch norm -> Activation.
Args:
activation: Callable that returns an Operation, a factory method for the
Activation.
activation_op_name: String, name of the Activation operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Activation.
delay: Int (optional), delay in number of steps until quantization starts.
use_resource: Bool, when true uses resource variables.
"""
graph = ops.Graph()
with graph.as_default():
variable_scope.get_variable_scope().set_use_resource(use_resource)
batch_size, depth = 5, 256
inputs = array_ops.zeros((batch_size, depth))
out_depth = 256 if with_bypass else 128
activation_fn = None if with_bypass else activation
scope = 'test/test2' if with_bypass else 'test'
node = fully_connected(
inputs,
out_depth,
weights_initializer=self._WeightInit(0.03),
activation_fn=activation_fn,
scope=scope)
if with_bypass:
node = math_ops.add(inputs, node, name='test/Add')
node = activation(node, name='test/' + activation_op_name)
update_barrier = control_flow_ops.no_op(name='update_barrier')
with ops.control_dependencies([update_barrier]):
array_ops.identity(node, name='control_dependency')
quantize.Quantize(graph, True, quant_delay=delay)
self._AssertCorrectQuantizedGraphWithoutBatchNorm(
graph, scope, 'MatMul', activation_op_name, with_bypass, delay,
use_resource)
def testQuantize_DepthwiseConv2dWithoutBatchNorm(self):
self._RunWithoutBatchNormTestOverParameters(
self._TestQuantize_DepthwiseConv2dWithoutBatchNorm)
def _TestQuantize_DepthwiseConv2dWithoutBatchNorm(
self, activation, activation_op_name, with_bypass, delay, use_resource):
"""Tests quantization: inputs -> DWConv2d no batch norm -> Activation.
Args:
activation: Callable that returns an Operation, a factory method for the
Activation.
activation_op_name: String, name of the Activation operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Activation.
delay: Int (optional), delay in number of steps until quantization starts.
use_resource: Bool, when true uses resource variables.
"""
graph = ops.Graph()
with graph.as_default():
variable_scope.get_variable_scope().set_use_resource(use_resource)
batch_size, height, width, depth = 5, 128, 128, 3
inputs = array_ops.zeros((batch_size, height, width, depth))
stride = 1 if with_bypass else 2
activation_fn = None if with_bypass else activation
scope = 'test/test2' if with_bypass else 'test'
node = separable_conv2d(
inputs,
None, [5, 5],
stride=stride,
depth_multiplier=1.0,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=activation_fn,
scope=scope)
if with_bypass:
node = math_ops.add(inputs, node, name='test/Add')
node = activation(node, name='test/' + activation_op_name)
update_barrier = control_flow_ops.no_op(name='update_barrier')
with ops.control_dependencies([update_barrier]):
array_ops.identity(node, name='control_dependency')
quantize.Quantize(graph, True, quant_delay=delay)
self._AssertCorrectQuantizedGraphWithoutBatchNorm(
graph, scope, 'DepthwiseConv2dNative', activation_op_name, with_bypass,
delay, use_resource)
def testQuantize_AtrousConvWithoutBatchNorm(self):
self._RunWithoutBatchNormTestOverParameters(
self._TestQuantize_AtrousConvWithoutBatchNorm)
def _TestQuantize_AtrousConvWithoutBatchNorm(
self, activation, activation_op_name, with_bypass, delay, use_resource):
"""Tests quantization: inputs -> atrous conv no batch norm -> Activation.
Args:
activation: Callable that returns an Operation, a factory method for the
Activation.
activation_op_name: String, name of the Activation operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Activation.
delay: Int (optional), delay in number of steps until quantization starts.
use_resource: Bool, when true uses resource variables.
"""
graph = ops.Graph()
with graph.as_default():
variable_scope.get_variable_scope().set_use_resource(use_resource)
batch_size, height, width, depth = 5, 128, 128, 3
inputs = array_ops.zeros((batch_size, height, width, depth))
dilation_rate = 2
activation_fn = None if with_bypass else activation
scope = 'test/test2' if with_bypass else 'test'
node = separable_conv2d(
inputs,
None, [3, 3],
rate=dilation_rate,
depth_multiplier=1.0,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=activation_fn,
scope=scope)
if with_bypass:
node = math_ops.add(inputs, node, name='test/Add')
node = activation(node, name='test/' + activation_op_name)
update_barrier = control_flow_ops.no_op(name='update_barrier')
with ops.control_dependencies([update_barrier]):
array_ops.identity(node, name='control_dependency')
quantize.Quantize(graph, True, quant_delay=delay)
self._AssertCorrectQuantizedGraphWithoutBatchNorm(
graph, scope, 'DepthwiseConv2dNative', activation_op_name, with_bypass,
delay, use_resource)
def _RunBatchNormTestOverParameters(self, test_fn):
# TODO(suharshs): Use parameterized test once OSS TF supports it.
parameters_list = [
# (activation, activation_op_name, with_bypass, delay, fused_batch_norm)
(nn_ops.relu6, 'Relu6', False, None, False),
(nn_ops.relu, 'Relu', False, None, False),
(array_ops.identity, 'Identity', False, None, False),
(nn_ops.relu6, 'Relu6', False, 5000, False),
(nn_ops.relu, 'Relu', False, 5000, False),
(array_ops.identity, 'Identity', False, 5000, False),
(nn_ops.relu6, 'Relu6', True, None, False),
(nn_ops.relu, 'Relu', True, None, False),
(array_ops.identity, 'Identity', True, None, False),
(nn_ops.relu6, 'Relu6', True, 5000, False),
(nn_ops.relu, 'Relu', True, 5000, False),
(array_ops.identity, 'Identity', True, 5000, False),
(nn_ops.relu6, 'Relu6', False, None, True),
(nn_ops.relu, 'Relu', False, None, True),
(array_ops.identity, 'Identity', False, None, True),
(nn_ops.relu6, 'Relu6', False, 5000, True),
(nn_ops.relu, 'Relu', False, 5000, True),
(array_ops.identity, 'Identity', False, 5000, True),
(nn_ops.relu6, 'Relu6', True, None, True),
(nn_ops.relu, 'Relu', True, None, True),
(array_ops.identity, 'Identity', True, None, True),
(nn_ops.relu6, 'Relu6', True, 5000, True),
(nn_ops.relu, 'Relu', True, 5000, True),
(array_ops.identity, 'Identity', True, 5000, True)
]
for params in parameters_list:
# Test everything with resource variables and normal variables.
test_fn(params[0], params[1], params[2], params[3], params[4], False)
test_fn(params[0], params[1], params[2], params[3], params[4], True)
def _AssertCorrectQuantizedGraphWithBatchNorm(self, graph, scope, layer,
activation_op_name, with_bypass,
delay, use_resource):
quantization_node_name = 'FakeQuantWithMinMaxVars'
weights_quant = graph.get_operation_by_name(
scope + '/weights_quant/' + quantization_node_name)
self.assertEqual(weights_quant.type, quantization_node_name)
if use_resource:
expected_inputs = [
scope + '/weights_quant/FakeQuantWithMinMaxVars/ReadVariableOp',
scope + '/weights_quant/FakeQuantWithMinMaxVars/ReadVariableOp_1',
]
else:
      expected_inputs = [
          scope + '/weights_quant/AssignMinLast',
          scope + '/weights_quant/AssignMaxLast'
      ]
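    # With batch norm folded, the weights feeding the fake-quant op are the
    # scaled weights produced by the 'mul_fold' op rather than the raw read.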
expected_inputs.append(scope + '/mul_fold')
self._AssertInputOpsAre(weights_quant, expected_inputs)
if layer == 'DepthwiseConv2dNative':
output_op_name = scope + ('/weights_quant/delayed_quant/Switch_1'
if delay else '/depthwise_Fold')
else:
output_op_name = scope + ('/weights_quant/delayed_quant/Switch_1'
if delay else '/' + layer + '_Fold')
self._AssertOutputGoesToOps(weights_quant, graph, [output_op_name])
if with_bypass:
conv_quant = graph.get_operation_by_name(
scope + '/conv_quant/' + quantization_node_name)
self.assertEqual(conv_quant.type, quantization_node_name)
if use_resource:
expected_inputs = [
scope + '/conv_quant/FakeQuantWithMinMaxVars/ReadVariableOp',
scope + '/conv_quant/FakeQuantWithMinMaxVars/ReadVariableOp_1',
]
else:
expected_inputs = [
scope + '/conv_quant/AssignMinEma',
scope + '/conv_quant/AssignMaxEma',
]
expected_inputs.append(scope + '/add_fold')
self._AssertInputOpsAre(conv_quant, expected_inputs)
output_op_name = (
scope + '/conv_quant/delayed_quant/Switch_1' if delay else 'test/Add')
self._AssertOutputGoesToOps(conv_quant, graph, [output_op_name])
act_quant = graph.get_operation_by_name(
'test/act_quant/' + quantization_node_name)
self.assertEqual(act_quant.type, quantization_node_name)
if use_resource:
expected_inputs = [
'test/act_quant/FakeQuantWithMinMaxVars/ReadVariableOp',
'test/act_quant/FakeQuantWithMinMaxVars/ReadVariableOp_1',
]
else:
expected_inputs = [
'test/act_quant/AssignMinEma',
'test/act_quant/AssignMaxEma',
]
expected_inputs.append('test/' + activation_op_name)
self._AssertInputOpsAre(act_quant, expected_inputs)
output_op_name = ('test/act_quant/delayed_quant/Switch_1'
if delay else 'control_dependency')
self._AssertOutputGoesToOps(act_quant, graph, [output_op_name])
self._AssertIdempotent(graph)
def testQuantize_Conv2dWithBatchNorm(self):
self._RunBatchNormTestOverParameters(self._TestQuantize_Conv2dWithBatchNorm)
def _TestQuantize_Conv2dWithBatchNorm(self, activation, activation_op_name,
with_bypass, delay, fused_batch_norm,
use_resource):
"""Tests quantization: inputs -> Conv2d with batch norm -> Activation.
Args:
activation: Callable that returns an Operation, a factory method for the
Activation.
activation_op_name: String, name of the Activation operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Activation.
delay: Int (optional), delay in number of steps until quantization starts.
fused_batch_norm: Bool, when true use FusedBatchNorm.
use_resource: Bool, when true uses resource variables.
"""
graph = ops.Graph()
with graph.as_default():
variable_scope.get_variable_scope().set_use_resource(use_resource)
batch_size, height, width, depth = 5, 128, 128, 3
inputs = array_ops.zeros((batch_size, height, width, depth))
stride = 1 if with_bypass else 2
out_depth = 3 if with_bypass else 32
scope = 'test/test2' if with_bypass else 'test'
node = conv2d(
inputs,
out_depth, [5, 5],
stride=stride,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=None,
normalizer_fn=batch_norm,
normalizer_params=self._BatchNormParams(fused_batch_norm),
scope=scope)
# Manually add a bypass (optional) and an activation.
if with_bypass:
node = math_ops.add(inputs, node, name='test/Add')
node = activation(node, name='test/' + activation_op_name)
update_barrier = control_flow_ops.no_op(name='update_barrier')
with ops.control_dependencies([update_barrier]):
array_ops.identity(node, name='control_dependency')
fold_batch_norms.FoldBatchNorms(graph, is_training=True)
quantize.Quantize(graph, True, quant_delay=delay)
self._AssertCorrectQuantizedGraphWithBatchNorm(
graph, scope, 'Conv2D', activation_op_name, with_bypass, delay,
use_resource)
def testQuantize_FCWithBatchNorm(self):
self._RunBatchNormTestOverParameters(self._TestQuantize_FCWithBatchNorm)
def _TestQuantize_FCWithBatchNorm(self, activation, activation_op_name,
with_bypass, delay, fused_batch_norm,
use_resource):
"""Tests quantization: inputs -> FC with batch norm -> Activation.
Args:
activation: Callable that returns an Operation, a factory method for the
Activation.
activation_op_name: String, name of the Activation operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Activation.
delay: Int (optional), delay in number of steps until quantization starts.
fused_batch_norm: Bool, when true use FusedBatchNorm.
use_resource: Bool, when true uses resource variables.
"""
graph = ops.Graph()
with graph.as_default():
variable_scope.get_variable_scope().set_use_resource(use_resource)
batch_size, depth = 5, 256
inputs = array_ops.zeros((batch_size, depth))
out_depth = 256 if with_bypass else 128
scope = 'test/test2' if with_bypass else 'test'
node = fully_connected(
inputs,
out_depth,
weights_initializer=self._WeightInit(0.03),
activation_fn=None,
normalizer_fn=batch_norm,
normalizer_params=self._BatchNormParams(fused_batch_norm),
scope=scope)
# Manually add a bypass (optional) and an activation.
if with_bypass:
node = math_ops.add(inputs, node, name='test/Add')
node = activation(node, name='test/' + activation_op_name)
update_barrier = control_flow_ops.no_op(name='update_barrier')
with ops.control_dependencies([update_barrier]):
array_ops.identity(node, name='control_dependency')
fold_batch_norms.FoldBatchNorms(graph, is_training=True)
quantize.Quantize(graph, True, quant_delay=delay)
self._AssertCorrectQuantizedGraphWithBatchNorm(
graph, scope, 'MatMul', activation_op_name, with_bypass, delay,
use_resource)
def testQuantize_DepthwiseConv2dWithBatchNorm(self):
self._RunBatchNormTestOverParameters(
self._TestQuantize_DepthwiseConv2dWithBatchNorm)
def _TestQuantize_DepthwiseConv2dWithBatchNorm(
self, activation, activation_op_name, with_bypass, delay,
fused_batch_norm, use_resource):
"""Tests quantization: inputs -> DWConv2d with batch norm -> Activation.
Args:
activation: Callable that returns an Operation, a factory method for the
Activation.
activation_op_name: String, name of the Activation operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Activation.
delay: Int (optional), delay in number of steps until quantization starts.
fused_batch_norm: Bool, when true use FusedBatchNorm.
use_resource: Bool, when true uses resource variables.
"""
graph = ops.Graph()
with graph.as_default():
variable_scope.get_variable_scope().set_use_resource(use_resource)
batch_size, height, width, depth = 5, 128, 128, 3
inputs = array_ops.zeros((batch_size, height, width, depth))
stride = 1 if with_bypass else 2
scope = 'test/test2' if with_bypass else 'test'
node = separable_conv2d(
inputs,
None, [5, 5],
stride=stride,
depth_multiplier=1.0,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=None,
normalizer_fn=batch_norm,
normalizer_params=self._BatchNormParams(fused_batch_norm),
scope=scope)
# Manually add a bypass (optional) and an activation.
if with_bypass:
node = math_ops.add(inputs, node, name='test/Add')
node = activation(node, name='test/' + activation_op_name)
update_barrier = control_flow_ops.no_op(name='update_barrier')
with ops.control_dependencies([update_barrier]):
array_ops.identity(node, name='control_dependency')
fold_batch_norms.FoldBatchNorms(graph, is_training=True)
quantize.Quantize(graph, True, quant_delay=delay)
self._AssertCorrectQuantizedGraphWithBatchNorm(
graph, scope, 'DepthwiseConv2dNative', activation_op_name,
with_bypass, delay, use_resource)
def testQuantize_AtrousConvWithBatchNorm(self):
self._RunBatchNormTestOverParameters(
self._TestQuantize_AtrousConvWithBatchNorm)
def _TestQuantize_AtrousConvWithBatchNorm(
self, activation, activation_op_name, with_bypass, delay,
fused_batch_norm, use_resource):
"""Tests quantization: inputs -> atrous conv with batch norm -> Activation.
Args:
activation: Callable that returns an Operation, a factory method for the
Activation.
activation_op_name: String, name of the Activation operation.
with_bypass: Bool, when true there is an extra connection added from
inputs to just before Activation.
delay: Int (optional), delay in number of steps until quantization starts.
fused_batch_norm: Bool, when true use FusedBatchNorm.
use_resource: Bool, when true uses resource variables.
"""
graph = ops.Graph()
with graph.as_default():
variable_scope.get_variable_scope().set_use_resource(use_resource)
batch_size, height, width, depth = 5, 128, 128, 3
inputs = array_ops.zeros((batch_size, height, width, depth))
dilation_rate = 2
scope = 'test/test2' if with_bypass else 'test'
node = separable_conv2d(
inputs,
None, [3, 3],
rate=dilation_rate,
depth_multiplier=1.0,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=None,
normalizer_fn=batch_norm,
normalizer_params=self._BatchNormParams(fused_batch_norm),
scope=scope)
# Manually add a bypass (optional) and an activation.
if with_bypass:
node = math_ops.add(inputs, node, name='test/Add')
node = activation(node, name='test/' + activation_op_name)
update_barrier = control_flow_ops.no_op(name='update_barrier')
with ops.control_dependencies([update_barrier]):
array_ops.identity(node, name='control_dependency')
fold_batch_norms.FoldBatchNorms(graph, is_training=True)
quantize.Quantize(graph, True, quant_delay=delay)
self._AssertCorrectQuantizedGraphWithBatchNorm(
graph, scope, 'DepthwiseConv2dNative', activation_op_name,
with_bypass, delay, use_resource)
def _AssertIdempotent(self, graph):
# Ensure that calling the rewrite again doesn't change the graph.
graph_def_before = str(graph.as_graph_def())
with graph.as_default():
      # Running the rewrite a second time should not add any nodes.
fold_batch_norms.FoldBatchNorms(graph, is_training=True)
quantize.Quantize(graph, True)
graph_def_after = str(graph.as_graph_def())
self.assertEqual(graph_def_before, graph_def_after)
def testBatchNormForcedUpdates(self):
parameter_list = [
# (activation, activation_op_name, fused_batch_norm)
(nn_ops.relu6, 'Relu6', False),
(nn_ops.relu, 'Relu', False),
(array_ops.identity, 'Identity', False),
(nn_ops.relu6, 'Relu6', True),
(nn_ops.relu, 'Relu', True),
(array_ops.identity, 'Identity', True),
]
for params in parameter_list:
self._TestBatchNormForcedUpdates(params[0], params[1], params[2], False)
self._TestBatchNormForcedUpdates(params[0], params[1], params[2], True)
def _TestBatchNormForcedUpdates(self, activation, activation_op_name,
fused_batch_norm, use_resource):
"""post_activation bypass quantization should happen with forced updates."""
graph = ops.Graph()
with graph.as_default():
variable_scope.get_variable_scope().set_use_resource(use_resource)
batch_size, height, width, depth = 5, 128, 128, 3
input1 = array_ops.zeros((batch_size, height, width, depth))
      input2 = array_ops.zeros((batch_size, height // 2, width // 2, 32))
      # Setting updates_collections to None forces updates, which adds an
      # extra identity operation after each batch norm.
bn_params = self._BatchNormParams(
fused=fused_batch_norm, force_updates=True)
conv = conv2d(
input1,
32, [5, 5],
stride=2,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
activation_fn=activation,
normalizer_fn=batch_norm,
normalizer_params=bn_params,
scope='test/test')
bypass_tensor = math_ops.add(conv, input2, name='test/add')
# The output of the post_activation bypass will be another layer.
_ = conv2d(
bypass_tensor,
32, [5, 5],
stride=2,
padding='SAME',
weights_initializer=self._WeightInit(0.09),
normalizer_fn=batch_norm,
normalizer_params=bn_params,
activation_fn=activation,
scope='test/unused')
fold_batch_norms.FoldBatchNorms(graph, is_training=True)
quantize.Quantize(graph, is_training=True)
      # Ensure that the bypass node is both preceded and followed by a
      # FakeQuantWithMinMaxVars operation, since the output of the Add isn't
      # an activation.
self.assertTrue('FakeQuantWithMinMaxVars' in
[c.type for c in bypass_tensor.consumers()])
self.assertTrue('FakeQuantWithMinMaxVars' in
[i.op.type for i in bypass_tensor.op.inputs])
with open('/tmp/bn_quant_test.pbtxt', 'w') as f:
f.write(str(graph.as_graph_def()))
def _BatchNormParams(self, fused=False, force_updates=False):
params = {
'center': True,
'scale': True,
'decay': 1.0 - 0.003,
'fused': fused
}
if force_updates:
params['updates_collections'] = None
return params
def _WeightInit(self, stddev):
"""Returns truncated normal variable initializer.
Function is defined purely to shorten the name so that it stops wrapping.
Args:
stddev: Standard deviation of normal variable.
Returns:
      An initializer that initializes with a truncated normal distribution.
"""
return init_ops.truncated_normal_initializer(stddev=stddev)
def _AssertInputOpsAre(self, op, in_op_names):
"""Asserts that all inputs to op come from in_op_names (disregarding order).
Args:
op: Operation to check inputs for.
in_op_names: List of strings, operations where all op's inputs should
come from.
"""
expected_inputs = [in_op_name + ':0' for in_op_name in in_op_names]
self.assertItemsEqual([t.name for t in op.inputs], expected_inputs)
def _AssertOutputGoesToOps(self, op, graph, out_op_names):
"""Asserts that outputs from op go to out_op_names (and perhaps others).
Args:
op: Operation to check outputs for.
graph: Graph where output operations are located.
out_op_names: List of strings, operations where op's outputs should go.
"""
for out_op_name in out_op_names:
out_op = graph.get_operation_by_name(out_op_name)
self.assertIn(op.outputs[0].name, [str(t.name) for t in out_op.inputs])
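# Illustrative sketch, not part of the original test suite: a minimal,
# hypothetical graph showing why the assertions above can build expected
# tensor names by appending ':0' -- an op's input tensors are named
# '<producer_op_name>:<output_index>'.
def _demo_tensor_input_names():
  g = ops.Graph()
  with g.as_default():
    a = array_ops.zeros((2, 2), name='a')
    b = array_ops.zeros((2, 2), name='b')
    math_ops.add(a, b, name='c')
  add_op = g.get_operation_by_name('c')
  assert [t.name for t in add_op.inputs] == ['a:0', 'b:0']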
if __name__ == '__main__':
googletest.main()
#!/usr/bin/env python
"""
@package coverage_model.parameter_expressions
@file coverage_model/parameter_expressions.py
@author Christopher Mueller
@brief Classes for holding expressions evaluated against parameters
"""
from ooi.logging import log
import numpy as np
import numexpr as ne
import os
from numbers import Number
from collections import OrderedDict
from coverage_model.basic_types import AbstractBase
from coverage_model.parameter_data import NumpyDictParameterData
class ParameterFunctionException(Exception):
def __init__(self, message, original_type=None):
self.original_type = original_type
if self.original_type is not None:
message = '{0} :: original_type = {1}'.format(message, str(original_type))
Exception.__init__(self, message)
class AbstractFunction(AbstractBase):
def __init__(self, name, arg_list, param_map):
AbstractBase.__init__(self)
self.name = name
self.arg_list = arg_list
self.param_map = param_map
def _apply_mapping(self):
if self.param_map is not None:
keyset = set(self.param_map.keys())
argset = set(self.arg_list)
if not keyset.issubset(argset):
log.warn('\'param_map\' does not contain keys for all items in \'arg_list\'; '
'arg will be used for missing keys = %s', keyset.difference(argset))
args = self.arg_list
vals = [self.param_map[a] if a in self.param_map else a for a in self.arg_list]
else:
args = vals = self.arg_list
return OrderedDict(zip(args, vals))
@classmethod
def _get_map_name(cls, a, n):
if a is None or a == '':
return n
else:
return '{0} :|: {1}'.format(a, n)
@classmethod
def _parse_map_name(cls, name):
try:
a, n = name.split(':|:')
a = a.strip()
n = n.strip()
except ValueError:
return '', name
return a, n
def evaluate(self, *args):
raise NotImplementedError('Not implemented in abstract class')
def get_module_dependencies(self):
deps = set()
if hasattr(self, 'expression'): # NumexprFunction
deps.add('numexpr')
elif hasattr(self, 'owner'): # PythonFunction
deps.add(self.owner)
arg_map = self._apply_mapping()
for k in self.arg_list:
a = arg_map[k]
if isinstance(a, AbstractFunction):
deps.update(a.get_module_dependencies())
return tuple(deps)
def get_function_map(self, pctxt_callback=None, parent_arg_name=None):
if pctxt_callback is None:
            log.warn('\'pctxt_callback\' is None; using placeholder callback')
def raise_keyerror(*args):
raise KeyError()
pctxt_callback = raise_keyerror
arg_map = self._apply_mapping()
ret = {}
arg_count = 0
for k in self.arg_list:
a = arg_map[k]
if isinstance(a, AbstractFunction):
ret['arg_{0}'.format(arg_count)] = a.get_function_map(pctxt_callback, k)
else:
                if isinstance(a, Number) or (hasattr(a, '__iter__') and np.array([isinstance(ai, Number) for ai in a]).all()):
# Treat numerical arguments as independents
a = '<{0}>'.format(self._get_map_name(k, a))
else:
# Check to see if the argument is a ParameterFunctionType
try:
spc = pctxt_callback(a)
if hasattr(spc.param_type, 'get_function_map'):
a = spc.param_type.get_function_map(parent_arg_name=k)
else:
# An independent parameter argument
a = '<{0}>'.format(self._get_map_name(k, a))
except KeyError:
a = '!{0}!'.format(self._get_map_name(k, a))
ret['arg_{0}'.format(arg_count)] = a
arg_count += 1
# Check to see if this expression represents a parameter
try:
pctxt_callback(self.name)
n = self._get_map_name(parent_arg_name, self.name)
except KeyError:
# It is an intermediate expression
n = '[{0}]'.format(self._get_map_name(parent_arg_name, self.name))
return {n: ret}
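    # Notation used in the returned function map (see the branches above):
    #   <name>  independent argument (parameter or numeric constant)
    #   !name!  argument that could not be resolved via the context callback
    #   [name]  intermediate expression that is not itself a parameter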
def __eq__(self, other):
ret = False
if isinstance(other, AbstractFunction):
sfm = self.get_function_map()
ofm = other.get_function_map()
ret = sfm == ofm
return ret
def __ne__(self, other):
return not self == other
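# Illustrative sketch, added for exposition only (values are hypothetical):
# the ' :|: ' separator produced by _get_map_name lets a parent argument name
# travel with a parameter name, and _parse_map_name recovers both parts.
def _demo_map_name_round_trip():
    joined = AbstractFunction._get_map_name('x', 'TEMPWAT')
    assert joined == 'x :|: TEMPWAT'
    assert AbstractFunction._parse_map_name(joined) == ('x', 'TEMPWAT')
    # A name without the separator parses as an empty argument part.
    assert AbstractFunction._parse_map_name('TEMPWAT') == ('', 'TEMPWAT')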
class PythonFunction(AbstractFunction):
def __init__(self, name, owner, func_name, arg_list, kwarg_map=None, param_map=None, egg_uri='', remove_fills=True):
AbstractFunction.__init__(self, name, arg_list, param_map)
self.owner = owner
self.func_name = func_name
self.kwarg_map = kwarg_map
self.egg_uri = egg_uri
def _import_func(self):
try:
import importlib
module = importlib.import_module(self.owner)
self._callable = getattr(module, self.func_name)
except ImportError:
if self.egg_uri:
self.download_and_load_egg(self.egg_uri)
module = importlib.import_module(self.owner)
self._callable = getattr(module, self.func_name)
else:
raise
def evaluate(self, pval_callback, time_segment, fill_value=-9999, stride_length=None):
self._import_func()
arg_map = self._apply_mapping()
args = []
for k in self.arg_list:
a = arg_map[k]
if isinstance(a, AbstractFunction):
args.append(a.evaluate(pval_callback, time_segment, fill_value))
            elif isinstance(a, Number) or (hasattr(a, '__iter__') and np.array(
                    [isinstance(ai, Number) for ai in a]).all()):
args.append(a)
else:
if k == 'pv_callback':
args.append(lambda arg: pval_callback(arg, time_segment))
else:
v = pval_callback(a, time_segment)
if isinstance(v, NumpyDictParameterData):
v = v.get_data()[a]
if k.endswith('*'):
v = v[-1]
args.append(v)
if self.kwarg_map is None:
return self._callable(*args)
else:
raise NotImplementedError('Handling for kwargs not yet implemented')
# TODO: Add handling for kwargs
# return self._callable(*args, **kwargs)
def _todict(self, exclude=None):
return super(PythonFunction, self)._todict(exclude=['_callable'])
@classmethod
def _fromdict(cls, cmdict, arg_masks=None):
ret = super(PythonFunction, cls)._fromdict(cmdict, arg_masks=arg_masks)
return ret
def __eq__(self, other):
ret = False
if super(PythonFunction, self).__eq__(other):
ret = self.owner == other.owner and self.func_name == other.func_name
return ret
@classmethod
def download_and_load_egg(cls, url):
'''
Downloads an egg from the URL specified into the cache directory
Returns the full path to the egg
'''
from tempfile import gettempdir
import os
import requests
import pkg_resources
# Get the filename based on the URL
filename = url.split('/')[-1]
# Store it in the $TMPDIR
egg_cache = gettempdir()
path = os.path.join(egg_cache, filename)
r = requests.get(url, stream=True)
if r.status_code == 200:
# Download the file using requests stream
with open(path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
# Add it to the working set of eggs
pkg_resources.working_set.add_entry(path)
            return path
raise IOError("Couldn't download the file at %s" % url)
class NumexprFunction(AbstractFunction):
def __init__(self, name, expression, arg_list, param_map=None):
AbstractFunction.__init__(self, name, arg_list, param_map)
self.expression = expression
def evaluate(self, pval_callback, time_segment, fill_value=-9999, stride_length=None):
arg_map = self._apply_mapping()
ld = {}
for k in self.arg_list:
a = arg_map[k]
if isinstance(a, AbstractFunction):
ld[k] = a.evaluate(pval_callback, time_segment, fill_value, stride_length=stride_length)
            elif isinstance(a, Number) or (hasattr(a, '__iter__') and np.array(
                    [isinstance(ai, Number) for ai in a]).all()):
ld[k] = a
else:
if k.endswith('*'):
vals = pval_callback(a, time_segment, stride_length)
if isinstance(vals, NumpyDictParameterData):
vals = vals.get_data()[a]
ld[k[:-1]] = vals[-1]
else:
vals = pval_callback(a, time_segment, stride_length=stride_length)
if isinstance(vals, NumpyDictParameterData):
vals = vals.get_data()[a]
ld[k] = vals
return ne.evaluate(self.expression, local_dict=ld)
def __eq__(self, other):
ret = False
if super(NumexprFunction, self).__eq__(other):
ret = self.expression == other.expression
return ret
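# Illustrative sketch, added for exposition only: evaluating a NumexprFunction
# against a stub value callback. The parameter name 'TEMPWAT' and the callback
# below are hypothetical stand-ins for real coverage parameters.
def _demo_numexpr_function():
    f = NumexprFunction('demo', 'a * b + 1', ['a', 'b'],
                        param_map={'a': 'TEMPWAT', 'b': 2.0})
    def stub_pval_callback(name, time_segment, stride_length=None):
        # Pretend 'TEMPWAT' resolves to these values from storage.
        return np.arange(3.0)
    result = f.evaluate(stub_pval_callback, time_segment=None)
    assert np.allclose(result, np.arange(3.0) * 2.0 + 1)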
class ExternalFunction(PythonFunction):
    def __init__(self, name, external_guid, external_name, owner=None, func_name=None, arg_list=None, kwarg_map=None, param_map=None, egg_uri='', remove_fills=True):
        self.external_name = external_name
        # Use None defaults and create fresh containers here: mutable default
        # arguments would be shared across instances, and param_map is
        # mutated below.
        arg_list = [] if arg_list is None else arg_list
        param_map = {} if param_map is None else param_map
        if func_name is None and owner is None:
            owner = 'coverage_model.util.external_parameter_methods'
            func_name = 'linear_map'
        param_map[external_name] = external_guid
        super(ExternalFunction, self).__init__(name, owner, func_name, arg_list=arg_list, kwarg_map=kwarg_map, egg_uri=egg_uri, param_map=param_map, remove_fills=remove_fills)
def evaluate(self, pval_callback, time_segment, fill_value=-9999, stride_length=None):
self._import_func()
from coverage_model.coverage import AbstractCoverage
cov = AbstractCoverage.resurrect(self.param_map[self.external_name], mode='r')
return self._callable(pval_callback, cov, self.external_name, time_segment)
import os
import random
import re
import shutil
import tempfile
import threading
import time
from cassandra import ConsistencyLevel
from cassandra.concurrent import execute_concurrent_with_args
from ccmlib.node import NodeError
from assertions import assert_almost_equal, assert_not_running, assert_one
from dtest import DISABLE_VNODES, Tester, debug
from tools import (InterruptBootstrap, KillOnBootstrap, known_failure,
new_node, no_vnodes, query_c1c2, since)
def assert_bootstrap_state(tester, node, expected_bootstrap_state):
"""
Assert that a node is on a given bootstrap state
@param tester The dtest.Tester object to fetch the exclusive connection to the node
@param node The node to check bootstrap state
@param expected_bootstrap_state Bootstrap state to expect
Examples:
assert_bootstrap_state(self, node3, 'COMPLETED')
"""
session = tester.patient_exclusive_cql_connection(node)
assert_one(session, "SELECT bootstrapped FROM system.local WHERE key='local'", [expected_bootstrap_state])
class TestBootstrap(Tester):
def __init__(self, *args, **kwargs):
kwargs['cluster_options'] = {'start_rpc': 'true'}
# Ignore these log patterns:
self.ignore_log_patterns = [
# This one occurs when trying to send the migration to a
# node that hasn't started yet, and when it does, it gets
# replayed and everything is fine.
r'Can\'t send migration request: node.*is down',
# ignore streaming error during bootstrap
r'Exception encountered during startup',
r'Streaming error occurred'
]
Tester.__init__(self, *args, **kwargs)
self.allow_log_errors = True
def _base_bootstrap_test(self, bootstrap):
cluster = self.cluster
tokens = cluster.balanced_tokens(2)
cluster.set_configuration_options(values={'num_tokens': 1})
debug("[node1, node2] tokens: %r" % (tokens,))
keys = 10000
# Create a single node cluster
cluster.populate(1)
node1 = cluster.nodelist()[0]
node1.set_configuration_options(values={'initial_token': tokens[0]})
cluster.start(wait_other_notice=True)
session = self.patient_cql_connection(node1)
self.create_ks(session, 'ks', 1)
self.create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
# record the size before inserting any of our own data
empty_size = node1.data_size()
debug("node1 empty size : %s" % float(empty_size))
insert_statement = session.prepare("INSERT INTO ks.cf (key, c1, c2) VALUES (?, 'value1', 'value2')")
execute_concurrent_with_args(session, insert_statement, [['k%d' % k] for k in range(keys)])
node1.flush()
node1.compact()
initial_size = node1.data_size()
debug("node1 size before bootstrapping node2: %s" % float(initial_size))
        # Read the inserted data throughout the bootstrap process. We
        # shouldn't get any errors.
reader = self.go(lambda _: query_c1c2(session, random.randint(0, keys - 1), ConsistencyLevel.ONE))
# Bootstrapping a new node
node2 = bootstrap(cluster, tokens[1])
node2.compact()
reader.check()
node1.cleanup()
debug("node1 size after cleanup: %s" % float(node1.data_size()))
node1.compact()
debug("node1 size after compacting: %s" % float(node1.data_size()))
time.sleep(.5)
reader.check()
debug("node2 size after compacting: %s" % float(node2.data_size()))
size1 = float(node1.data_size())
size2 = float(node2.data_size())
assert_almost_equal(size1, size2, error=0.3)
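        # The payload written before bootstrap (initial_size - empty_size)
        # should now be split roughly evenly across the two nodes, i.e. be
        # about twice one node's post-cleanup payload.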
assert_almost_equal(float(initial_size - empty_size), 2 * (size1 - float(empty_size)))
assert_bootstrap_state(self, node2, 'COMPLETED')
@no_vnodes()
def simple_bootstrap_test(self):
def bootstrap(cluster, token):
node2 = new_node(cluster)
node2.set_configuration_options(values={'initial_token': token})
node2.start(wait_for_binary_proto=True)
return node2
self._base_bootstrap_test(bootstrap)
@no_vnodes()
def bootstrap_on_write_survey_test(self):
def bootstrap_on_write_survey_and_join(cluster, token):
node2 = new_node(cluster)
node2.set_configuration_options(values={'initial_token': token})
node2.start(jvm_args=["-Dcassandra.write_survey=true"], wait_for_binary_proto=True)
self.assertTrue(len(node2.grep_log('Startup complete, but write survey mode is active, not becoming an active ring member.')))
assert_bootstrap_state(self, node2, 'IN_PROGRESS')
node2.nodetool("join")
self.assertTrue(len(node2.grep_log('Leaving write survey mode and joining ring at operator request')))
return node2
self._base_bootstrap_test(bootstrap_on_write_survey_and_join)
def simple_bootstrap_test_nodata(self):
"""
@jira_ticket CASSANDRA-11010
Test that bootstrap completes if streaming from nodes with no data
"""
cluster = self.cluster
# Create a two-node cluster
cluster.populate(2)
cluster.start(wait_other_notice=True)
# Bootstrapping a new node
node3 = new_node(cluster)
node3.start(wait_for_binary_proto=True, wait_other_notice=True)
assert_bootstrap_state(self, node3, 'COMPLETED')
def read_from_bootstrapped_node_test(self):
"""
Test bootstrapped node sees existing data
@jira_ticket CASSANDRA-6648
"""
cluster = self.cluster
cluster.populate(3)
cluster.start()
node1 = cluster.nodes['node1']
node1.stress(['write', 'n=10K', 'no-warmup', '-rate', 'threads=8', '-schema', 'replication(factor=2)'])
session = self.patient_cql_connection(node1)
stress_table = 'keyspace1.standard1'
original_rows = list(session.execute("SELECT * FROM %s" % (stress_table,)))
node4 = new_node(cluster)
node4.start(wait_for_binary_proto=True)
session = self.patient_exclusive_cql_connection(node4)
new_rows = list(session.execute("SELECT * FROM %s" % (stress_table,)))
self.assertEquals(original_rows, new_rows)
def consistent_range_movement_true_with_replica_down_should_fail_test(self):
self._bootstrap_test_with_replica_down(True)
def consistent_range_movement_false_with_replica_down_should_succeed_test(self):
self._bootstrap_test_with_replica_down(False)
def consistent_range_movement_true_with_rf1_should_fail_test(self):
self._bootstrap_test_with_replica_down(True, rf=1)
def consistent_range_movement_false_with_rf1_should_succeed_test(self):
self._bootstrap_test_with_replica_down(False, rf=1)
def _bootstrap_test_with_replica_down(self, consistent_range_movement, rf=2):
"""
        Test that consistent-range-movement bootstrap does not succeed when there are insufficient replicas
@jira_ticket CASSANDRA-11848
"""
cluster = self.cluster
cluster.populate(2)
node1, node2 = cluster.nodelist()
node3_token = None
# Make token assignment deterministic
if DISABLE_VNODES:
cluster.set_configuration_options(values={'num_tokens': 1})
tokens = cluster.balanced_tokens(3)
debug("non-vnode tokens: %r" % (tokens,))
node1.set_configuration_options(values={'initial_token': tokens[0]})
node2.set_configuration_options(values={'initial_token': tokens[2]})
node3_token = tokens[1] # Add node 3 between node1 and node2
cluster.start()
node1.stress(['write', 'n=10K', 'no-warmup', '-rate', 'threads=8', '-schema', 'replication(factor={})'.format(rf)])
# change system_auth keyspace to 2 (default is 1) to avoid
# "Unable to find sufficient sources for streaming" warning
if cluster.cassandra_version() >= '2.2.0':
session = self.patient_cql_connection(node1)
session.execute("""
ALTER KEYSPACE system_auth
WITH replication = {'class':'SimpleStrategy', 'replication_factor':2};
""")
# Stop node2, so node3 will not be able to perform consistent range movement
node2.stop(wait_other_notice=True)
successful_bootstrap_expected = not consistent_range_movement
node3 = new_node(cluster, token=node3_token)
node3.start(wait_for_binary_proto=successful_bootstrap_expected, wait_other_notice=successful_bootstrap_expected,
jvm_args=["-Dcassandra.consistent.rangemovement={}".format(consistent_range_movement)])
if successful_bootstrap_expected:
# with rf=1 and cassandra.consistent.rangemovement=false, missing sources are ignored
if not consistent_range_movement and rf == 1:
node3.watch_log_for("Unable to find sufficient sources for streaming range")
self.assertTrue(node3.is_running())
assert_bootstrap_state(self, node3, 'COMPLETED')
else:
if consistent_range_movement:
node3.watch_log_for("A node required to move the data consistently is down")
else:
node3.watch_log_for("Unable to find sufficient sources for streaming range")
assert_not_running(node3)
@since('2.2')
def resumable_bootstrap_test(self):
"""
Test resuming bootstrap after data streaming failure
"""
cluster = self.cluster
cluster.populate(2)
node1 = cluster.nodes['node1']
# set up byteman
node1.byteman_port = '8100'
node1.import_config_files()
cluster.start(wait_other_notice=True)
# kill stream to node3 in the middle of streaming to let it fail
node1.byteman_submit(['./stream_failure.btm'])
node1.stress(['write', 'n=1K', 'no-warmup', 'cl=TWO', '-schema', 'replication(factor=2)', '-rate', 'threads=50'])
cluster.flush()
# start bootstrapping node3 and wait for streaming
node3 = new_node(cluster)
node3.start(wait_other_notice=False, wait_for_binary_proto=True)
# wait for node3 ready to query
node3.watch_log_for("Starting listening for CQL clients")
mark = node3.mark_log()
# check if node3 is still in bootstrap mode
assert_bootstrap_state(self, node3, 'IN_PROGRESS')
# bring back node1 and invoke nodetool bootstrap to resume bootstrapping
node3.nodetool('bootstrap resume')
node3.watch_log_for("Resume complete", from_mark=mark)
assert_bootstrap_state(self, node3, 'COMPLETED')
# cleanup to guarantee each node will only have sstables of its ranges
cluster.cleanup()
debug("Check data is present")
# Let's check stream bootstrap completely transferred data
stdout, stderr, _ = node3.stress(['read', 'n=1k', 'no-warmup', '-schema', 'replication(factor=2)', '-rate', 'threads=8'])
if stdout is not None:
self.assertNotIn("FAILURE", stdout)
@since('2.2')
def bootstrap_with_reset_bootstrap_state_test(self):
"""Test bootstrap with resetting bootstrap progress"""
cluster = self.cluster
cluster.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1})
cluster.populate(2).start(wait_other_notice=True)
node1 = cluster.nodes['node1']
node1.stress(['write', 'n=100K', '-schema', 'replication(factor=2)'])
node1.flush()
# kill node1 in the middle of streaming to let it fail
t = InterruptBootstrap(node1)
t.start()
# start bootstrapping node3 and wait for streaming
node3 = new_node(cluster)
try:
node3.start()
except NodeError:
pass # node doesn't start as expected
t.join()
node1.start()
# restart node3 bootstrap with resetting bootstrap progress
node3.stop()
mark = node3.mark_log()
node3.start(jvm_args=["-Dcassandra.reset_bootstrap_progress=true"])
# check if we reset bootstrap state
node3.watch_log_for("Resetting bootstrap progress to start fresh", from_mark=mark)
# wait for node3 ready to query
node3.watch_log_for("Listening for thrift clients...", from_mark=mark)
# check if 2nd bootstrap succeeded
assert_bootstrap_state(self, node3, 'COMPLETED')
def manual_bootstrap_test(self):
"""
Test adding a new node and bootstrapping it manually. No auto_bootstrap.
        This test also verifies that all data is intact after the addition of the new node.
@jira_ticket CASSANDRA-9022
"""
cluster = self.cluster
cluster.populate(2).start(wait_other_notice=True)
(node1, node2) = cluster.nodelist()
node1.stress(['write', 'n=1K', 'no-warmup', '-schema', 'replication(factor=2)',
'-rate', 'threads=1', '-pop', 'dist=UNIFORM(1..1000)'])
session = self.patient_exclusive_cql_connection(node2)
stress_table = 'keyspace1.standard1'
original_rows = list(session.execute("SELECT * FROM %s" % stress_table))
# Add a new node
node3 = new_node(cluster, bootstrap=False)
node3.start(wait_for_binary_proto=True)
node3.repair()
node1.cleanup()
current_rows = list(session.execute("SELECT * FROM %s" % stress_table))
self.assertEquals(original_rows, current_rows)
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12437',
flaky=True,
notes='Windows')
def local_quorum_bootstrap_test(self):
"""
Test that CL local_quorum works while a node is bootstrapping.
@jira_ticket CASSANDRA-8058
"""
cluster = self.cluster
cluster.populate([1, 1])
cluster.start()
node1 = cluster.nodes['node1']
yaml_config = """
# Create the keyspace and table
keyspace: keyspace1
keyspace_definition: |
CREATE KEYSPACE keyspace1 WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 1, 'dc2': 1};
table: users
table_definition:
CREATE TABLE users (
username text,
first_name text,
last_name text,
email text,
PRIMARY KEY(username)
) WITH compaction = {'class':'SizeTieredCompactionStrategy'};
insert:
partitions: fixed(1)
batchtype: UNLOGGED
queries:
read:
cql: select * from users where username = ?
fields: samerow
"""
with tempfile.NamedTemporaryFile(mode='w+') as stress_config:
stress_config.write(yaml_config)
stress_config.flush()
node1.stress(['user', 'profile=' + stress_config.name, 'n=2M', 'no-warmup',
'ops(insert=1)', '-rate', 'threads=50'])
node3 = new_node(cluster, data_center='dc2')
node3.start(no_wait=True)
time.sleep(3)
out, err, _ = node1.stress(['user', 'profile=' + stress_config.name, 'ops(insert=1)',
'n=500K', 'no-warmup', 'cl=LOCAL_QUORUM',
'-rate', 'threads=5',
'-errors', 'retries=2'])
debug(out)
regex = re.compile("Operation.+error inserting key.+Exception")
failure = regex.search(out)
self.assertIsNone(failure, "Error during stress while bootstrapping")
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-11281',
flaky=True,
notes='windows')
def shutdown_wiped_node_cannot_join_test(self):
self._wiped_node_cannot_join_test(gently=True)
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-11281',
flaky=True,
notes='windows')
def killed_wiped_node_cannot_join_test(self):
self._wiped_node_cannot_join_test(gently=False)
def _wiped_node_cannot_join_test(self, gently):
"""
@jira_ticket CASSANDRA-9765
Test that if we stop a node and wipe its data then the node cannot join
        when it is not a seed. Test both a clean shutdown and a forced shutdown,
        via the gently parameter.
"""
cluster = self.cluster
cluster.populate(3)
cluster.start(wait_for_binary_proto=True)
stress_table = 'keyspace1.standard1'
# write some data
node1 = cluster.nodelist()[0]
node1.stress(['write', 'n=10K', 'no-warmup', '-rate', 'threads=8'])
session = self.patient_cql_connection(node1)
original_rows = list(session.execute("SELECT * FROM {}".format(stress_table,)))
# Add a new node, bootstrap=True ensures that it is not a seed
node4 = new_node(cluster, bootstrap=True)
node4.start(wait_for_binary_proto=True)
session = self.patient_cql_connection(node4)
self.assertEquals(original_rows, list(session.execute("SELECT * FROM {}".format(stress_table,))))
# Stop the new node and wipe its data
node4.stop(gently=gently)
self._cleanup(node4)
# Now start it, it should not be allowed to join.
mark = node4.mark_log()
node4.start(no_wait=True, wait_other_notice=False)
node4.watch_log_for("A node with address /127.0.0.4 already exists, cancelling join", from_mark=mark)
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-11281',
flaky=True,
notes='windows')
def decommissioned_wiped_node_can_join_test(self):
"""
@jira_ticket CASSANDRA-9765
Test that if we decommission a node and then wipe its data, it can join the cluster.
"""
cluster = self.cluster
cluster.populate(3)
cluster.start(wait_for_binary_proto=True)
stress_table = 'keyspace1.standard1'
# write some data
node1 = cluster.nodelist()[0]
node1.stress(['write', 'n=10K', 'no-warmup', '-rate', 'threads=8'])
session = self.patient_cql_connection(node1)
original_rows = list(session.execute("SELECT * FROM {}".format(stress_table,)))
# Add a new node, bootstrap=True ensures that it is not a seed
node4 = new_node(cluster, bootstrap=True)
node4.start(wait_for_binary_proto=True, wait_other_notice=True)
session = self.patient_cql_connection(node4)
self.assertEquals(original_rows, list(session.execute("SELECT * FROM {}".format(stress_table,))))
# Decommission the new node and wipe its data
node4.decommission()
node4.stop()
self._cleanup(node4)
# Now start it, it should be allowed to join
mark = node4.mark_log()
node4.start(wait_other_notice=True)
node4.watch_log_for("JOINING:", from_mark=mark)
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-11281',
flaky=True,
notes='windows')
def decommissioned_wiped_node_can_gossip_to_single_seed_test(self):
"""
@jira_ticket CASSANDRA-8072
@jira_ticket CASSANDRA-8422
Test that if we decommission a node, kill it and wipe its data, it can join a cluster with a single
seed node.
"""
cluster = self.cluster
cluster.populate(1)
cluster.start(wait_for_binary_proto=True)
# Add a new node, bootstrap=True ensures that it is not a seed
node2 = new_node(cluster, bootstrap=True)
node2.start(wait_for_binary_proto=True, wait_other_notice=True)
        # Decommission the new node and kill it
debug("Decommissioning & stopping node2")
node2.decommission()
node2.stop(wait_other_notice=False)
# Wipe its data
for data_dir in node2.data_directories():
debug("Deleting {}".format(data_dir))
shutil.rmtree(data_dir)
commitlog_dir = os.path.join(node2.get_path(), 'commitlogs')
debug("Deleting {}".format(commitlog_dir))
shutil.rmtree(commitlog_dir)
# Now start it, it should be allowed to join
mark = node2.mark_log()
debug("Restarting wiped node2")
node2.start(wait_other_notice=False)
node2.watch_log_for("JOINING:", from_mark=mark)
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-11281',
flaky=True,
notes='windows, one fail on linux no-vnode 2.2')
def failed_bootstrap_wiped_node_can_join_test(self):
"""
@jira_ticket CASSANDRA-9765
Test that if a node fails to bootstrap, it can join the cluster even if the data is wiped.
"""
cluster = self.cluster
cluster.populate(1)
cluster.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1})
cluster.start(wait_for_binary_proto=True)
stress_table = 'keyspace1.standard1'
# write some data, enough for the bootstrap to fail later on
node1 = cluster.nodelist()[0]
node1.stress(['write', 'n=100K', 'no-warmup', '-rate', 'threads=8'])
node1.flush()
session = self.patient_cql_connection(node1)
original_rows = list(session.execute("SELECT * FROM {}".format(stress_table,)))
# Add a new node, bootstrap=True ensures that it is not a seed
node2 = new_node(cluster, bootstrap=True)
# kill node2 in the middle of bootstrap
t = KillOnBootstrap(node2)
t.start()
node2.start()
t.join()
self.assertFalse(node2.is_running())
# wipe any data for node2
self._cleanup(node2)
# Now start it again, it should be allowed to join
mark = node2.mark_log()
node2.start(wait_other_notice=True)
node2.watch_log_for("JOINING:", from_mark=mark)
@since('2.1.1')
def simultaneous_bootstrap_test(self):
"""
        Attempt to bootstrap two nodes at once, asserting that the second concurrently bootstrapping node fails and does not interfere with the first.
Start a one node cluster and run a stress write workload.
Start up a second node, and wait for the first node to detect it has joined the cluster.
While the second node is bootstrapping, start a third node. This should fail.
@jira_ticket CASSANDRA-7069
@jira_ticket CASSANDRA-9484
"""
bootstrap_error = ("Other bootstrapping/leaving/moving nodes detected,"
" cannot bootstrap while cassandra.consistent.rangemovement is true")
self.ignore_log_patterns.append(bootstrap_error)
cluster = self.cluster
cluster.populate(1)
cluster.start(wait_for_binary_proto=True)
node1, = cluster.nodelist()
node1.stress(['write', 'n=500K', 'no-warmup', '-schema', 'replication(factor=1)',
'-rate', 'threads=10'])
node2 = new_node(cluster)
node2.start(wait_other_notice=True)
node3 = new_node(cluster, remote_debug_port='2003')
process = node3.start(wait_other_notice=False)
stdout, stderr = process.communicate()
self.assertIn(bootstrap_error, stderr, msg=stderr)
time.sleep(.5)
self.assertFalse(node3.is_running(), msg="Two nodes bootstrapped simultaneously")
node2.watch_log_for("Starting listening for CQL clients")
session = self.patient_exclusive_cql_connection(node2)
# Repeat the select count(*) query, to help catch
# bugs like 9484, where count(*) fails at higher
# data loads.
for _ in xrange(5):
assert_one(session, "SELECT count(*) from keyspace1.standard1", [500000], cl=ConsistencyLevel.ONE)
def test_cleanup(self):
"""
@jira_ticket CASSANDRA-11179
Make sure we remove processed files during cleanup
"""
cluster = self.cluster
cluster.set_configuration_options(values={'concurrent_compactors': 4})
cluster.populate(1)
cluster.start(wait_for_binary_proto=True)
node1, = cluster.nodelist()
for x in xrange(0, 5):
node1.stress(['write', 'n=100k', 'no-warmup', '-schema', 'compaction(strategy=SizeTieredCompactionStrategy,enabled=false)', 'replication(factor=1)', '-rate', 'threads=10'])
node1.flush()
node2 = new_node(cluster)
node2.start(wait_for_binary_proto=True, wait_other_notice=True)
event = threading.Event()
failed = threading.Event()
jobs = 1
thread = threading.Thread(target=self._monitor_datadir, args=(node1, event, len(node1.get_sstables("keyspace1", "standard1")), jobs, failed))
thread.start()
node1.nodetool("cleanup -j {} keyspace1 standard1".format(jobs))
event.set()
thread.join()
self.assertFalse(failed.is_set())
def _monitor_datadir(self, node, event, basecount, jobs, failed):
while True:
sstables = [s for s in node.get_sstables("keyspace1", "standard1") if "tmplink" not in s]
debug("---")
for sstable in sstables:
debug(sstable)
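            # Each cleanup job rewrites one sstable at a time, so at most
            # `jobs` in-flight sstables should exist beyond the original
            # count; more than that means processed files were not removed.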
if len(sstables) > basecount + jobs:
debug("Current count is {}, basecount was {}".format(len(sstables), basecount))
failed.set()
return
if event.is_set():
return
time.sleep(.1)
def _cleanup(self, node):
commitlog_dir = os.path.join(node.get_path(), 'commitlogs')
for data_dir in node.data_directories():
debug("Deleting {}".format(data_dir))
shutil.rmtree(data_dir)
shutil.rmtree(commitlog_dir)
""" Test functions for linalg module
"""
from __future__ import division, absolute_import, print_function
import os
import sys
import itertools
import traceback
import numpy as np
from numpy import array, single, double, csingle, cdouble, dot, identity
from numpy import multiply, atleast_2d, inf, asarray, matrix
from numpy import linalg
from numpy.linalg import matrix_power, norm, matrix_rank
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
assert_almost_equal, assert_allclose, run_module_suite,
dec
)
def ifthen(a, b):
return not a or b
def imply(a, b):
return not a or b
old_assert_almost_equal = assert_almost_equal
def assert_almost_equal(a, b, **kw):
if asarray(a).dtype.type in (single, csingle):
decimal = 6
else:
decimal = 12
old_assert_almost_equal(a, b, decimal=decimal, **kw)
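# For example (exposition only): a 1e-7 discrepancy between two float32
# values passes at the 6-decimal tolerance chosen above, while the same
# discrepancy between float64 values would have to meet 12 decimals.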
def get_real_dtype(dtype):
return {single: single, double: double,
csingle: single, cdouble: double}[dtype]
def get_complex_dtype(dtype):
return {single: csingle, double: cdouble,
csingle: csingle, cdouble: cdouble}[dtype]
def get_rtol(dtype):
# Choose a safe rtol
if dtype in (single, csingle):
return 1e-5
else:
return 1e-11
class LinalgCase(object):
def __init__(self, name, a, b, exception_cls=None):
assert isinstance(name, str)
self.name = name
self.a = a
self.b = b
self.exception_cls = exception_cls
def check(self, do):
if self.exception_cls is None:
do(self.a, self.b)
else:
assert_raises(self.exception_cls, do, self.a, self.b)
def __repr__(self):
return "<LinalgCase: %s>" % (self.name,)
#
# Base test cases
#
np.random.seed(1234)
SQUARE_CASES = [
LinalgCase("single",
array([[1., 2.], [3., 4.]], dtype=single),
array([2., 1.], dtype=single)),
LinalgCase("double",
array([[1., 2.], [3., 4.]], dtype=double),
array([2., 1.], dtype=double)),
LinalgCase("double_2",
array([[1., 2.], [3., 4.]], dtype=double),
array([[2., 1., 4.], [3., 4., 6.]], dtype=double)),
LinalgCase("csingle",
array([[1.+2j, 2+3j], [3+4j, 4+5j]], dtype=csingle),
array([2.+1j, 1.+2j], dtype=csingle)),
LinalgCase("cdouble",
array([[1.+2j, 2+3j], [3+4j, 4+5j]], dtype=cdouble),
array([2.+1j, 1.+2j], dtype=cdouble)),
LinalgCase("cdouble_2",
array([[1.+2j, 2+3j], [3+4j, 4+5j]], dtype=cdouble),
array([[2.+1j, 1.+2j, 1+3j], [1-2j, 1-3j, 1-6j]], dtype=cdouble)),
LinalgCase("empty",
atleast_2d(array([], dtype = double)),
atleast_2d(array([], dtype = double)),
linalg.LinAlgError),
LinalgCase("8x8",
np.random.rand(8, 8),
np.random.rand(8)),
LinalgCase("1x1",
np.random.rand(1, 1),
np.random.rand(1)),
LinalgCase("nonarray",
[[1, 2], [3, 4]],
[2, 1]),
LinalgCase("matrix_b_only",
array([[1., 2.], [3., 4.]]),
matrix([2., 1.]).T),
LinalgCase("matrix_a_and_b",
matrix([[1., 2.], [3., 4.]]),
matrix([2., 1.]).T),
]
NONSQUARE_CASES = [
LinalgCase("single_nsq_1",
array([[1., 2., 3.], [3., 4., 6.]], dtype=single),
array([2., 1.], dtype=single)),
LinalgCase("single_nsq_2",
array([[1., 2.], [3., 4.], [5., 6.]], dtype=single),
array([2., 1., 3.], dtype=single)),
LinalgCase("double_nsq_1",
array([[1., 2., 3.], [3., 4., 6.]], dtype=double),
array([2., 1.], dtype=double)),
LinalgCase("double_nsq_2",
array([[1., 2.], [3., 4.], [5., 6.]], dtype=double),
array([2., 1., 3.], dtype=double)),
LinalgCase("csingle_nsq_1",
array([[1.+1j, 2.+2j, 3.-3j], [3.-5j, 4.+9j, 6.+2j]], dtype=csingle),
array([2.+1j, 1.+2j], dtype=csingle)),
LinalgCase("csingle_nsq_2",
array([[1.+1j, 2.+2j], [3.-3j, 4.-9j], [5.-4j, 6.+8j]], dtype=csingle),
array([2.+1j, 1.+2j, 3.-3j], dtype=csingle)),
LinalgCase("cdouble_nsq_1",
array([[1.+1j, 2.+2j, 3.-3j], [3.-5j, 4.+9j, 6.+2j]], dtype=cdouble),
array([2.+1j, 1.+2j], dtype=cdouble)),
LinalgCase("cdouble_nsq_2",
array([[1.+1j, 2.+2j], [3.-3j, 4.-9j], [5.-4j, 6.+8j]], dtype=cdouble),
array([2.+1j, 1.+2j, 3.-3j], dtype=cdouble)),
LinalgCase("cdouble_nsq_1_2",
array([[1.+1j, 2.+2j, 3.-3j], [3.-5j, 4.+9j, 6.+2j]], dtype=cdouble),
array([[2.+1j, 1.+2j], [1-1j, 2-2j]], dtype=cdouble)),
LinalgCase("cdouble_nsq_2_2",
array([[1.+1j, 2.+2j], [3.-3j, 4.-9j], [5.-4j, 6.+8j]], dtype=cdouble),
array([[2.+1j, 1.+2j], [1-1j, 2-2j], [1-1j, 2-2j]], dtype=cdouble)),
LinalgCase("8x11",
np.random.rand(8, 11),
np.random.rand(11)),
LinalgCase("1x5",
np.random.rand(1, 5),
np.random.rand(5)),
LinalgCase("5x1",
np.random.rand(5, 1),
np.random.rand(1)),
]
HERMITIAN_CASES = [
LinalgCase("hsingle",
array([[1., 2.], [2., 1.]], dtype=single),
None),
LinalgCase("hdouble",
array([[1., 2.], [2., 1.]], dtype=double),
None),
LinalgCase("hcsingle",
array([[1., 2+3j], [2-3j, 1]], dtype=csingle),
None),
LinalgCase("hcdouble",
array([[1., 2+3j], [2-3j, 1]], dtype=cdouble),
None),
LinalgCase("hempty",
atleast_2d(array([], dtype = double)),
None,
linalg.LinAlgError),
LinalgCase("hnonarray",
[[1, 2], [2, 1]],
None),
LinalgCase("matrix_b_only",
array([[1., 2.], [2., 1.]]),
None),
LinalgCase("hmatrix_a_and_b",
matrix([[1., 2.], [2., 1.]]),
None),
LinalgCase("hmatrix_1x1",
np.random.rand(1, 1),
None),
]
#
# Gufunc test cases
#
GENERALIZED_SQUARE_CASES = []
GENERALIZED_NONSQUARE_CASES = []
GENERALIZED_HERMITIAN_CASES = []
for tgt, src in ((GENERALIZED_SQUARE_CASES, SQUARE_CASES),
(GENERALIZED_NONSQUARE_CASES, NONSQUARE_CASES),
(GENERALIZED_HERMITIAN_CASES, HERMITIAN_CASES)):
for case in src:
if not isinstance(case.a, np.ndarray):
continue
a = np.array([case.a, 2*case.a, 3*case.a])
if case.b is None:
b = None
else:
b = np.array([case.b, 7*case.b, 6*case.b])
new_case = LinalgCase(case.name + "_tile3", a, b,
case.exception_cls)
tgt.append(new_case)
a = np.array([case.a]*2*3).reshape((3, 2) + case.a.shape)
if case.b is None:
b = None
else:
b = np.array([case.b]*2*3).reshape((3, 2) + case.b.shape)
new_case = LinalgCase(case.name + "_tile213", a, b,
case.exception_cls)
tgt.append(new_case)
#
# Generate stride combination variations of the above
#
def _stride_comb_iter(x):
"""
Generate cartesian product of strides for all axes
"""
if not isinstance(x, np.ndarray):
yield x, "nop"
return
stride_set = [(1,)]*x.ndim
stride_set[-1] = (1, 3, -4)
if x.ndim > 1:
stride_set[-2] = (1, 3, -4)
if x.ndim > 2:
stride_set[-3] = (1, -4)
for repeats in itertools.product(*tuple(stride_set)):
new_shape = [abs(a*b) for a, b in zip(x.shape, repeats)]
slices = tuple([slice(None, None, repeat) for repeat in repeats])
# new array with different strides, but same data
xi = np.empty(new_shape, dtype=x.dtype)
xi.view(np.uint32).fill(0xdeadbeef)
xi = xi[slices]
xi[...] = x
xi = xi.view(x.__class__)
assert np.all(xi == x)
yield xi, "stride_" + "_".join(["%+d" % j for j in repeats])
# generate also zero strides if possible
if x.ndim >= 1 and x.shape[-1] == 1:
s = list(x.strides)
s[-1] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0"
if x.ndim >= 2 and x.shape[-2] == 1:
s = list(x.strides)
s[-2] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0_x"
if x.ndim >= 2 and x.shape[:-2] == (1, 1):
s = list(x.strides)
s[-1] = 0
s[-2] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0_0"
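# A hypothetical, non-collected sketch (names invented) of what
# _stride_comb_iter yields: each of the last two axes takes strides from
# (1, 3, -4), so a 2-D input produces nine strided views that compare
# equal to the original while exercising non-contiguous and
# negative-stride memory layouts.
def _demo_stride_comb_iter():
    x = np.arange(6.0).reshape(2, 3)
    for xi, tag in _stride_comb_iter(x):
        assert np.all(xi == x)  # same values, different strides
        print(tag, xi.strides)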
for src in (SQUARE_CASES,
NONSQUARE_CASES,
HERMITIAN_CASES,
GENERALIZED_SQUARE_CASES,
GENERALIZED_NONSQUARE_CASES,
GENERALIZED_HERMITIAN_CASES):
new_cases = []
for case in src:
for a, a_tag in _stride_comb_iter(case.a):
for b, b_tag in _stride_comb_iter(case.b):
new_case = LinalgCase(case.name + "_" + a_tag + "_" + b_tag, a, b,
exception_cls=case.exception_cls)
new_cases.append(new_case)
src.extend(new_cases)
#
# Test different routines against the above cases
#
def _check_cases(func, cases):
for case in cases:
try:
case.check(func)
except Exception:
msg = "In test case: %r\n\n" % case
msg += traceback.format_exc()
raise AssertionError(msg)
class LinalgTestCase(object):
def test_sq_cases(self):
_check_cases(self.do, SQUARE_CASES)
class LinalgNonsquareTestCase(object):
    # distinct method name so it is not shadowed when a test class
    # inherits both this and LinalgTestCase (e.g. TestLstsq)
    def test_nonsq_cases(self):
        _check_cases(self.do, NONSQUARE_CASES)
class LinalgGeneralizedTestCase(object):
@dec.slow
def test_generalized_sq_cases(self):
_check_cases(self.do, GENERALIZED_SQUARE_CASES)
class LinalgGeneralizedNonsquareTestCase(object):
@dec.slow
def test_generalized_nonsq_cases(self):
_check_cases(self.do, GENERALIZED_NONSQUARE_CASES)
class HermitianTestCase(object):
def test_herm_cases(self):
_check_cases(self.do, HERMITIAN_CASES)
class HermitianGeneralizedTestCase(object):
@dec.slow
def test_generalized_herm_cases(self):
_check_cases(self.do, GENERALIZED_HERMITIAN_CASES)
def dot_generalized(a, b):
a = asarray(a)
if a.ndim >= 3:
if a.ndim == b.ndim:
# matrix x matrix
new_shape = a.shape[:-1] + b.shape[-1:]
elif a.ndim == b.ndim + 1:
# matrix x vector
new_shape = a.shape[:-1]
else:
raise ValueError("Not implemented...")
r = np.empty(new_shape, dtype=np.common_type(a, b))
for c in itertools.product(*map(range, a.shape[:-2])):
r[c] = dot(a[c], b[c])
return r
else:
return dot(a, b)
def identity_like_generalized(a):
a = asarray(a)
if a.ndim >= 3:
r = np.empty(a.shape, dtype=a.dtype)
for c in itertools.product(*map(range, a.shape[:-2])):
r[c] = identity(a.shape[-2])
return r
else:
return identity(a.shape[0])
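# Illustrative, non-collected sketch of the two helpers above: the
# gufunc-backed linalg routines broadcast over leading "stack" axes,
# while plain dot/identity do not, hence the explicit loops.
def _demo_generalized_helpers():
    a = np.random.rand(3, 2, 2)          # stack of three 2x2 matrices
    x = np.random.rand(3, 2)             # one right-hand side per matrix
    b = dot_generalized(a, x)            # one matrix-vector product each
    eye = identity_like_generalized(a)   # stack of three 2x2 identities
    assert b.shape == (3, 2) and eye.shape == (3, 2, 2)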
class TestSolve(LinalgTestCase, LinalgGeneralizedTestCase):
def do(self, a, b):
x = linalg.solve(a, b)
assert_almost_equal(b, dot_generalized(a, x))
assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.solve(x, x).dtype, dtype)
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
def test_0_size(self):
class ArraySubclass(np.ndarray):
pass
# Test system of 0x0 matrices
a = np.arange(8).reshape(2, 2, 2)
b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass)
expected = linalg.solve(a, b)[:, 0:0,:]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0,:])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
# Test errors for non-square and only b's dimension being 0
assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b)
assert_raises(ValueError, linalg.solve, a, b[:, 0:0,:])
# Test broadcasting error
b = np.arange(6).reshape(1, 3, 2) # broadcasting error
assert_raises(ValueError, linalg.solve, a, b)
assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
# Test zero "single equations" with 0x0 matrices.
b = np.arange(2).reshape(1, 2).view(ArraySubclass)
expected = linalg.solve(a, b)[:, 0:0]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
b = np.arange(3).reshape(1, 3)
assert_raises(ValueError, linalg.solve, a, b)
assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b)
def test_0_size_k(self):
# test zero multiple equation (K=0) case.
class ArraySubclass(np.ndarray):
pass
a = np.arange(4).reshape(1, 2, 2)
b = np.arange(6).reshape(3, 2, 1).view(ArraySubclass)
expected = linalg.solve(a, b)[:,:, 0:0]
result = linalg.solve(a, b[:,:, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
# test both zero.
expected = linalg.solve(a, b)[:, 0:0, 0:0]
result = linalg.solve(a[:, 0:0, 0:0], b[:,0:0, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
class TestInv(LinalgTestCase, LinalgGeneralizedTestCase):
def do(self, a, b):
a_inv = linalg.inv(a)
assert_almost_equal(dot_generalized(a, a_inv),
identity_like_generalized(a))
assert_(imply(isinstance(a, matrix), isinstance(a_inv, matrix)))
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.inv(x).dtype, dtype)
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
class ArraySubclass(np.ndarray):
pass
a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
res = linalg.inv(a)
assert_(res.dtype.type is np.float64)
assert_equal(a.shape, res.shape)
        assert_(isinstance(res, ArraySubclass))
a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
res = linalg.inv(a)
assert_(res.dtype.type is np.complex64)
assert_equal(a.shape, res.shape)
class TestEigvals(LinalgTestCase, LinalgGeneralizedTestCase):
def do(self, a, b):
ev = linalg.eigvals(a)
evalues, evectors = linalg.eig(a)
assert_almost_equal(ev, evalues)
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.eigvals(x).dtype, dtype)
x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype))
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
class TestEig(LinalgTestCase, LinalgGeneralizedTestCase):
def do(self, a, b):
evalues, evectors = linalg.eig(a)
assert_allclose(dot_generalized(a, evectors),
np.asarray(evectors) * np.asarray(evalues)[...,None,:],
rtol=get_rtol(evalues.dtype))
assert_(imply(isinstance(a, matrix), isinstance(evectors, matrix)))
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
w, v = np.linalg.eig(x)
assert_equal(w.dtype, dtype)
assert_equal(v.dtype, dtype)
x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
w, v = np.linalg.eig(x)
assert_equal(w.dtype, get_complex_dtype(dtype))
assert_equal(v.dtype, get_complex_dtype(dtype))
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
class TestSVD(LinalgTestCase, LinalgGeneralizedTestCase):
def do(self, a, b):
u, s, vt = linalg.svd(a, 0)
assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[...,None,:],
np.asarray(vt)),
rtol=get_rtol(u.dtype))
assert_(imply(isinstance(a, matrix), isinstance(u, matrix)))
assert_(imply(isinstance(a, matrix), isinstance(vt, matrix)))
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
u, s, vh = linalg.svd(x)
assert_equal(u.dtype, dtype)
assert_equal(s.dtype, get_real_dtype(dtype))
assert_equal(vh.dtype, dtype)
s = linalg.svd(x, compute_uv=False)
assert_equal(s.dtype, get_real_dtype(dtype))
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
class TestCondSVD(LinalgTestCase, LinalgGeneralizedTestCase):
def do(self, a, b):
c = asarray(a) # a might be a matrix
s = linalg.svd(c, compute_uv=False)
old_assert_almost_equal(s[0]/s[-1], linalg.cond(a), decimal=5)
class TestCond2(LinalgTestCase):
def do(self, a, b):
c = asarray(a) # a might be a matrix
s = linalg.svd(c, compute_uv=False)
old_assert_almost_equal(s[0]/s[-1], linalg.cond(a, 2), decimal=5)
class TestCondInf(object):
def test(self):
A = array([[1., 0, 0], [0, -2., 0], [0, 0, 3.]])
assert_almost_equal(linalg.cond(A, inf), 3.)
class TestPinv(LinalgTestCase):
def do(self, a, b):
a_ginv = linalg.pinv(a)
assert_almost_equal(dot(a, a_ginv), identity(asarray(a).shape[0]))
assert_(imply(isinstance(a, matrix), isinstance(a_ginv, matrix)))
class TestDet(LinalgTestCase, LinalgGeneralizedTestCase):
def do(self, a, b):
d = linalg.det(a)
(s, ld) = linalg.slogdet(a)
if asarray(a).dtype.type in (single, double):
ad = asarray(a).astype(double)
else:
ad = asarray(a).astype(cdouble)
ev = linalg.eigvals(ad)
assert_almost_equal(d, multiply.reduce(ev, axis=-1))
assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1))
s = np.atleast_1d(s)
ld = np.atleast_1d(ld)
m = (s != 0)
assert_almost_equal(np.abs(s[m]), 1)
assert_equal(ld[~m], -inf)
def test_zero(self):
assert_equal(linalg.det([[0.0]]), 0.0)
assert_equal(type(linalg.det([[0.0]])), double)
assert_equal(linalg.det([[0.0j]]), 0.0)
assert_equal(type(linalg.det([[0.0j]])), cdouble)
assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf))
assert_equal(type(linalg.slogdet([[0.0]])[0]), double)
assert_equal(type(linalg.slogdet([[0.0]])[1]), double)
assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf))
assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble)
assert_equal(type(linalg.slogdet([[0.0j]])[1]), double)
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(np.linalg.det(x).dtype, dtype)
ph, s = np.linalg.slogdet(x)
assert_equal(s.dtype, get_real_dtype(dtype))
assert_equal(ph.dtype, dtype)
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
class TestLstsq(LinalgTestCase, LinalgNonsquareTestCase):
def do(self, a, b):
arr = np.asarray(a)
m, n = arr.shape
u, s, vt = linalg.svd(a, 0)
x, residuals, rank, sv = linalg.lstsq(a, b)
if m <= n:
assert_almost_equal(b, dot(a, x))
assert_equal(rank, m)
else:
assert_equal(rank, n)
assert_almost_equal(sv, sv.__array_wrap__(s))
if rank == n and m > n:
expect_resids = (np.asarray(abs(np.dot(a, x) - b))**2).sum(axis=0)
expect_resids = np.asarray(expect_resids)
if len(np.asarray(b).shape) == 1:
expect_resids.shape = (1,)
assert_equal(residuals.shape, expect_resids.shape)
else:
expect_resids = np.array([]).view(type(x))
assert_almost_equal(residuals, expect_resids)
assert_(np.issubdtype(residuals.dtype, np.floating))
assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
assert_(imply(isinstance(b, matrix), isinstance(residuals, matrix)))
class TestMatrixPower(object):
R90 = array([[0, 1], [-1, 0]])
Arb22 = array([[4, -7], [-2, 10]])
noninv = array([[1, 0], [0, 0]])
arbfloat = array([[0.1, 3.2], [1.2, 0.7]])
large = identity(10)
t = large[1,:].copy()
large[1,:] = large[0,:]
large[0,:] = t
def test_large_power(self):
assert_equal(matrix_power(self.R90, 2**100+2**10+2**5+1), self.R90)
def test_large_power_trailing_zero(self):
assert_equal(matrix_power(self.R90, 2**100+2**10+2**5), identity(2))
def testip_zero(self):
def tz(M):
mz = matrix_power(M, 0)
assert_equal(mz, identity(M.shape[0]))
assert_equal(mz.dtype, M.dtype)
for M in [self.Arb22, self.arbfloat, self.large]:
yield tz, M
def testip_one(self):
def tz(M):
mz = matrix_power(M, 1)
assert_equal(mz, M)
assert_equal(mz.dtype, M.dtype)
for M in [self.Arb22, self.arbfloat, self.large]:
yield tz, M
def testip_two(self):
def tz(M):
mz = matrix_power(M, 2)
assert_equal(mz, dot(M, M))
assert_equal(mz.dtype, M.dtype)
for M in [self.Arb22, self.arbfloat, self.large]:
yield tz, M
def testip_invert(self):
def tz(M):
mz = matrix_power(M, -1)
assert_almost_equal(identity(M.shape[0]), dot(mz, M))
for M in [self.R90, self.Arb22, self.arbfloat, self.large]:
yield tz, M
def test_invert_noninvertible(self):
import numpy.linalg
assert_raises(numpy.linalg.linalg.LinAlgError,
lambda: matrix_power(self.noninv, -1))
class TestBoolPower(object):
def test_square(self):
A = array([[True, False], [True, True]])
assert_equal(matrix_power(A, 2), A)
class TestEigvalsh(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b):
# note that eigenvalue arrays must be sorted since
# their order isn't guaranteed.
ev = linalg.eigvalsh(a, 'L')
evalues, evectors = linalg.eig(a)
ev.sort(axis=-1)
evalues.sort(axis=-1)
assert_allclose(ev, evalues,
rtol=get_rtol(ev.dtype))
ev2 = linalg.eigvalsh(a, 'U')
ev2.sort(axis=-1)
assert_allclose(ev2, evalues,
rtol=get_rtol(ev.dtype))
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
w = np.linalg.eigvalsh(x)
assert_equal(w.dtype, get_real_dtype(dtype))
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
def test_invalid(self):
x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)
assert_raises(ValueError, np.linalg.eigvalsh, x, UPLO="lrong")
assert_raises(ValueError, np.linalg.eigvalsh, x, "lower")
assert_raises(ValueError, np.linalg.eigvalsh, x, "upper")
def test_UPLO(self):
Klo = np.array([[0, 0],[1, 0]], dtype=np.double)
Kup = np.array([[0, 1],[0, 0]], dtype=np.double)
tgt = np.array([-1, 1], dtype=np.double)
rtol = get_rtol(np.double)
# Check default is 'L'
w = np.linalg.eigvalsh(Klo)
assert_allclose(np.sort(w), tgt, rtol=rtol)
# Check 'L'
w = np.linalg.eigvalsh(Klo, UPLO='L')
assert_allclose(np.sort(w), tgt, rtol=rtol)
# Check 'l'
w = np.linalg.eigvalsh(Klo, UPLO='l')
assert_allclose(np.sort(w), tgt, rtol=rtol)
# Check 'U'
w = np.linalg.eigvalsh(Kup, UPLO='U')
assert_allclose(np.sort(w), tgt, rtol=rtol)
# Check 'u'
w = np.linalg.eigvalsh(Kup, UPLO='u')
assert_allclose(np.sort(w), tgt, rtol=rtol)
class TestEigh(HermitianTestCase, HermitianGeneralizedTestCase):
def do(self, a, b):
# note that eigenvalue arrays must be sorted since
# their order isn't guaranteed.
ev, evc = linalg.eigh(a)
evalues, evectors = linalg.eig(a)
ev.sort(axis=-1)
evalues.sort(axis=-1)
assert_almost_equal(ev, evalues)
assert_allclose(dot_generalized(a, evc),
np.asarray(ev)[...,None,:] * np.asarray(evc),
rtol=get_rtol(ev.dtype))
ev2, evc2 = linalg.eigh(a, 'U')
ev2.sort(axis=-1)
assert_almost_equal(ev2, evalues)
assert_allclose(dot_generalized(a, evc2),
np.asarray(ev2)[...,None,:] * np.asarray(evc2),
rtol=get_rtol(ev.dtype), err_msg=repr(a))
def test_types(self):
def check(dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
w, v = np.linalg.eigh(x)
assert_equal(w.dtype, get_real_dtype(dtype))
assert_equal(v.dtype, dtype)
for dtype in [single, double, csingle, cdouble]:
yield check, dtype
def test_invalid(self):
x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)
assert_raises(ValueError, np.linalg.eigh, x, UPLO="lrong")
assert_raises(ValueError, np.linalg.eigh, x, "lower")
assert_raises(ValueError, np.linalg.eigh, x, "upper")
def test_UPLO(self):
Klo = np.array([[0, 0],[1, 0]], dtype=np.double)
Kup = np.array([[0, 1],[0, 0]], dtype=np.double)
tgt = np.array([-1, 1], dtype=np.double)
rtol = get_rtol(np.double)
# Check default is 'L'
w, v = np.linalg.eigh(Klo)
assert_allclose(np.sort(w), tgt, rtol=rtol)
# Check 'L'
w, v = np.linalg.eigh(Klo, UPLO='L')
assert_allclose(np.sort(w), tgt, rtol=rtol)
# Check 'l'
w, v = np.linalg.eigh(Klo, UPLO='l')
assert_allclose(np.sort(w), tgt, rtol=rtol)
# Check 'U'
w, v = np.linalg.eigh(Kup, UPLO='U')
assert_allclose(np.sort(w), tgt, rtol=rtol)
# Check 'u'
w, v = np.linalg.eigh(Kup, UPLO='u')
assert_allclose(np.sort(w), tgt, rtol=rtol)
class _TestNorm(object):
dt = None
dec = None
def test_empty(self):
assert_equal(norm([]), 0.0)
assert_equal(norm(array([], dtype=self.dt)), 0.0)
assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0)
def test_vector(self):
a = [1, 2, 3, 4]
b = [-1, -2, -3, -4]
c = [-1, 2, -3, 4]
def _test(v):
np.testing.assert_almost_equal(norm(v), 30**0.5,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, inf), 4.0,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, -inf), 1.0,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, 1), 10.0,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, -1), 12.0/25,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, 2), 30**0.5,
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, -2), ((205./144)**-0.5),
decimal=self.dec)
np.testing.assert_almost_equal(norm(v, 0), 4,
decimal=self.dec)
for v in (a, b, c,):
_test(v)
for v in (array(a, dtype=self.dt), array(b, dtype=self.dt),
array(c, dtype=self.dt)):
_test(v)
def test_matrix(self):
A = matrix([[1, 3], [5, 7]], dtype=self.dt)
assert_almost_equal(norm(A), 84**0.5)
assert_almost_equal(norm(A, 'fro'), 84**0.5)
assert_almost_equal(norm(A, inf), 12.0)
assert_almost_equal(norm(A, -inf), 4.0)
assert_almost_equal(norm(A, 1), 10.0)
assert_almost_equal(norm(A, -1), 6.0)
assert_almost_equal(norm(A, 2), 9.1231056256176615)
assert_almost_equal(norm(A, -2), 0.87689437438234041)
assert_raises(ValueError, norm, A, 'nofro')
assert_raises(ValueError, norm, A, -3)
assert_raises(ValueError, norm, A, 0)
def test_axis(self):
# Vector norms.
# Compare the use of `axis` with computing the norm of each row
# or column separately.
A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]:
expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])]
assert_almost_equal(norm(A, ord=order, axis=0), expected0)
expected1 = [norm(A[k,:], ord=order) for k in range(A.shape[0])]
assert_almost_equal(norm(A, ord=order, axis=1), expected1)
# Matrix norms.
B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro']:
assert_almost_equal(norm(A, ord=order), norm(A, ord=order,
axis=(0, 1)))
n = norm(B, ord=order, axis=(1, 2))
expected = [norm(B[k], ord=order) for k in range(B.shape[0])]
assert_almost_equal(n, expected)
n = norm(B, ord=order, axis=(2, 1))
expected = [norm(B[k].T, ord=order) for k in range(B.shape[0])]
assert_almost_equal(n, expected)
n = norm(B, ord=order, axis=(0, 2))
expected = [norm(B[:, k,:], ord=order) for k in range(B.shape[1])]
assert_almost_equal(n, expected)
n = norm(B, ord=order, axis=(0, 1))
expected = [norm(B[:,:, k], ord=order) for k in range(B.shape[2])]
assert_almost_equal(n, expected)
def test_bad_args(self):
# Check that bad arguments raise the appropriate exceptions.
A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
# Using `axis=<integer>` or passing in a 1-D array implies vector
# norms are being computed, so also using `ord='fro'` raises a
# ValueError.
assert_raises(ValueError, norm, A, 'fro', 0)
assert_raises(ValueError, norm, [3, 4], 'fro', None)
# Similarly, norm should raise an exception when ord is any finite
# number other than 1, 2, -1 or -2 when computing matrix norms.
for order in [0, 3]:
assert_raises(ValueError, norm, A, order, None)
assert_raises(ValueError, norm, A, order, (0, 1))
assert_raises(ValueError, norm, B, order, (1, 2))
# Invalid axis
assert_raises(ValueError, norm, B, None, 3)
assert_raises(ValueError, norm, B, None, (2, 3))
assert_raises(ValueError, norm, B, None, (0, 1, 2))
class TestNormDouble(_TestNorm):
dt = np.double
dec = 12
class TestNormSingle(_TestNorm):
dt = np.float32
dec = 6
class TestNormInt64(_TestNorm):
dt = np.int64
dec = 12
class TestMatrixRank(object):
def test_matrix_rank(self):
# Full rank matrix
yield assert_equal, 4, matrix_rank(np.eye(4))
# rank deficient matrix
        I = np.eye(4)
        I[-1, -1] = 0.
yield assert_equal, matrix_rank(I), 3
# All zeros - zero rank
yield assert_equal, matrix_rank(np.zeros((4, 4))), 0
# 1 dimension - rank 1 unless all 0
yield assert_equal, matrix_rank([1, 0, 0, 0]), 1
yield assert_equal, matrix_rank(np.zeros((4,))), 0
# accepts array-like
yield assert_equal, matrix_rank([1]), 1
# greater than 2 dimensions raises error
yield assert_raises, TypeError, matrix_rank, np.zeros((2, 2, 2))
# works on scalar
yield assert_equal, matrix_rank(1), 1
def test_reduced_rank():
# Test matrices with reduced rank
rng = np.random.RandomState(20120714)
for i in range(100):
# Make a rank deficient matrix
X = rng.normal(size=(40, 10))
X[:, 0] = X[:, 1] + X[:, 2]
# Assert that matrix_rank detected deficiency
assert_equal(matrix_rank(X), 9)
X[:, 3] = X[:, 4] + X[:, 5]
assert_equal(matrix_rank(X), 8)
class TestQR(object):
def check_qr(self, a):
# This test expects the argument `a` to be an ndarray or
# a subclass of an ndarray of inexact type.
a_type = type(a)
a_dtype = a.dtype
m, n = a.shape
k = min(m, n)
# mode == 'complete'
q, r = linalg.qr(a, mode='complete')
assert_(q.dtype == a_dtype)
assert_(r.dtype == a_dtype)
assert_(isinstance(q, a_type))
assert_(isinstance(r, a_type))
assert_(q.shape == (m, m))
assert_(r.shape == (m, n))
assert_almost_equal(dot(q, r), a)
assert_almost_equal(dot(q.T.conj(), q), np.eye(m))
assert_almost_equal(np.triu(r), r)
# mode == 'reduced'
q1, r1 = linalg.qr(a, mode='reduced')
assert_(q1.dtype == a_dtype)
assert_(r1.dtype == a_dtype)
assert_(isinstance(q1, a_type))
assert_(isinstance(r1, a_type))
assert_(q1.shape == (m, k))
assert_(r1.shape == (k, n))
assert_almost_equal(dot(q1, r1), a)
assert_almost_equal(dot(q1.T.conj(), q1), np.eye(k))
assert_almost_equal(np.triu(r1), r1)
# mode == 'r'
r2 = linalg.qr(a, mode='r')
assert_(r2.dtype == a_dtype)
assert_(isinstance(r2, a_type))
assert_almost_equal(r2, r1)
def test_qr_empty(self):
a = np.zeros((0, 2))
assert_raises(linalg.LinAlgError, linalg.qr, a)
def test_mode_raw(self):
# The factorization is not unique and varies between libraries,
# so it is not possible to check against known values. Functional
# testing is a possibility, but awaits the exposure of more
# of the functions in lapack_lite. Consequently, this test is
# very limited in scope. Note that the results are in FORTRAN
# order, hence the h arrays are transposed.
a = array([[1, 2], [3, 4], [5, 6]], dtype=np.double)
b = a.astype(np.single)
# Test double
h, tau = linalg.qr(a, mode='raw')
assert_(h.dtype == np.double)
assert_(tau.dtype == np.double)
assert_(h.shape == (2, 3))
assert_(tau.shape == (2,))
h, tau = linalg.qr(a.T, mode='raw')
assert_(h.dtype == np.double)
assert_(tau.dtype == np.double)
assert_(h.shape == (3, 2))
assert_(tau.shape == (2,))
def test_mode_all_but_economic(self):
a = array([[1, 2], [3, 4]])
b = array([[1, 2], [3, 4], [5, 6]])
for dt in "fd":
m1 = a.astype(dt)
m2 = b.astype(dt)
self.check_qr(m1)
self.check_qr(m2)
self.check_qr(m2.T)
self.check_qr(matrix(m1))
for dt in "fd":
m1 = 1 + 1j * a.astype(dt)
m2 = 1 + 1j * b.astype(dt)
self.check_qr(m1)
self.check_qr(m2)
self.check_qr(m2.T)
self.check_qr(matrix(m1))
def test_byteorder_check():
# Byte order check should pass for native order
if sys.byteorder == 'little':
native = '<'
else:
native = '>'
for dtt in (np.float32, np.float64):
arr = np.eye(4, dtype=dtt)
n_arr = arr.newbyteorder(native)
sw_arr = arr.newbyteorder('S').byteswap()
assert_equal(arr.dtype.byteorder, '=')
for routine in (linalg.inv, linalg.det, linalg.pinv):
# Normal call
res = routine(arr)
# Native but not '='
assert_array_equal(res, routine(n_arr))
# Swapped
assert_array_equal(res, routine(sw_arr))
def test_generalized_raise_multiloop():
# It should raise an error even if the error doesn't occur in the
# last iteration of the ufunc inner loop
invertible = np.array([[1, 2], [3, 4]])
non_invertible = np.array([[1, 1], [1, 1]])
x = np.zeros([4, 4, 2, 2])[1::2]
x[...] = invertible
x[0, 0] = non_invertible
assert_raises(np.linalg.LinAlgError, np.linalg.inv, x)
def test_xerbla_override():
# Check that our xerbla has been successfully linked in. If it is not,
# the default xerbla routine is called, which prints a message to stdout
# and may, or may not, abort the process depending on the LAPACK package.
from nose import SkipTest
try:
pid = os.fork()
except (OSError, AttributeError):
# fork failed, or not running on POSIX
raise SkipTest("Not POSIX or fork failed.")
if pid == 0:
# child; close i/o file handles
os.close(1)
os.close(0)
# Avoid producing core files.
import resource
resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
# These calls may abort.
try:
np.linalg.lapack_lite.xerbla()
except ValueError:
pass
except:
os._exit(os.EX_CONFIG)
try:
a = np.array([[1]])
np.linalg.lapack_lite.dgetrf(
1, 1, a.astype(np.double),
0, # <- invalid value
a.astype(np.intc), 0)
except ValueError as e:
if "DGETRF parameter number 4" in str(e):
# success
os._exit(os.EX_OK)
# Did not abort, but our xerbla was not linked in.
os._exit(os.EX_CONFIG)
else:
# parent
pid, status = os.wait()
if os.WEXITSTATUS(status) != os.EX_OK or os.WIFSIGNALED(status):
raise SkipTest('Numpy xerbla not linked in.')
if __name__ == "__main__":
run_module_suite()
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 19 12:32:51 2015
@author: paulinkenbrandt
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as cm
from scipy.stats import linregress
import numpy as np
from collections import OrderedDict
from datetime import datetime, timedelta
def get_recess_int(df, Q, maxper=18, minper=6, thresh=30, inplace=False):
"""Gets intervals of recession from a hydrograph
:param df: DataFrame with hydrograph data
:param Q: Field in DataFrame with discharge data
:param maxper: Period of record to scan discharge data for maxima; created for daily values; defaults to 18
    :param minper: Period of record to scan discharge data for minima; should be less than maxper; defaults to 6
:param thresh: Threshold of discharge below which maxima are not considered; defaults to 30
:param inplace: Append to input database or create new one; defaults to False
:return: DataFrame of original data and Max and Min, Indexes of maxima, Indexes of minima
"""
if inplace:
data = df
else:
data = df.copy()
    # keep only rolling maxima/minima that coincide with the observed series
    data['max'] = data[Q].rolling(maxper, center=True).max()
    data['max'] = data.ix[data['max'] == data[Q], 'max']
    data['max'] = data.ix[data['max'] > thresh, 'max']
    data['min'] = data[Q].rolling(minper, center=True).min()
    data['min'] = data.ix[data['min'] == data[Q], 'min']
maxlist = data.index[data['max'].notnull()]
firstmin = []
for ind in maxlist:
firstmin.append(data.ix[ind:,'min'].first_valid_index())
data['min'] = data.ix[data.index.isin(firstmin),'min']
return data, maxlist, firstmin
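# Minimal usage sketch on synthetic data (the field name 'Q' is
# illustrative): detect rolling maxima above the threshold and the first
# trough following each one.
def _demo_get_recess_int():
    idx = pd.date_range('2015-01-01', periods=120, freq='D')
    q = 50 + 40 * np.sin(np.linspace(0, 6 * np.pi, 120))
    df = pd.DataFrame({'Q': q}, index=idx)
    return get_recess_int(df, 'Q', thresh=30)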
class recess(object):
"""Creates recession curve and modeled output to describe spring and streamflow recession.
:param df: dataframe with spring discharge data
:param Q: string indicating discharge field in df in units of gpm
:param st: start date to examine data in [YYYY, MM, DD] format, where values are integers in an array
:param end: end date to examine data
    :param excs: begin date of exclusion period
    :param excf: end date of exclusion period
:type st: list
:type end: list
:type Q: str
:return popt: alpha value for recession curve
:return x1: days from start of recession
:return x2: dates of recession curve analysis
:return y1: points used for recession curve analysis
:return y2: recession curve values
:returns: Plot of recession curve
"""
def __init__(self, df, Q, st, end='', lab='', excs=[0, 0, 0], excf=[0, 0, 0]):
self.ymd = [datetime.now().year, datetime.now().month, datetime.now().day]
if end == '':
end = self.ymd
if lab == '':
self.Qlab = 'Discharge'
else:
self.Qlab = lab
self.Qz = df[Q][0]
self.rec_results = self.recession(df, Q, st, end, excs, excf)
def fitit(self, x, y, Q):
from scipy.optimize import curve_fit
func = lambda x, c: Q * np.exp(-1 * c * x)
popt, pcov = curve_fit(func, x, y, p0=(1e-1))
return popt, pcov
def recession(self, df, Q, st, end, excs, excf):
"""Creates recession curve and modeled output to describe spring and streamflow recession.
The user puts in a dataframe with discharge data and defines the date range of recession. The Class will return
recession values of the date range given.
:param df: DataFrame with spring discharge data
:type df: pandas.core.frame.DataFrame
:param Q: discharge field in df in units of gpm
:type Q: str
:param st: start date to examine data in [YYYY, MM, DD] format, where values are integers in an array
:type st: list
:param end: end date to examine data
:type end: list
:param excs: begin date of exclusion period
:param excf: end date of exclusion period
:type excs: list
        :type excf: list
:returns: popt1, x1, x2, y1, y2
:return popt1: alpha value for recession curve
:return x1: days from start of recession
:return x2: dates of recession curve analysis
:return y1: points used for recession curve analysis
:return y2: recession curve values
"""
# account for hours in time input
if len(st) == 3 and len(end) == 3:
df1 = df[(df.index >= pd.datetime(st[0], st[1], st[2])) & (df.index <= pd.datetime(end[0], end[1], end[2]))]
else:
            df1 = df[(df.index >= pd.datetime(st[0], st[1], st[2], st[3], st[4])) & (
                df.index <= pd.datetime(end[0], end[1], end[2], end[3], end[4]))]
# account for hours in time input
if excs[0] == 0:
pass
else:
if len(excs) == 3:
df1 = df1[(df1.index < pd.datetime(excs[0], excs[1], excs[2])) | (
df1.index > pd.datetime(excf[0], excf[1], excf[2]))]
else:
df1 = df1[(df1.index < pd.datetime(excs[0], excs[1], excs[2], excs[3], excs[4])) | (
df1.index > pd.datetime(excf[0], excf[1], excf[2], excf[3], excf[4]))]
df2 = df1.dropna(subset=[Q])
y1 = df2[Q]
x1 = (df2.index.to_julian_date() - df2.index.to_julian_date()[0]) # convert to numeric days for opt. function
popt1, pcov1 = self.fitit(x1, y1, y1[0]) # fit curve
x2 = [df2.index[0] + timedelta(i) for i in x1] # convert back to dates for labels
y2 = [y1[0] * np.exp(-1 * popt1[0] * i) for i in x1] # run function with optimized variables
plt.plot(x2, y2, label='Recession (alpha = %.3f)' % popt1[0]) # report alpha value
plt.scatter(x2, y1, label='Discharge')
plt.ylabel(self.Qlab)
plt.legend(scatterpoints=1)
plt.show()
return popt1, x1, x2, y1, y2
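# Hypothetical usage of the recess class, fitting Q(t) = Q0 * exp(-alpha * t)
# over a chosen window (file and field names are invented):
#
#     df = pd.read_csv('spring.csv', index_col=0, parse_dates=True)
#     r = recess(df, 'Q_gpm', st=[2014, 7, 1], end=[2014, 9, 30])
#     alpha = r.rec_results[0][0]  # optimized recession coefficient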
class piper(object):
"""Class that generates rectangular piper diagrams.
:param df: DataFrame containing chemistry data; must have fields labeled as abbreviations of the major ions; Na, K,
NaK, Ca, Mg, Cl, HCO3, CO3, and SO4
:type df: pandas.core.frame.DataFrame
:param type_col: Name of field that designates the sample type (optional); defaults to ''
:type type_col: str
:param var_col: Name of field that contains a scalar variable to be designated by color (optional); defaults to ''
:type var_col: str
.. note::
Hydrochemistry - Construct Rectangular Piper plot
        Adapted from: Ray and Mukherjee, 2008, Groundwater 46(6): 893-896 and from code found at:
http://python.hydrology-amsterdam.nl/scripts/piper_rectangular.py
Based on code by:
B.M. van Breukelen <b.m.vanbreukelen@vu.nl>
"""
def __init__(self, df, type_col='', var_col=''):
self.fieldnames = [u'Na', u'K', u'NaK', u'Ca', u'Mg', u'Cl', u'HCO3', u'CO3', u'SO4']
self.anions = ['Cl', 'HCO3', 'CO3', 'SO4']
self.cations = ['Na', 'K', 'Ca', 'Mg', 'NaK']
self.piperplot(df, type_col, var_col)
def fillMissing(self, df):
# fill in nulls with 0
for col in df.columns:
if col in self.fieldnames:
for i in df.index:
if df.loc[i, col] is None or df.loc[i, col] == '' or np.isnan(df.loc[i, col]):
df.loc[i, col] = 0
            else:
                # non-ion columns (sample type, scalar variables) are left untouched
                pass
# add missing columns
for name in self.fieldnames:
            if name not in df.columns:
                print(name)
                df[name] = 0
return df
def check_nak(self, x):
if x[0] == 0 and x[2] > 0:
return x[2]
else:
return x[0] + x[1]
def convertIons(self, df):
"""Convert major ion concentrations from mg/L to meq
This function uses conversion factors to convert the concentrations of major ions from mg/L to meq. It also
appends a field to the input database listing the Cation-Anion pair that have the highest meq concentrations.
:param df: DataFrame containing chemistry data; must have fields labeled as abbreviations of the major ions; Na, K,
NaK, Ca, Mg, Cl, HCO3, CO3, and SO4
        :returns: appends converted fields onto the DataFrame with the suffix `_meq` and adds the fields 'water_type',
            'CBE' (charge-balance error), 'EC' (charge imbalance; anions - cations), and 'total_ions' (anions + cations)
"""
# Conversion factors from mg/L to meq/L
d = {'Ca': 0.04990269, 'Mg': 0.082287595, 'Na': 0.043497608, 'K': 0.02557656, 'Cl': 0.028206596, 'NaK': 0.043497608,
'HCO3': 0.016388838, 'CO3': 0.033328223, 'SO4': 0.020833333, 'NO2': 0.021736513, 'NO3': 0.016129032}
df1 = df
for name in self.fieldnames:
if name in df.columns:
df1[name + '_meq'] = df1[name].apply(lambda x: float(d.get(name, 0)) * x, 1)
df1['NaK_meq'] = df1[['Na_meq', 'K_meq','NaK_meq']].apply(lambda x: self.check_nak(x), 1)
df1['anions'] = 0
df1['cations'] = 0
for ion in self.anions:
if ion in df.columns:
df1['anions'] += df1[ion + '_meq']
for ion in self.cations:
if ion in df1.columns:
df1['cations'] += df1[ion + '_meq']
df1['total_ions'] = df1['cations'] + df1['anions']
df1['EC'] = df1['anions'] - df1['cations']
df1['CBE'] = df1['EC'] / (df1['anions'] + df1['cations'])
df1['maj_cation'] = df1[['Ca_meq','Mg_meq','Na_meq','K_meq','NaK_meq']].idxmax(axis=1)
df1['maj_anion'] = df1[['Cl_meq','SO4_meq','HCO3_meq','CO3_meq']].idxmax(axis=1)
df1['water_type'] = df1[['maj_cation','maj_anion']].apply(lambda x: str(x[0])[:-4]+'-'+str(x[1])[:-4],1)
return df1
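    # Worked example of the conversion above (illustrative numbers): the
    # factor for Ca is charge / atomic weight = 2 / 40.08 ~= 0.0499, the
    # reciprocal of its equivalent weight, so 40 mg/L Ca ~= 2.0 meq/L.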
def ionPercentage(self, df):
"""Determines percentage of charge for each ion for display on the piper plot"""
for ion in self.anions:
df[ion + 'EC'] = df[[ion + '_meq', 'anions']].apply(lambda x: 100 * x[0] / x[1], 1)
for ion in self.cations:
df[ion + 'EC'] = df[[ion + '_meq', 'cations']].apply(lambda x: 100 * x[0] / x[1], 1)
return df
def piperplot(self, df, type_col, var_col):
"""Generates a rectangular piper diagram"""
self.fillMissing(df)
self.convertIons(df)
self.ionPercentage(df)
CaEC = df['CaEC'].values
MgEC = df['MgEC'].values
ClEC = df['ClEC'].values
SO4EC = df['SO4EC'].values
NaKEC = df['NaKEC'].values
SO4ClEC = df[['ClEC', 'SO4EC']].apply(lambda x: x[0] + x[1], 1).values
num_samps = len(df)
if var_col == '':
Elev = ''
else:
Elev = df[var_col].values
if type_col == '':
typ = ['Station']*num_samps
stationtypes = ['Station']
else:
stationtypes = list(df[type_col].unique())
typ = df[type_col].values
# Change default settings for figures
plt.rc('xtick', labelsize=10)
plt.rc('ytick', labelsize=10)
plt.rc('font', size=12)
plt.rc('legend', fontsize=12)
plt.rc('figure', figsize=(14, 5.5)) # defines size of Figure window orig (14,4.5)
markSize = 30
lineW = 0.5
# Make Figure
fig = plt.figure()
# add title
# fig.suptitle(piperTitle, x=0.20,y=.98, fontsize=14 )
# Colormap and Saving Options for Figure
if len(Elev) > 0:
vart = Elev
else:
vart = [1] * num_samps
cNorm = plt.Normalize(vmin=min(vart), vmax=max(vart))
cmap = plt.cm.coolwarm
# pdf = PdfPages(fileplace)
mrkrSymbl = ['v', '^', '+', 's', '.', 'o', '*', 'v', '^', '+', 's', ',', '.', 'o', '*', 'v', '^', '+', 's', ',',
'.', 'o', '*', 'v', '^', '+', 's', ',', '.', 'o', '*']
# count variable for legend (n)
unique, counts = np.unique(typ, return_counts=True)
nstatTypesDict = dict(zip(unique, counts))
typdict = {}
for i in range(len(stationtypes)):
typdict[stationtypes[i]] = mrkrSymbl[i]
# CATIONS-----------------------------------------------------------------------------
# 2 lines below needed to create 2nd y-axis (ax1b) for first subplot
ax1 = fig.add_subplot(131)
ax1b = ax1.twinx()
ax1.fill([100, 0, 100, 100], [0, 100, 100, 0], color=(0.8, 0.8, 0.8))
ax1.plot([100, 0], [0, 100], 'k')
ax1.plot([50, 0, 50, 50], [0, 50, 50, 0], 'k--')
ax1.text(25, 15, 'Na type')
ax1.text(75, 15, 'Ca type')
ax1.text(25, 65, 'Mg type')
if len(typ) > 0:
for j in range(len(typ)):
ax1.scatter(CaEC[j], MgEC[j], s=markSize, c=vart[j], cmap=cmap, norm=cNorm, marker=typdict[typ[j]],
linewidths=lineW)
else:
ax1.scatter(CaEC, MgEC, s=markSize, c=vart, cmap=cmap, norm=cNorm, linewidths=lineW)
ax1.set_xlim(0, 100)
ax1.set_ylim(0, 100)
ax1b.set_ylim(0, 100)
ax1.set_xlabel('<= Ca (% meq)')
ax1b.set_ylabel('Mg (% meq) =>')
plt.setp(ax1, yticklabels=[])
# next line needed to reverse x axis:
ax1.set_xlim(ax1.get_xlim()[::-1])
# ANIONS----------------------------------------------------------------------------
ax = fig.add_subplot(1, 3, 3)
ax.fill([100, 100, 0, 100], [0, 100, 100, 0], color=(0.8, 0.8, 0.8))
ax.plot([0, 100], [100, 0], 'k')
ax.plot([50, 50, 0, 50], [0, 50, 50, 0], 'k--')
ax.text(55, 15, 'Cl type')
ax.text(5, 15, 'HCO3 type')
ax.text(5, 65, 'SO4 type')
if len(typ) > 0:
for j in range(len(typ)):
labs = "{:} n= {:}".format(typ[j],nstatTypesDict[typ[j]])
if float(nstatTypesDict[typ[j]]) > 1:
s = ax.scatter(ClEC[j], SO4EC[j], s=markSize, c=vart[j], cmap=cmap, norm=cNorm,
marker=typdict[typ[j]], label=labs, linewidths=lineW)
else:
s = ax.scatter(ClEC[j], SO4EC[j], s=markSize, c=vart[j], cmap=cmap, norm=cNorm,
marker=typdict[typ[j]], label=typ[j], linewidths=lineW)
else:
s = ax.scatter(ClEC, SO4EC, s=markSize, c=vart, cmap=cmap, norm=cNorm, label='Sample', linewidths=lineW)
ax.set_xlim(0, 100)
ax.set_ylim(0, 100)
ax.set_xlabel('Cl (% meq) =>')
ax.set_ylabel('SO4 (% meq) =>')
# CATIONS AND ANIONS COMBINED ---------------------------------------------------------------
# 2 lines below needed to create 2nd y-axis (ax1b) for first subplot
ax2 = fig.add_subplot(132)
ax2b = ax2.twinx()
ax2.plot([0, 100], [10, 10], 'k--')
ax2.plot([0, 100], [50, 50], 'k--')
ax2.plot([0, 100], [90, 90], 'k--')
ax2.plot([10, 10], [0, 100], 'k--')
ax2.plot([50, 50], [0, 100], 'k--')
ax2.plot([90, 90], [0, 100], 'k--')
if len(typ) > 0:
for j in range(len(typ)):
ax2.scatter(NaKEC[j], SO4ClEC[j], s=markSize, c=vart[j], cmap=cmap, norm=cNorm, marker=typdict[typ[j]],
linewidths=lineW)
else:
ax2.scatter(NaKEC, SO4ClEC, s=markSize, c=vart, cmap=cmap, norm=cNorm, linewidths=lineW)
ax2.set_xlim(0, 100)
ax2.set_ylim(0, 100)
ax2.set_xlabel('Na+K (% meq) =>')
ax2.set_ylabel('SO4+Cl (% meq) =>')
ax2.set_title('<= Ca+Mg (% meq)', fontsize=12)
ax2b.set_ylabel('<= CO3+HCO3 (% meq)')
ax2b.set_ylim(0, 100)
# next two lines needed to reverse 2nd y axis:
ax2b.set_ylim(ax2b.get_ylim()[::-1])
# Align plots
plt.subplots_adjust(left=0.05, bottom=0.35, right=0.95, top=0.90, wspace=0.4, hspace=0.0)
# Legend-----------------------------------------------------------------------------------------
# Add colorbar below legend
# [left, bottom, width, height] where all quantities are in fractions of figure width and height
if len(typ) > 0:
handles, labels = ax.get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys(), loc='lower center', ncol=5, shadow=False, fancybox=True,
bbox_to_anchor=(0.5, -0.3), scatterpoints=1)
if len(Elev) > 0:
cax = fig.add_axes([0.25, 0.10, 0.50, 0.02])
cb1 = plt.colorbar(s, cax=cax, cmap=cmap, norm=cNorm, orientation='horizontal') # use_gridspec=True
cb1.set_label(var_col, size=8)
self.plot = fig
self.df = df
def fdc(df, site, begyear=1900, endyear=2015, normalizer=1, plot=True):
"""Generate flow duration curve for hydrologic time series data
:param df: DataFrame with discharge data of interest; must have a date or date-time as the index
:type df: pandas.core.frame.DataFrame
:param site: Name of DataFrame column in df containing discharge data
:type site: str
:param begyear: beginning year of analysis; defaults to 1900
:type begyear: int
:param endyear: end year of analysis; defaults to 2015
:type endyear: int
:param normalizer: value to use to normalize discharge; defaults to 1 (no normalization)
:type normalizer: int
:param plot: Whether to generate the plot or just return the variables for a plot; defaults to true
:type plot: bool
:returns: matplotlib plot displaying the flow duration curve of the data
    :return prob: x values giving the exceedance probability of each discharge in data
    :rtype prob: list
    :return data: y values giving the discharge exceeded with probability prob
    :rtype data: list
"""
from scipy import stats as sp
# limit dataframe to only the site
df = df[[site]]
# filter dataframe to only include dates of interest
data = df[
(pd.to_datetime(df.index) > pd.datetime(begyear, 1, 1)) & (pd.to_datetime(df.index) < pd.datetime(endyear, 1, 1))]
# remove na values from dataframe
data = data.dropna()
# take average of each day of year (from 1 to 366) over the selected period of record
data['doy'] = data.index.dayofyear
dailyavg = data[site].groupby(data['doy']).mean()
data = np.sort(dailyavg)
## uncomment the following to use normalized discharge instead of discharge
# mean = np.mean(data)
# std = np.std(data)
# data = [(data[i]-np.mean(data))/np.std(data) for i in range(len(data))]
data = [(data[i]) / normalizer for i in range(len(data))]
# ranks data from smallest to largest
ranks = sp.rankdata(data, method='average')
# reverses rank order
ranks = ranks[::-1]
# calculate probability of each rank
prob = [(ranks[i] / (len(data) + 1)) for i in range(len(data))]
# plot data via matplotlib
if plot:
plt.plot(prob, data, label=site + ' ' + str(begyear) + '-' + str(endyear))
else:
pass
return prob, data
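# Minimal usage sketch for fdc (field name and years are illustrative):
#
#     prob, q = fdc(df, 'discharge', begyear=1990, endyear=2010)
#     plt.xlabel('Exceedance probability')
#     plt.yscale('log')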
class gantt(object):
"""Class to create gantt plots and to summarize pandas timeseries dataframes.
Finds gaps and measuring duration of data.
:param df: The DataFrame with a datetime index and columns as site time-series data; each column name
should be the site name or the site labels should be input for chart
:param stations: List of columns to include in the chart; defaults to all columns
:param labels: Labels to use in the resulting plot for each station; must be equal to the length of stations list;
defaults to stations
:param samp_int: regular interval that the datetime index will be resampled. Defaults to daily ('D');
see http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases for all of the options
:type samp_int: str
:type df: pandas.core.frame.DataFrame
:type stations: list
:type labels: list
.. note::
`.stations` produces a list describing the stations put into the class
`.labels` produces a list describing the labels put into the class
`.dateranges` is a dictionary describing gaps in the dataframe based on the presence of nan values in the frame
`.ganttPlotter()` plots a gantt plot
"""
def __init__(self, df, stations=[], labels=[], samp_int = 'D'):
if len(stations) == 0:
stations = df.columns
if len(labels) == 0:
labels = stations
self.data = df.resample(samp_int).mean()
self.stations = stations
self.labels = labels
self.dateranges = self.markGaps()
self.sitestats = self.site_info()
print(
'Data Loaded \nType .ganttPlotter() after your defined object to make plot\nType .sitestats after your defined object to get summary stats')
def markGaps(self):
"""Produces dictionary of list of gaps in time series data based on the presence of nan values;
used for gantt plotting
:returns: dateranges; a dictionary with station names as keys and lists of begin and end dates as values
"""
df = self.data
stations = self.stations
dateranges = {}
for station in stations:
dateranges[station] = []
first = df.ix[:, station].first_valid_index()
last = df.ix[:, station].last_valid_index()
records = df.ix[first:last, station]
            # ganttPlotter consumes these as (start, end) pairs, so each
            # station's list must open with its first valid date
            dateranges[station].append(pd.to_datetime(first))
            for i in range(len(records) - 1):
                if pd.isnull(records[i + 1]) and pd.notnull(records[i]):
                    # end of a run of valid data
                    dateranges[station].append(pd.to_datetime(records.index)[i])
                elif pd.isnull(records[i]) and pd.notnull(records[i + 1]):
                    # start of the next run of valid data
                    dateranges[station].append(pd.to_datetime(records.index)[i + 1])
            dateranges[station].append(pd.to_datetime(last))
return dateranges
def site_info(self):
"""Creates a table of summary statistics for all of the stations in the stations field defined in the class
:returns: site_info; a table of summary statistics; first, last, min, max, std, median, avg, 25%tile, 75%tile,
and count
"""
stations = self.stations
df = self.data
stat, first, last, minum, maxum, stdev, medin, avg, q25, q75, count = [], [], [], [], [], [], [], [], [], [], []
for station in stations:
stdt = df.ix[:, station]
stat.append(station)
first.append(stdt.first_valid_index())
last.append(stdt.last_valid_index())
minum.append(stdt.min())
maxum.append(stdt.max())
stdev.append(stdt.std())
medin.append(stdt.median())
avg.append(stdt.mean())
q25.append(stdt.quantile(0.25))
q75.append(stdt.quantile(0.75))
count.append(stdt.count())
colm = {'StationId': stat, 'first': first, 'last': last, 'min': minum, 'max': maxum,
'std': stdev, 'median': medin, 'mean': avg, 'q25': q25, 'q75': q75, 'count': count}
Site_Info = pd.DataFrame(colm)
return Site_Info
def ganttPlotter(self):
"""Plots gantt plot using dictionary of stations and associated start and end dates;
uses output from markGaps function"""
labs, tickloc, col = [], [], []
dateranges = self.dateranges
stations = self.stations
labels = self.labels
# create color iterator for multi-color lines in gantt chart
color = iter(plt.cm.Dark2(np.linspace(0, 1, len(stations))))
        fig, ax = plt.subplots(figsize=[8, 10])
for i in range(len(stations)):
c = next(color)
for j in range(len(dateranges[stations[i]]) - 1):
if (j + 1) % 2 != 0:
if len(labels) == 0 or len(labels) != len(stations):
plt.hlines(i + 1, dateranges[stations[i]][j], dateranges[stations[i]][j + 1], label=stations[i],
color=c, linewidth=3)
else:
plt.hlines(i + 1, dateranges[stations[i]][j], dateranges[stations[i]][j + 1], label=labels[i],
color=c, linewidth=3)
labs.append(stations[i])
tickloc.append(i + 1)
col.append(c)
plt.ylim(0, len(stations) + 1)
if len(labels) == 0 or len(labels) != len(stations):
labels = stations
plt.yticks(tickloc, labs)
else:
plt.yticks(tickloc, labels)
plt.xlabel('Date')
plt.ylabel('Station Name')
plt.grid(linewidth=0.2)
gytl = plt.gca().get_yticklabels()
for i in range(len(gytl)):
gytl[i].set_color(col[i])
plt.tight_layout()
return fig
def gantt(self):
"""This function runs the other functions in this class."""
stations = self.stations
labels = self.labels
df = self.data
df1 = df.ix[:, stations]
df1.sort_index(inplace=True)
Site_Info = self.site_info()
dateranges = self.markGaps()
fig = self.ganttPlotter()
return Site_Info, dateranges, fig
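# Hypothetical usage of the gantt class (station names invented):
#
#     g = gantt(df, stations=['USGS-01', 'USGS-02'], samp_int='D')
#     fig = g.ganttPlotter()   # periods of record, gaps left blank
#     print(g.sitestats)       # summary statistics per station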
def scatterColor(x0, y, w):
"""Creates scatter plot with points colored by variable.
All input arrays must have matching lengths
:param x0: x values to plot
:type x0: list
:param y: y values to plot
:type y: list
:param w: z values to plot
    :returns: plot; slope and intercept of the ordinary least-squares best-fit line shown on the plot
    .. warning:: all input arrays must have matching lengths and scalar values
    .. note:: the fit shown is from scipy.stats.linregress; see
        http://statsmodels.sourceforge.net/0.6.0/generated/statsmodels.robust.robust_linear_model.RLM.html
        for a robust (RLM) alternative
"""
    w = np.asarray(w)  # accept plain lists as well as arrays
    cmap = plt.cm.get_cmap('RdYlBu')
    norm = mpl.colors.Normalize(vmin=w.min(), vmax=w.max())
m = cm.ScalarMappable(norm=norm, cmap=cmap)
m.set_array(w)
plt.scatter(x0, y, label='', color=m.to_rgba(w))
slope, intercept, r_value, p_value, std_err = linregress(x0, y)
x1 = np.arange(np.min(x0), np.max(x0), 0.1)
y1 = [i * slope + intercept for i in x1]
plt.plot(x1, y1, c='g',
label='simple linear regression m = {:.2f} b = {:.0f}, r^2 = {:.2f}'.format(slope, intercept, r_value ** 2))
plt.legend()
cbar = plt.colorbar(m)
cbar.set_label('Julian Date')
return slope, intercept
def peakdet(v, delta, x=None):
"""
:param v: data vector
:param delta:
:param x:
:returns: two arrays
.. note:: https://gist.github.com/endolith/250860
Converted from MATLAB script at http://billauer.co.il/peakdet.html
"""
import sys
maxtab = []
mintab = []
if x is None:
x = np.arange(len(v))
v = np.asarray(v)
if len(v) != len(x):
sys.exit('Input vectors v and x must have same length')
if not np.isscalar(delta):
sys.exit('Input argument delta must be a scalar')
if delta <= 0:
sys.exit('Input argument delta must be positive')
mn, mx = np.Inf, -np.Inf
mnpos, mxpos = np.NaN, np.NaN
lookformax = True
for i in np.arange(len(v)):
this = v[i]
if this > mx:
mx = this
mxpos = x[i]
if this < mn:
mn = this
mnpos = x[i]
if lookformax:
if this < mx - delta:
maxtab.append((mxpos, mx))
mn = this
mnpos = x[i]
lookformax = False
else:
if this > mn + delta:
mintab.append((mnpos, mn))
mx = this
mxpos = x[i]
lookformax = True
return np.array(maxtab), np.array(mintab)
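# Minimal sketch of peakdet on synthetic data: delta is how far the
# series must retreat from a running extremum before that extremum is
# recorded as a peak or trough.
def _demo_peakdet():
    t = np.linspace(0, 4 * np.pi, 200)
    maxtab, mintab = peakdet(np.sin(t), delta=0.5, x=t)
    return maxtab, mintab  # arrays of (position, value) pairs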
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for operators with > 3 or arbitrary numbers of arguments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class NAryOpsTest(xla_test.XLATestCase):
def _testNAry(self, op, args, expected, equality_fn=None):
with self.test_session() as session:
with self.test_scope():
placeholders = [
array_ops.placeholder(dtypes.as_dtype(arg.dtype), arg.shape)
for arg in args
]
feeds = {placeholders[i]: args[i] for i in range(0, len(args))}
output = op(placeholders)
result = session.run(output, feeds)
if not equality_fn:
equality_fn = self.assertAllClose
equality_fn(result, expected, rtol=1e-3)
def _nAryListCheck(self, results, expected, **kwargs):
self.assertEqual(len(results), len(expected))
for (r, e) in zip(results, expected):
self.assertAllClose(r, e, **kwargs)
def _testNAryLists(self, op, args, expected):
self._testNAry(op, args, expected, equality_fn=self._nAryListCheck)
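  # Sketch of the harness above (call is illustrative): each numpy
  # argument becomes a placeholder of matching dtype/shape, the op is
  # built over the placeholder list, and the session result is compared
  # against `expected`, e.g.
  #   self._testNAry(math_ops.add_n, [a, b], expected=a + b)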
def testFloat(self):
self._testNAry(math_ops.add_n,
[np.array([[1, 2, 3]], dtype=np.float32)],
expected=np.array([[1, 2, 3]], dtype=np.float32))
self._testNAry(math_ops.add_n,
[np.array([1, 2], dtype=np.float32),
np.array([10, 20], dtype=np.float32)],
expected=np.array([11, 22], dtype=np.float32))
self._testNAry(math_ops.add_n,
[np.array([-4], dtype=np.float32),
np.array([10], dtype=np.float32),
np.array([42], dtype=np.float32)],
expected=np.array([48], dtype=np.float32))
def testComplex(self):
for dtype in self.complex_types:
self._testNAry(
math_ops.add_n, [np.array([[1 + 2j, 2 - 3j, 3 + 4j]], dtype=dtype)],
expected=np.array([[1 + 2j, 2 - 3j, 3 + 4j]], dtype=dtype))
self._testNAry(
math_ops.add_n, [
np.array([1 + 2j, 2 - 3j], dtype=dtype),
np.array([10j, 20], dtype=dtype)
],
expected=np.array([1 + 12j, 22 - 3j], dtype=dtype))
self._testNAry(
math_ops.add_n, [
np.array([-4, 5j], dtype=dtype),
np.array([2 + 10j, -2], dtype=dtype),
np.array([42j, 3 + 3j], dtype=dtype)
],
expected=np.array([-2 + 52j, 1 + 8j], dtype=dtype))
@unittest.skip("IdentityN is temporarily CompilationOnly as workaround")
def testIdentityN(self):
self._testNAryLists(array_ops.identity_n,
[np.array([[1, 2, 3]], dtype=np.float32)],
expected=[np.array([[1, 2, 3]], dtype=np.float32)])
self._testNAryLists(array_ops.identity_n,
[np.array([[1, 2], [3, 4]], dtype=np.float32),
np.array([[3, 2, 1], [6, 5, 1]], dtype=np.float32)],
expected=[
np.array([[1, 2], [3, 4]], dtype=np.float32),
np.array([[3, 2, 1], [6, 5, 1]], dtype=np.float32)])
self._testNAryLists(array_ops.identity_n,
[np.array([[1], [2], [3], [4]], dtype=np.int32),
np.array([[3, 2, 1], [6, 5, 1]], dtype=np.float32)],
expected=[
np.array([[1], [2], [3], [4]], dtype=np.int32),
np.array([[3, 2, 1], [6, 5, 1]], dtype=np.float32)])
def testConcat(self):
self._testNAry(
lambda x: array_ops.concat(x, 0), [
np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.float32), np.array(
[[7, 8, 9], [10, 11, 12]], dtype=np.float32)
],
expected=np.array(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=np.float32))
self._testNAry(
lambda x: array_ops.concat(x, 1), [
np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.float32), np.array(
[[7, 8, 9], [10, 11, 12]], dtype=np.float32)
],
expected=np.array(
[[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]], dtype=np.float32))
def testOneHot(self):
with self.test_session() as session, self.test_scope():
indices = array_ops.constant(np.array([[2, 3], [0, 1]], dtype=np.int32))
op = array_ops.one_hot(indices,
np.int32(4),
on_value=np.float32(7), off_value=np.float32(3))
output = session.run(op)
expected = np.array([[[3, 3, 7, 3], [3, 3, 3, 7]],
[[7, 3, 3, 3], [3, 7, 3, 3]]],
dtype=np.float32)
self.assertAllEqual(output, expected)
op = array_ops.one_hot(indices,
np.int32(4),
on_value=np.int32(2), off_value=np.int32(1),
axis=1)
output = session.run(op)
expected = np.array([[[1, 1], [1, 1], [2, 1], [1, 2]],
[[2, 1], [1, 2], [1, 1], [1, 1]]],
dtype=np.int32)
self.assertAllEqual(output, expected)
def testSplitV(self):
with self.test_session() as session:
with self.test_scope():
output = session.run(
array_ops.split(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 0, 1, 2]],
dtype=np.float32),
[2, 2], 1))
expected = [np.array([[1, 2], [5, 6], [9, 0]], dtype=np.float32),
np.array([[3, 4], [7, 8], [1, 2]], dtype=np.float32)]
self.assertAllEqual(output, expected)
def testStridedSlice(self):
self._testNAry(lambda x: array_ops.strided_slice(*x),
[np.array([[], [], []], dtype=np.float32),
np.array([1, 0], dtype=np.int32),
np.array([3, 0], dtype=np.int32),
np.array([1, 1], dtype=np.int32)],
expected=np.array([[], []], dtype=np.float32))
if np.int64 in self.int_types:
self._testNAry(
lambda x: array_ops.strided_slice(*x), [
np.array([[], [], []], dtype=np.float32), np.array(
[1, 0], dtype=np.int64), np.array([3, 0], dtype=np.int64),
np.array([1, 1], dtype=np.int64)
],
expected=np.array([[], []], dtype=np.float32))
self._testNAry(lambda x: array_ops.strided_slice(*x),
[np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=np.float32),
np.array([1, 1], dtype=np.int32),
np.array([3, 3], dtype=np.int32),
np.array([1, 1], dtype=np.int32)],
expected=np.array([[5, 6], [8, 9]], dtype=np.float32))
self._testNAry(lambda x: array_ops.strided_slice(*x),
[np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=np.float32),
np.array([0, 2], dtype=np.int32),
np.array([2, 0], dtype=np.int32),
np.array([1, -1], dtype=np.int32)],
expected=np.array([[3, 2], [6, 5]], dtype=np.float32))
self._testNAry(lambda x: x[0][0:2, array_ops.newaxis, ::-1],
[np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=np.float32)],
expected=np.array([[[3, 2, 1]], [[6, 5, 4]]],
dtype=np.float32))
self._testNAry(lambda x: x[0][1, :, array_ops.newaxis],
[np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=np.float32)],
expected=np.array([[4], [5], [6]], dtype=np.float32))
def testStridedSliceGrad(self):
# Tests cases where input shape is empty.
self._testNAry(lambda x: array_ops.strided_slice_grad(*x),
[np.array([], dtype=np.int32),
np.array([], dtype=np.int32),
np.array([], dtype=np.int32),
np.array([], dtype=np.int32),
np.float32(0.5)],
expected=np.array(np.float32(0.5), dtype=np.float32))
# Tests case where input shape is non-empty, but gradients are empty.
self._testNAry(lambda x: array_ops.strided_slice_grad(*x),
[np.array([3], dtype=np.int32),
np.array([0], dtype=np.int32),
np.array([0], dtype=np.int32),
np.array([1], dtype=np.int32),
np.array([], dtype=np.float32)],
expected=np.array([0, 0, 0], dtype=np.float32))
self._testNAry(lambda x: array_ops.strided_slice_grad(*x),
[np.array([3, 0], dtype=np.int32),
np.array([1, 0], dtype=np.int32),
np.array([3, 0], dtype=np.int32),
np.array([1, 1], dtype=np.int32),
np.array([[], []], dtype=np.float32)],
expected=np.array([[], [], []], dtype=np.float32))
self._testNAry(lambda x: array_ops.strided_slice_grad(*x),
[np.array([3, 3], dtype=np.int32),
np.array([1, 1], dtype=np.int32),
np.array([3, 3], dtype=np.int32),
np.array([1, 1], dtype=np.int32),
np.array([[5, 6], [8, 9]], dtype=np.float32)],
expected=np.array([[0, 0, 0], [0, 5, 6], [0, 8, 9]],
dtype=np.float32))
def ssg_test(x):
return array_ops.strided_slice_grad(*x, shrink_axis_mask=0x4,
new_axis_mask=0x1)
self._testNAry(ssg_test,
[np.array([3, 1, 3], dtype=np.int32),
np.array([0, 0, 0, 2], dtype=np.int32),
np.array([0, 3, 1, -4], dtype=np.int32),
np.array([1, 2, 1, -3], dtype=np.int32),
np.array([[[1], [2]]], dtype=np.float32)],
expected=np.array([[[0, 0, 1]], [[0, 0, 0]], [[0, 0, 2]]],
dtype=np.float32))
ssg_test2 = lambda x: array_ops.strided_slice_grad(*x, new_axis_mask=0x15)
self._testNAry(ssg_test2,
[np.array([4, 4], dtype=np.int32),
np.array([0, 0, 0, 1, 0], dtype=np.int32),
np.array([0, 3, 0, 4, 0], dtype=np.int32),
np.array([1, 2, 1, 2, 1], dtype=np.int32),
np.array([[[[[1], [2]]], [[[3], [4]]]]], dtype=np.float32)],
expected=np.array([[0, 1, 0, 2], [0, 0, 0, 0], [0, 3, 0, 4],
[0, 0, 0, 0]], dtype=np.float32))
self._testNAry(lambda x: array_ops.strided_slice_grad(*x),
[np.array([3, 3], dtype=np.int32),
np.array([0, 2], dtype=np.int32),
np.array([2, 0], dtype=np.int32),
np.array([1, -1], dtype=np.int32),
np.array([[1, 2], [3, 4]], dtype=np.float32)],
expected=np.array([[0, 2, 1], [0, 4, 3], [0, 0, 0]],
dtype=np.float32))
self._testNAry(lambda x: array_ops.strided_slice_grad(*x),
[np.array([3, 3], dtype=np.int32),
np.array([2, 2], dtype=np.int32),
np.array([0, 1], dtype=np.int32),
np.array([-1, -2], dtype=np.int32),
np.array([[1], [2]], dtype=np.float32)],
expected=np.array([[0, 0, 0], [0, 0, 2], [0, 0, 1]],
dtype=np.float32))
if __name__ == "__main__":
googletest.main()
| |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-interfaces - based on the path /interfaces/interface/routed-vlan/ipv6/unnumbered/interface-ref/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Operational state for interface-ref
"""
__slots__ = ("_path_helper", "_extmethods", "__interface", "__subinterface")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__interface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="leafref",
is_config=False,
)
self.__subinterface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="subinterface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="leafref",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"interfaces",
"interface",
"routed-vlan",
"ipv6",
"unnumbered",
"interface-ref",
"state",
]
def _get_interface(self):
"""
Getter method for interface, mapped from YANG variable /interfaces/interface/routed_vlan/ipv6/unnumbered/interface_ref/state/interface (leafref)
YANG Description: Reference to a base interface. If a reference to a
subinterface is required, this leaf must be specified
to indicate the base interface.
"""
return self.__interface
def _set_interface(self, v, load=False):
"""
Setter method for interface, mapped from YANG variable /interfaces/interface/routed_vlan/ipv6/unnumbered/interface_ref/state/interface (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface is considered a private
method. Backends looking to populate this variable should
do so by calling thisObj._set_interface() directly.
YANG Description: Reference to a base interface. If a reference to a
subinterface is required, this leaf must be specified
to indicate the base interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="leafref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """interface must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='leafref', is_config=False)""",
}
)
self.__interface = t
if hasattr(self, "_set"):
self._set()
def _unset_interface(self):
self.__interface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="leafref",
is_config=False,
)
def _get_subinterface(self):
"""
Getter method for subinterface, mapped from YANG variable /interfaces/interface/routed_vlan/ipv6/unnumbered/interface_ref/state/subinterface (leafref)
YANG Description: Reference to a subinterface -- this requires the base
interface to be specified using the interface leaf in
this container. If only a reference to a base interface
is required, this leaf should not be set.
"""
return self.__subinterface
def _set_subinterface(self, v, load=False):
"""
Setter method for subinterface, mapped from YANG variable /interfaces/interface/routed_vlan/ipv6/unnumbered/interface_ref/state/subinterface (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_subinterface is considered a private
method. Backends looking to populate this variable should
do so by calling thisObj._set_subinterface() directly.
YANG Description: Reference to a subinterface -- this requires the base
interface to be specified using the interface leaf in
this container. If only a reference to a base interface
is required, this leaf should not be set.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="subinterface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="leafref",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """subinterface must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="subinterface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='leafref', is_config=False)""",
}
)
self.__subinterface = t
if hasattr(self, "_set"):
self._set()
def _unset_subinterface(self):
self.__subinterface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="subinterface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="leafref",
is_config=False,
)
interface = __builtin__.property(_get_interface)
subinterface = __builtin__.property(_get_subinterface)
_pyangbind_elements = OrderedDict(
[("interface", interface), ("subinterface", subinterface)]
)
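# A minimal usage sketch (hypothetical values; both leaves are config false,
# so backends would populate them via the private setters):
#
#   s = state()
#   s._set_interface(u"eth0")
#   s._set_subinterface(u"0")
#   print(s.interface, s.subinterface)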
| |
# yellowbrick.bestfit
# Uses Scikit-Learn to compute a best fit function, then draws it in the plot.
#
# Author: Benjamin Bengfort
# Created: Sun Jun 26 17:27:08 2016 -0400
#
# Copyright (C) 2016 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: bestfit.py [56236f3] benjamin@bengfort.com $
"""
Uses Scikit-Learn to compute a best fit function, then draws it in the plot.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.metrics import mean_squared_error as mse
from operator import itemgetter
from yellowbrick.style.palettes import LINE_COLOR
from yellowbrick.exceptions import YellowbrickValueError
##########################################################################
## Module Constants
##########################################################################
# Names of the various estimator functions
LINEAR = "linear"
QUADRATIC = "quadratic"
EXPONENTIAL = "exponential"
LOG = "log"
SELECT_BEST = "select_best"
##########################################################################
## Draw Line of Best Fit
##########################################################################
def draw_best_fit(X, y, ax, estimator="linear", **kwargs):
"""
Uses Scikit-Learn to fit a model to X and y then uses the resulting model
to predict the curve based on the X values. This curve is drawn to the ax
(matplotlib axis) which must be passed as the third variable.
The estimator function can be one of the following:
- ``'linear'``: Uses OLS to fit the regression
- ``'quadratic'``: Uses OLS with Polynomial order 2
- ``'exponential'``: Not implemented yet
- ``'log'``: Not implemented yet
- ``'select_best'``: Selects the best fit via MSE
The remaining keyword arguments are passed to ax.plot to define and
describe the line of best fit.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
ax : matplotlib Axes, default: None
The axis to plot the figure on. If None is passed in, the current axes
will be used (or generated if required).
estimator : string, default: 'linear'
The name of the estimator function used to draw the best fit line.
The estimator can currently be one of linear, quadratic, exponential,
log, or select_best. The select best method uses the minimum MSE to
select the best fit line.
kwargs : dict
Keyword arguments to pass to the matplotlib plot function to style and
label the line of best fit. By default, the standard line color is
used unless the color keyword argument is passed in.
Returns
-------
ax : matplotlib Axes
The axes with the line drawn on it.
"""
# Estimators are the types of best fit lines that can be drawn.
estimators = {
LINEAR: fit_linear, # Uses OLS to fit the regression
QUADRATIC: fit_quadratic, # Uses OLS with Polynomial order 2
EXPONENTIAL: fit_exponential, # Not implemented yet
LOG: fit_log, # Not implemented yet
SELECT_BEST: fit_select_best, # Selects the best fit via MSE
}
# Check to make sure that a correct estimator value was passed in.
if estimator not in estimators:
raise YellowbrickValueError(
"'{}' not a valid type of estimator; choose from {}".format(
estimator, ", ".join(estimators.keys())
)
)
# Then collect the estimator function from the mapping.
estimator = estimators[estimator]
# Ensure that X and y are the same length
if len(X) != len(y):
raise YellowbrickValueError(
(
"X and y must have same length:" " X len {} doesn't match y len {}!"
).format(len(X), len(y))
)
# Ensure that X and y are np.arrays
X = np.array(X)
y = np.array(y)
# Verify that X is a two-dimensional array for Scikit-Learn estimators
# and that its dimensions are (n, 1) where n is the number of rows.
if X.ndim < 2:
X = X[:, np.newaxis] # Reshape X into the correct dimensions
if X.ndim > 2:
raise YellowbrickValueError(
"X must be a (1,) or (n,1) dimensional array not {}".format(X.shape)
)
# Verify that y is a (n,) dimensional array
if y.ndim > 1:
raise YellowbrickValueError(
"y must be a (1,) dimensional array not {}".format(y.shape)
)
# Uses the estimator to fit the data and get the model back.
model = estimator(X, y)
# Set the color if not passed in.
if "c" not in kwargs and "color" not in kwargs:
kwargs["color"] = LINE_COLOR
# Get the current working axes
ax = ax or plt.gca()
# Plot line of best fit onto the axes that were passed in.
# TODO: determine if xlim or X.min(), X.max() are better params
xr = np.linspace(*ax.get_xlim(), num=100)
ax.plot(xr, model.predict(xr[:, np.newaxis]), **kwargs)
return ax
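# A minimal usage sketch (synthetic data, not part of the module):
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   X = np.linspace(0, 10, 50)
#   y = 3 * X + np.random.normal(0, 1, 50)
#   fig, ax = plt.subplots()
#   ax.scatter(X, y)
#   draw_best_fit(X, y, ax, estimator="select_best")
#   plt.show()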
##########################################################################
## Estimator Functions
##########################################################################
def fit_select_best(X, y):
"""
Selects the best fit of the estimators already implemented by choosing the
model with the smallest mean square error metric for the trained values.
"""
models = [fit(X, y) for fit in [fit_linear, fit_quadratic]]
errors = map(lambda model: mse(y, model.predict(X)), models)
return min(zip(models, errors), key=itemgetter(1))[0]
def fit_linear(X, y):
"""
Uses OLS to fit the regression.
"""
model = linear_model.LinearRegression()
model.fit(X, y)
return model
def fit_quadratic(X, y):
"""
Uses OLS with Polynomial order 2.
"""
model = make_pipeline(PolynomialFeatures(2), linear_model.LinearRegression())
model.fit(X, y)
return model
def fit_exponential(X, y):
"""
Fits an exponential curve to the data.
"""
raise NotImplementedError("Exponential best fit lines are not implemented")
def fit_log(X, y):
"""
Fit a logarithmic curve to the data.
"""
raise NotImplementedError("Logarithmic best fit lines are not implemented")
##########################################################################
## Draw 45 Degree Line
##########################################################################
def draw_identity_line(ax=None, dynamic=True, **kwargs):
"""
Draws a 45 degree identity line such that y=x for all points within the
given axes x and y limits. This function also registers a callback so
that as the figure is modified, the axes are updated and the line remains
drawn correctly.
Parameters
----------
ax : matplotlib Axes, default: None
The axes to plot the figure on. If None is passed in, the current axes
will be used (or generated if required).
dynamic : bool, default : True
If the plot is dynamic, callbacks will be registered to update the
identity line as the axes are changed.
kwargs : dict
Keyword arguments to pass to the matplotlib plot function to style the
identity line.
Returns
-------
ax : matplotlib Axes
The axes with the line drawn on it.
Notes
-----
.. seealso:: `StackOverflow discussion: Does matplotlib have a function for drawing diagonal lines in axis coordinates? <https://stackoverflow.com/questions/22104256/does-matplotlib-have-a-function-for-drawing-diagonal-lines-in-axis-coordinates>`_
"""
# Get the current working axes
ax = ax or plt.gca()
# Define the standard line color
if "c" not in kwargs and "color" not in kwargs:
kwargs["color"] = LINE_COLOR
# Define the standard opacity
if "alpha" not in kwargs:
kwargs["alpha"] = 0.5
# Draw the identity line
identity, = ax.plot([], [], **kwargs)
# Define the callback
def callback(ax):
# Get the x and y limits on the axes
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# Set the bounding range of the line
data = (max(xlim[0], ylim[0]), min(xlim[1], ylim[1]))
identity.set_data(data, data)
# Register the callback and return
callback(ax)
if dynamic:
ax.callbacks.connect("xlim_changed", callback)
ax.callbacks.connect("ylim_changed", callback)
return ax
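# e.g. for a prediction-vs-actual scatter (hypothetical y_true/y_pred arrays):
#
#   fig, ax = plt.subplots()
#   ax.scatter(y_true, y_pred)
#   draw_identity_line(ax=ax, ls="--")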
if __name__ == "__main__":
import os
import pandas as pd
path = os.path.join(
os.path.dirname(__file__), "..", "examples", "data", "concrete.xls"
)
if not os.path.exists(path):
raise Exception("Could not find path for testing")
xkey = "Fine Aggregate (component 7)(kg in a m^3 mixture)"
ykey = "Coarse Aggregate (component 6)(kg in a m^3 mixture)"
data = pd.read_excel(path)
fig, axe = plt.subplots()
axe.scatter(data[xkey], data[ykey])
draw_best_fit(data[xkey], data[ykey], axe, "select_best")
plt.show()
| |
'''
Authors: Tobi and Gundram
'''
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import ctc_ops as ctc
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops.rnn import bidirectional_rnn
from util.LoaderUtil import read_image_list, get_list_vals
from random import shuffle
from util.STR2CTC import get_charmap_lp, get_charmap_lp_inv
import os
import time
import numpy as np
import matplotlib.pyplot as plt
# Goes down to 10%
INPUT_PATH_TRAIN = './private/lists/lp_only_train.lst'
INPUT_PATH_VAL = './private/lists/lp_only_val.lst'
cm, nClasses = get_charmap_lp()
# Additional NaC (CTC blank) channel
nClasses += 1
nEpochs = 15
batchSize = 4
# learningRate = 0.001
# momentum = 0.9
# It is assumed that the TextLines are ALL saved with a consistent height of imgH
imgH = 48
# Depending on the size, the image is cropped or zero-padded
imgW = 256
channels = 1
nHiddenLSTM1 = 256
os.chdir("../..")
trainList = read_image_list(INPUT_PATH_TRAIN)
stepsPerEpocheTrain = len(trainList) / batchSize
valList = read_image_list(INPUT_PATH_VAL)
stepsPerEpocheVal = len(valList) / batchSize
def inference(images, seqLen):
with tf.variable_scope('conv1') as scope:
kernel = tf.Variable(tf.truncated_normal([6, 5, channels, 32], stddev=5e-2), name='weights')
##Weight Decay?
# weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
# tf.add_to_collection('losses', weight_decay)
conv = tf.nn.conv2d(images, kernel, [1, 4, 3, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.1, shape=[32]), name='biases')
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
# _activation_summary(conv1)
# norm1 = tf.nn.local_response_normalization(conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,name='norm1')
seqFloat = tf.to_float(seqLen)
seqL2 = tf.ceil(seqFloat * 0.33)
with tf.variable_scope('conv2') as scope:
kernel = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=5e-2), name='weights')
##Weight Decay?
# weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
# tf.add_to_collection('losses', weight_decay)
conv = tf.nn.conv2d(conv1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.1, shape=[64]), name='biases')
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
# _activation_summary(conv2)
# norm2
# norm2 = tf.nn.local_response_normalization(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,name='norm2')
pool2 = tf.nn.max_pool(conv2, ksize=[1, 4, 2, 1], strides=[1, 4, 2, 1], padding='SAME', name='pool2')
seqL3 = tf.ceil(seqL2 * 0.5)
with tf.variable_scope('conv3') as scope:
kernel = tf.Variable(tf.truncated_normal([5, 3, 64, 128], stddev=5e-2), name='weights')
##Weight Decay?
# weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
# tf.add_to_collection('losses', weight_decay)
conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.1, shape=[128]), name='biases')
pre_activation = tf.nn.bias_add(conv, biases)
conv3 = tf.nn.relu(pre_activation, name=scope.name)
pool3 = tf.nn.max_pool(conv3, ksize=[1, 3, 1, 1], strides=[1, 3, 1, 1], padding='SAME', name='pool2')
# NO POOLING HERE -> CTC needs an appropriate length.
seqLenAfterConv = tf.to_int32(seqL3)
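# Length bookkeeping: conv1 strides the width (time) axis by 3 and pool2
# halves it again, so the per-image sequence length handed to CTC is
# ceil(ceil(seqLen * 0.33) * 0.5); pool3 pools only along the height axis
# and leaves the time axis untouched.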
with tf.variable_scope('RNN_Prep') as scope:
# (#batch Y X Z) --> (X #batch Y Z)
rnnIn = tf.transpose(pool3, [2, 0, 1, 3])
# (X #batch Y Z) --> (X #batch Y*Z)
shape = rnnIn.get_shape()
steps = shape[0]
rnnIn = tf.reshape(rnnIn, tf.pack([shape[0], shape[1], -1]))
# (X #batch Y*Z) --> (X*#batch Y*Z)
shape = rnnIn.get_shape()
rnnIn = tf.reshape(rnnIn, tf.pack([-1, shape[2]]))
# (X*#batch Y*Z) --> list of X tensors of shape (#batch, Y*Z)
rnnIn = tf.split(0, steps, rnnIn)
with tf.variable_scope('BLSTM1') as scope:
forwardH1 = rnn_cell.LSTMCell(nHiddenLSTM1, use_peepholes=True, state_is_tuple=True)
backwardH1 = rnn_cell.LSTMCell(nHiddenLSTM1, use_peepholes=True, state_is_tuple=True)
outputs, _, _ = bidirectional_rnn(forwardH1, backwardH1, rnnIn, dtype=tf.float32)
fbH1rs = [tf.reshape(t, [batchSize, 2, nHiddenLSTM1]) for t in outputs]
# outH1 = [tf.reduce_sum(tf.mul(t, weightsOutH1), reduction_indices=1) + biasesOutH1 for t in fbH1rs]
outH1 = [tf.reduce_sum(t, reduction_indices=1) for t in fbH1rs]
with tf.variable_scope('LOGIT') as scope:
weightsClasses = tf.Variable(tf.truncated_normal([nHiddenLSTM1, nClasses],
stddev=np.sqrt(2.0 / nHiddenLSTM1)))
biasesClasses = tf.Variable(tf.zeros([nClasses]))
logitsFin = [tf.matmul(t, weightsClasses) + biasesClasses for t in outH1]
logits3d = tf.pack(logitsFin)
return logits3d, seqLenAfterConv
def loss(logits3d, tgt, seqLenAfterConv):
loss = tf.reduce_mean(ctc.ctc_loss(logits3d, tgt, seqLenAfterConv))
return loss
print('Defining graph')
graph = tf.Graph()
with graph.as_default():
####Graph input
inputX = tf.placeholder(tf.float32, shape=(batchSize, imgH, imgW, channels))
targetIxs = tf.placeholder(tf.int64)
targetVals = tf.placeholder(tf.int32)
targetShape = tf.placeholder(tf.int64)
targetY = tf.SparseTensor(targetIxs, targetVals, targetShape)
seqLengths = tf.placeholder(tf.int32, shape=(batchSize))
logits3d, seqAfterConv = inference(inputX, seqLengths)
loss = loss(logits3d, targetY, seqAfterConv)
# optimizer = tf.train.MomentumOptimizer(learningRate, momentum).minimize(loss)
optimizer = tf.train.AdamOptimizer().minimize(loss)
# pred = tf.to_int32(ctc.ctc_beam_search_decoder(logits3d, seqAfterConv, merge_repeated=False)[0][0])
pred = tf.to_int32(ctc.ctc_greedy_decoder(logits3d, seqAfterConv)[0][0])
edist = tf.edit_distance(pred, targetY, normalize=False)
tgtLens = tf.to_float(tf.size(targetY.values))
err = tf.reduce_sum(edist) / tgtLens
saver = tf.train.Saver()
with tf.Session(graph=graph) as session:
# writer = tf.train.SummaryWriter('./log', session.graph)
print('Initializing')
tf.global_variables_initializer().run()
# ckpt = tf.train.get_checkpoint_state("./private/models/lp2/")
# if ckpt and ckpt.model_checkpoint_path:
# saver.restore(session, ckpt.model_checkpoint_path)
# print(ckpt)
# workList = valList[:]
# errV = 0
# lossV = 0
# timeVS = time.time()
# cmInv = get_charmap_lp_inv()
# for bStep in range(stepsPerEpocheVal):
# bList, workList = workList[:batchSize], workList[batchSize:]
# batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
# imgW,
# mvn=True)
# feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
# targetShape: batchTargetShape, seqLengths: batchSeqLengths}
# lossB, aErr, p = session.run([loss, err, pred], feed_dict=feedDict)
# print(aErr)
# res = []
# for idx in p.values:
# res.append(cmInv[idx])
# print(res)
# # print(p)
# plt.imshow(batchInputs[0,:,:,0], cmap=plt.cm.gray)
# plt.show()
#
# lossV += lossB
# errV += aErr
# print('Val: CTC-loss ', lossV)
# errVal = errV / stepsPerEpocheVal
# print('Val: CER ', errVal)
# print('Val time ', time.time() - timeVS)
for epoch in range(nEpochs):
workList = trainList[:]
shuffle(workList)
print('Epoch', epoch + 1, '...')
lossT = 0
errT = 0
timeTS = time.time()
for bStep in range(stepsPerEpocheTrain):
bList, workList = workList[:batchSize], workList[batchSize:]
batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
imgW,
mvn=True)
feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
targetShape: batchTargetShape, seqLengths: batchSeqLengths}
_, lossB, aErr = session.run([optimizer, loss, err], feed_dict=feedDict)
# _, lossB, aErr, sET, sLT = session.run([optimizer, loss, err, err_train, loss_train], feed_dict=feedDict)
lossT += lossB
# writer.add_summary(sET, epoch * stepsPerEpocheTrain + bStep)
# writer.add_summary(sLT, epoch * stepsPerEpocheTrain + bStep)
errT += aErr
print('Train: CTC-loss ', lossT)
cerT = errT / stepsPerEpocheTrain
print('Train: CER ', cerT)
print('Train time ', time.time() - timeTS)
workList = valList[:]
errV = 0
lossV = 0
timeVS = time.time()
for bStep in range(stepsPerEpocheVal):
bList, workList = workList[:batchSize], workList[batchSize:]
batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
imgW,
mvn=True)
feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
targetShape: batchTargetShape, seqLengths: batchSeqLengths}
lossB, aErr = session.run([loss, err], feed_dict=feedDict)
# lossB, aErr, sE, sL = session.run([loss, err, err_val, loss_val], feed_dict=feedDict)
# writer.add_summary(sE, epoch*stepsPerEpocheVal + bStep)
# writer.add_summary(sL, epoch * stepsPerEpocheVal + bStep)
lossV += lossB
errV += aErr
print('Val: CTC-loss ', lossV)
errVal = errV / stepsPerEpocheVal
print('Val: CER ', errVal)
print('Val time ', time.time() - timeVS)
# Write a checkpoint.
checkpoint_file = os.path.join('./private/models/lp4/', 'checkpoint')
saver.save(session, checkpoint_file, global_step=epoch)
# Defining graph
# Initializing
# Epoch 1 ...
# Train: CTC-loss 73300.3550894
# Train: CER 0.360185462034
# Train time 1644.25518394
# Val: CTC-loss 1301.99811672
# Val: CER 0.0651899194817
# Val time 53.8596189022
# Epoch 2 ...
# Train: CTC-loss 14313.0424826
# Train: CER 0.0658545976602
# Train time 1660.32220101
# Val: CTC-loss 1134.93752122
# Val: CER 0.0578728136967
# Val time 54.3995099068
# Epoch 3 ...
# Train: CTC-loss 12519.2304724
# Train: CER 0.0576073409296
# Train time 1675.11377215
# Val: CTC-loss 1088.33515568
# Val: CER 0.0513419558555
# Val time 53.7419240475
# Epoch 4 ...
# Train: CTC-loss 11689.1584823
# Train: CER 0.0541007722338
# Train time 1735.50320292
# Val: CTC-loss 1063.68676706
# Val: CER 0.0510597730676
# Val time 53.8123121262
# Epoch 5 ...
# Train: CTC-loss 11095.6933901
# Train: CER 0.05163765061
# Train time 1792.59294605
# Val: CTC-loss 1133.50980034
# Val: CER 0.0524711851875
# Val time 53.9744770527
# Epoch 6 ...
# Train: CTC-loss 10791.8420103
# Train: CER 0.0502912844581
# Train time 1857.39222693
# Val: CTC-loss 1116.46805943
# Val: CER 0.0521457815419
# Val time 54.4395420551
# Epoch 7 ...
# Train: CTC-loss 10485.4801794
# Train: CER 0.0491253335559
# Train time 1963.77380419
# Val: CTC-loss 1099.46129447
# Val: CER 0.049517301783
# Val time 54.5394759178
# Epoch 8 ...
# Train: CTC-loss 10348.2202144
# Train: CER 0.0481146517714
# Train time 2426.47800994
# Val: CTC-loss 1046.17936176
# Val: CER 0.0474031017423
# Val time 134.446013927
# Epoch 9 ...
# Train: CTC-loss 10197.3099074
# Train: CER 0.0477043093231
# Train time 7824.21876001
# Val: CTC-loss 1124.89474004
# Val: CER 0.0514842471878
# Val time 252.000334024
# Epoch 10 ...
# Train: CTC-loss 10092.6621583
# Train: CER 0.0474468553934
# Train time 10360.6413701
# Val: CTC-loss 1099.01140712
# Val: CER 0.0516711436361
# Val time 249.681378841
# Epoch 11 ...
# Train: CTC-loss 10178.3684522
# Train: CER 0.0475410352993
# Train time 10878.222168
# Val: CTC-loss 1083.66041013
# Val: CER 0.0508877646675
# Val time 251.609441996
# Epoch 12 ...
# Train: CTC-loss 10334.6522735
# Train: CER 0.0478250410284
# Train time 11397.775522
# Val: CTC-loss 1086.69757615
# Val: CER 0.0513977496425
# Val time 261.578649044
# Epoch 13 ...
# Train: CTC-loss 10153.5927667
# Train: CER 0.0474821657583
# Train time 10354.784452
# Val: CTC-loss 1134.44932381
# Val: CER 0.0510351066341
# Val time 218.338672161
# Epoch 14 ...
# Train: CTC-loss 10120.6358578
# Train: CER 0.04686203232
# Train time 9541.03669
# Val: CTC-loss 1132.0122739
# Val: CER 0.0502342694104
# Val time 162.695705891
# Epoch 15 ...
# Train: CTC-loss 10186.226971
# Train: CER 0.0475344161096
# Train time 8939.98238707
# Val: CTC-loss 1127.0993686
# Val: CER 0.0507292237629
# Val time 192.596905947
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorBoard Summary Writer for TensorFlow Eager Execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import uuid
from tensorflow.contrib.summary import gen_summary_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_op_util
from tensorflow.python.ops import variable_scope
def _maybe_cpu(v):
if isinstance(v, (ops.EagerTensor, ops.Tensor)):
return v.cpu()
else:
return v
def _summary_writer_function(name, tensor, function, family=None):
def record():
with summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
function(tag, scope)
return True
return record
class SummaryWriter(object):
"""Writes summaries for TensorBoard, compatible with eager execution.
This class is the supported way of writing TensorBoard summaries under
eager execution.
"""
_CPU_DEVICE = "cpu:0"
def __init__(self,
logdir,
max_queue=10,
flush_secs=120,
filename_suffix=""):
"""Summary writer for TensorBoard, compatible with eager execution.
If necessary, multiple instances of `SummaryWriter` can be created, with
distinct `logdir`s and `name`s. Each `SummaryWriter` instance will retain
its independent `global_step` counter and data writing destination.
Example:
```python
writer = tfe.SummaryWriter("my_model")
# ... Code that sets up the model and data batches ...
for _ in xrange(train_iters):
loss = model.train_batch(batch)
writer.scalar("loss", loss)
writer.step()
```
Args:
logdir: Directory in which summary files will be written.
max_queue: Number of summary items to buffer before flushing to
filesystem. If 0, summaries will be flushed immediately.
flush_secs: Number of seconds between forced commits to disk.
filename_suffix: Suffix of the event protobuf files in which the summary
data are stored.
Raises:
ValueError: If this constructor is called not under eager execution.
"""
# TODO(apassos, ashankar): Make this class and the underlying
# contrib.summary_ops compatible with graph model and remove this check.
if not context.in_eager_mode():
raise ValueError(
"Use of SummaryWriter is currently supported only with eager "
"execution enabled. File an issue at "
"https://github.com/tensorflow/tensorflow/issues/new to express "
"interest in fixing this.")
# TODO(cais): Consider adding name keyword argument, which if None or empty,
# will register the global global_step that training_util.get_global_step()
# can find.
with context.device(self._CPU_DEVICE):
self._name = uuid.uuid4().hex
self._global_step = 0
self._global_step_tensor = variable_scope.get_variable(
"global_step/summary_writer/" + self._name,
shape=[], dtype=dtypes.int64,
initializer=init_ops.zeros_initializer())
self._global_step_dirty = False
self._resource = gen_summary_ops.summary_writer(shared_name=self._name)
gen_summary_ops.create_summary_file_writer(
self._resource, logdir, max_queue, flush_secs, filename_suffix)
# Delete the resource when this object is deleted
self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
handle=self._resource, handle_device=self._CPU_DEVICE)
def step(self):
"""Increment the global step counter of this SummaryWriter instance."""
self._global_step += 1
self._global_step_dirty = True
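# Note: the backing variable is updated lazily; the incremented value is
# flushed to self._global_step_tensor on the next summary write via
# _update_global_step_tensor().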
@property
def global_step(self):
"""Obtain the current global_step value of this SummaryWriter instance.
Returns:
An `int` representing the current value of the global_step of this
`SummaryWriter` instance.
"""
return self._global_step
def _update_global_step_tensor(self):
with context.device(self._CPU_DEVICE):
if self._global_step_dirty:
self._global_step_dirty = False
return state_ops.assign(self._global_step_tensor, self._global_step)
else:
return self._global_step_tensor
def generic(self, name, tensor, metadata, family=None):
"""Write a generic-type summary.
Args:
name: A name for the generated node. Will also serve as the series name in
TensorBoard.
tensor: A `Tensor` or compatible value type containing the value of the
summary.
metadata: Metadata about the summary.
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on TensorBoard.
"""
with context.device(self._CPU_DEVICE):
with summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
gen_summary_ops.write_summary(
self._resource,
self._update_global_step_tensor(),
_maybe_cpu(tensor),
tag,
_maybe_cpu(metadata),
name=scope)
def scalar(self, name, tensor, family=None):
"""Write a scalar summary.
Args:
name: A name for the generated node. Will also serve as the series name in
TensorBoard.
tensor: A real numeric `Tensor` or compatible value type containing a
single value.
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on TensorBoard.
Returns:
A summary writer function for scalars.
"""
with context.device(self._CPU_DEVICE):
with summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
gen_summary_ops.write_scalar_summary(
self._resource, self._update_global_step_tensor(),
tag, _maybe_cpu(tensor), name=scope)
def histogram(self, name, tensor, family=None):
"""Write a histogram summary.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: A real numeric `Tensor` or compatible value type. Any shape.
Values to use to build the histogram.
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on TensorBoard.
"""
with context.device(self._CPU_DEVICE):
with summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
gen_summary_ops.write_histogram_summary(
self._resource, self._update_global_step_tensor(),
tag, _maybe_cpu(tensor), name=scope)
def image(self, name, tensor, bad_color=None, max_images=3, family=None):
"""Write an image summary."""
with context.device(self._CPU_DEVICE):
# Default to opaque red when no bad_color is supplied.
bad_color_ = (constant_op.constant([255, 0, 0, 255], dtype=dtypes.uint8)
if bad_color is None else bad_color)
with summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
gen_summary_ops.write_image_summary(
self._resource, self._update_global_step_tensor(),
tag, _maybe_cpu(tensor), bad_color_, max_images,
name=scope)
def audio(self, name, tensor, sample_rate, max_outputs, family=None):
"""Write an audio summary.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]`
or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`, or
compatible value type.
sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the
signal in hertz.
max_outputs: Max number of batch elements to generate audio for.
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on TensorBoard.
"""
with context.device(self._CPU_DEVICE):
with summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
gen_summary_ops.write_audio_summary(
self._resource, self._update_global_step_tensor(),
tag,
_maybe_cpu(tensor),
sample_rate=_maybe_cpu(sample_rate),
max_outputs=max_outputs,
name=scope)
| |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: encoder.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='encoder.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\rencoder.proto\"\x1c\n\rEncodeRequest\x12\x0b\n\x03url\x18\x01 \x01(\t\"\x1c\n\x0e\x45ncodeResponse\x12\n\n\x02id\x18\x01 \x01(\r\"\x1b\n\rDecodeRequest\x12\n\n\x02id\x18\x01 \x01(\r\"\x1d\n\x0e\x44\x65\x63odeResponse\x12\x0b\n\x03url\x18\x01 \x01(\t2c\n\x07\x45ncoder\x12+\n\x06\x65ncode\x12\x0e.EncodeRequest\x1a\x0f.EncodeResponse\"\x00\x12+\n\x06\x64\x65\x63ode\x12\x0e.DecodeRequest\x1a\x0f.DecodeResponse\"\x00\x62\x06proto3')
)
_ENCODEREQUEST = _descriptor.Descriptor(
name='EncodeRequest',
full_name='EncodeRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='url', full_name='EncodeRequest.url', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=17,
serialized_end=45,
)
_ENCODERESPONSE = _descriptor.Descriptor(
name='EncodeResponse',
full_name='EncodeResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='EncodeResponse.id', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=47,
serialized_end=75,
)
_DECODEREQUEST = _descriptor.Descriptor(
name='DecodeRequest',
full_name='DecodeRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='DecodeRequest.id', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=77,
serialized_end=104,
)
_DECODERESPONSE = _descriptor.Descriptor(
name='DecodeResponse',
full_name='DecodeResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='url', full_name='DecodeResponse.url', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=106,
serialized_end=135,
)
DESCRIPTOR.message_types_by_name['EncodeRequest'] = _ENCODEREQUEST
DESCRIPTOR.message_types_by_name['EncodeResponse'] = _ENCODERESPONSE
DESCRIPTOR.message_types_by_name['DecodeRequest'] = _DECODEREQUEST
DESCRIPTOR.message_types_by_name['DecodeResponse'] = _DECODERESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
EncodeRequest = _reflection.GeneratedProtocolMessageType('EncodeRequest', (_message.Message,), dict(
DESCRIPTOR = _ENCODEREQUEST,
__module__ = 'encoder_pb2'
# @@protoc_insertion_point(class_scope:EncodeRequest)
))
_sym_db.RegisterMessage(EncodeRequest)
EncodeResponse = _reflection.GeneratedProtocolMessageType('EncodeResponse', (_message.Message,), dict(
DESCRIPTOR = _ENCODERESPONSE,
__module__ = 'encoder_pb2'
# @@protoc_insertion_point(class_scope:EncodeResponse)
))
_sym_db.RegisterMessage(EncodeResponse)
DecodeRequest = _reflection.GeneratedProtocolMessageType('DecodeRequest', (_message.Message,), dict(
DESCRIPTOR = _DECODEREQUEST,
__module__ = 'encoder_pb2'
# @@protoc_insertion_point(class_scope:DecodeRequest)
))
_sym_db.RegisterMessage(DecodeRequest)
DecodeResponse = _reflection.GeneratedProtocolMessageType('DecodeResponse', (_message.Message,), dict(
DESCRIPTOR = _DECODERESPONSE,
__module__ = 'encoder_pb2'
# @@protoc_insertion_point(class_scope:DecodeResponse)
))
_sym_db.RegisterMessage(DecodeResponse)
_ENCODER = _descriptor.ServiceDescriptor(
name='Encoder',
full_name='Encoder',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=137,
serialized_end=236,
methods=[
_descriptor.MethodDescriptor(
name='encode',
full_name='Encoder.encode',
index=0,
containing_service=None,
input_type=_ENCODEREQUEST,
output_type=_ENCODERESPONSE,
options=None,
),
_descriptor.MethodDescriptor(
name='decode',
full_name='Encoder.decode',
index=1,
containing_service=None,
input_type=_DECODEREQUEST,
output_type=_DECODERESPONSE,
options=None,
),
])
_sym_db.RegisterServiceDescriptor(_ENCODER)
DESCRIPTOR.services_by_name['Encoder'] = _ENCODER
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class EncoderStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.encode = channel.unary_unary(
'/Encoder/encode',
request_serializer=EncodeRequest.SerializeToString,
response_deserializer=EncodeResponse.FromString,
)
self.decode = channel.unary_unary(
'/Encoder/decode',
request_serializer=DecodeRequest.SerializeToString,
response_deserializer=DecodeResponse.FromString,
)
class EncoderServicer(object):
# missing associated documentation comment in .proto file
pass
def encode(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def decode(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_EncoderServicer_to_server(servicer, server):
rpc_method_handlers = {
'encode': grpc.unary_unary_rpc_method_handler(
servicer.encode,
request_deserializer=EncodeRequest.FromString,
response_serializer=EncodeResponse.SerializeToString,
),
'decode': grpc.unary_unary_rpc_method_handler(
servicer.decode,
request_deserializer=DecodeRequest.FromString,
response_serializer=DecodeResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'Encoder', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class BetaEncoderServicer(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
# missing associated documentation comment in .proto file
pass
def encode(self, request, context):
# missing associated documentation comment in .proto file
pass
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def decode(self, request, context):
# missing associated documentation comment in .proto file
pass
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetaEncoderStub(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
# missing associated documentation comment in .proto file
pass
def encode(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
# missing associated documentation comment in .proto file
pass
raise NotImplementedError()
encode.future = None
def decode(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
# missing associated documentation comment in .proto file
pass
raise NotImplementedError()
decode.future = None
def beta_create_Encoder_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_deserializers = {
('Encoder', 'decode'): DecodeRequest.FromString,
('Encoder', 'encode'): EncodeRequest.FromString,
}
response_serializers = {
('Encoder', 'decode'): DecodeResponse.SerializeToString,
('Encoder', 'encode'): EncodeResponse.SerializeToString,
}
method_implementations = {
('Encoder', 'decode'): face_utilities.unary_unary_inline(servicer.decode),
('Encoder', 'encode'): face_utilities.unary_unary_inline(servicer.encode),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_Encoder_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_serializers = {
('Encoder', 'decode'): DecodeRequest.SerializeToString,
('Encoder', 'encode'): EncodeRequest.SerializeToString,
}
response_deserializers = {
('Encoder', 'decode'): DecodeResponse.FromString,
('Encoder', 'encode'): EncodeResponse.FromString,
}
cardinalities = {
'decode': cardinality.Cardinality.UNARY_UNARY,
'encode': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'Encoder', cardinalities, options=stub_options)
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
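# A minimal usage sketch for the generated service (assumes grpcio is
# installed and MyEncoderServicer subclasses EncoderServicer):
#
#   from concurrent import futures
#   server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
#   add_EncoderServicer_to_server(MyEncoderServicer(), server)
#   server.add_insecure_port('[::]:50051')
#   server.start()
#
#   channel = grpc.insecure_channel('localhost:50051')
#   stub = EncoderStub(channel)
#   print(stub.encode(EncodeRequest(url='http://example.com')).id)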
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Loss operations for use in neural networks.
The loss ops measure error for use in neural networks. These losses
can be used for measuring accuracy of a network in a regression task
or for regularization purposes (e.g., weight decay).
These loss ops are, by design, minimal, enabling flexibility in how
their output can be used.
@@absolute
@@squared
@@logistic
@@softmax
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.framework import tensor_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
__all__ = ["absolute", "squared", "logistic", "softmax"]
def _reduce_batch(x, reduce_fn, name=None):
"""Given a tensor `x`, calls reduce_fn to reduce it across dimensions.
Given a tensor with number of dimensions > 1, _reduce_batch will reduce the
tensor across all dimensions except for dimension 0. As an example, given a
tensor of shape [batch_size, d1, d2], this function will reduce across
dimensions d1 and d2, returning a tensor of shape [batch_size].
Tensors of dimension 1 are returned as-is, while tensors of dimension 0
raise a ValueError.
Args:
x: A `Tensor` with dimension > 0.
reduce_fn: A math_ops reduce function that takes arguments of
`x`, `reduction_indices`, and `name`.
name: A name for the operation (optional).
Returns:
A `Tensor` with values reduced by reduce_fn across all dimensions > 0.
Raises:
ValueError: If `x` has dimension 0.
"""
x = ops.convert_to_tensor(x, name="x")
with ops.op_scope([x], name, "reduce_batch"):
ndims = x.get_shape().ndims
if ndims == 0:
raise ValueError("Cannot reduce a scalar into batches.")
elif ndims == 1:
return x # Don't include a useless reduction.
elif ndims:
reduction_indices = math_ops.range(1, ndims)
shape = [x.get_shape().dims[0]]
else:
reduction_indices = math_ops.range(1, array_ops.size(array_ops.shape(x)))
shape = [None] # We don't know much about the shape, but it is rank 1.
result = reduce_fn(x, reduction_indices=reduction_indices)
# Give a shape hint in case we have extra information.
result.set_shape(shape)
return result
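# A quick shape sketch (hypothetical values, not part of the module):
#
#   x = ops.convert_to_tensor([[1., 2.], [3., 4.], [5., 6.]])  # shape [3, 2]
#   _reduce_batch(x, math_ops.reduce_sum)        # -> [3., 7., 11.], shape [3]
#   _reduce_batch(x[:, 0], math_ops.reduce_sum)  # rank 1: returned as-is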
def _reduce_batch_sum(x, name=None):
"""Given a tensor `x`, sums across all dimensions except dimension 0.
Given a tensor with the number of dimensions > 1, this will sum across all
dimensions except for dimension 0. This function is useful for summing the
loss (error) across all examples in a batch when training. As an example,
given a tensor of shape [batch_size, d1, d2], this function will sum across
dimensions d1 and d2, returning a tensor of shape [batch_size].
Tensors of dimension 1 are returned as-is, while tensors of dimension 0
raise a ValueError.
Args:
x: A `Tensor` with dimension > 0.
name: A name for the operation (optional).
Returns:
A `Tensor` with values summed across all dimensions > 0.
Raises:
ValueError: If `x` has dimension 0.
"""
return _reduce_batch(x, math_ops.reduce_sum, name)
def _reduce_to_scalar(x, name=None):
"""Reduces losses to a scalar.
Given a tensor `x`, sums across all dimensions except dimension 0, then
averages across dimension 0.
Args:
x: A `Tensor` with dimension > 0.
name: A name for the operation (optional).
Returns:
The sum of losses per example, averaged across the batch.
"""
with ops.op_scope([x], name, "scalar") as scope:
return math_ops.reduce_mean(_reduce_batch_sum(x), name=scope)
def _validate_predicted_and_target(predicted, target):
# TODO(ptucker): Optionally add assert op for shape check, for cases when
# shape is not fully defined at graph construction time?
predicted.get_shape().assert_is_compatible_with(target.get_shape())
tensor_util.assert_same_float_dtype([predicted, target])
def _raw_absolute(predicted, target, name=None):
"""Computes and returns the per-example absolute loss.
Computes the per-example absolute value of the difference between
the target and predicted tensors. The tensors must have the same
shape.
Args:
predicted: A `Tensor` of shape `[batch_size, dim_1, ..., dim_n]`
of predicted values.
target: A `Tensor` of shape `[batch_size, dim_1, ..., dim_n]` of
target values. The shape of the target tensor should match the
`predicted` tensor.
name: A name for the operation (optional).
Returns:
A `[batch_size, dim_1, ..., dim_n]` tensor of per-example absolute losses.
Raises:
ValueError: If `predicted` and `target` shapes do not match.
"""
with ops.op_scope([predicted, target], name, "absolute_loss") as scope:
predicted = ops.convert_to_tensor(predicted, name="predicted")
target = ops.convert_to_tensor(target, name="target")
_validate_predicted_and_target(predicted, target)
return math_ops.abs(target - predicted, name=scope)
def _raw_squared(predicted, target, name=None):
"""Computes and returns the per-example squared loss, divided by 2.
Computes the per-example squared difference between the target and
predicted tensors. The tensors must have the same shape.
Args:
predicted: A `Tensor` of shape `[batch_size, dim_1, ..., dim_n]`
of predicted values.
target: A `Tensor` of shape `[batch_size, dim_1, ..., dim_n]` of
target values. The shape of the target tensor should match the
`predicted` tensor.
name: A name for the operation (optional).
Returns:
A `[batch_size, dim_1, ..., dim_n]` tensor of per-example squared losses.
Raises:
ValueError: If `predicted` and `target` shapes do not match.
"""
with ops.op_scope([predicted, target], name, "squared_loss") as scope:
predicted = ops.convert_to_tensor(predicted, name="predicted")
target = ops.convert_to_tensor(target, name="target")
_validate_predicted_and_target(predicted, target)
return math_ops.div(math_ops.square(target - predicted), 2.0, name=scope)
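# e.g. predicted = [1., 3.], target = [2., 1.] gives (target - predicted)**2 / 2
# = [0.5, 2.0]; the factor of 1/2 makes the gradient w.r.t. `predicted` simply
# (predicted - target).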
def absolute(predicted, target, name=None):
"""Reduces absolute losses to a scalar.
Args:
predicted: A `Tensor` of shape `[batch_size, dim_1, ..., dim_n]`
of predicted values.
target: A `Tensor` of shape `[batch_size, dim_1, ..., dim_n]` of
target values. The shape of the target tensor should match the
`predicted` tensor.
name: A name for the operation (optional).
Returns:
The sum of absolute losses per example, averaged across the batch.
"""
with ops.op_scope([predicted, target], name, "absolute_loss") as scope:
return _reduce_to_scalar(_raw_absolute(predicted, target), name=scope)
def squared(predicted, target, name=None):
"""Reduces squared losses to a scalar.
Args:
predicted: A `Tensor` of shape `[batch_size, dim_1, ..., dim_n]`
of predicted values.
target: A `Tensor` of shape `[batch_size, dim_1, ..., dim_n]` of
target values. The shape of the target tensor should match the
`predicted` tensor.
name: A name for the operation (optional).
Returns:
The sum of squared losses per example, averaged across the batch.
"""
with ops.op_scope([predicted, target], name, "squared_loss") as scope:
return _reduce_to_scalar(_raw_squared(predicted, target), name=scope)
def logistic(logit, target, name=None):
"""Calculates the logistic cross-entropy loss, averaged across batches.
**WARNING:** `logit` must be unscaled, while the `target` should be a
normalized probability prediction. See
`tf.nn.sigmoid_cross_entropy_with_logits` for more details.
Args:
logit: A `Tensor` of shape `[batch_size, dim_1, ..., dim_n]`
of predicted logit values.
target: A `Tensor` of shape `[batch_size, dim_1, ..., dim_n]` of
target values. The shape of the target tensor should match the
`logit` tensor.
name: A name for the operation (optional).
Returns:
A scalar `tensor` of the logistic cross-entropy loss, averaged across
batches.
Raises:
ValueError: If `logit` and `target` shapes do not match.
"""
with ops.op_scope([logit, target], name, "logistic_loss") as scope:
return _reduce_to_scalar(
nn.sigmoid_cross_entropy_with_logits(logit, target), name=scope)
def softmax(logit, target, name=None):
"""Calculates the softmax cross-entropy loss, averaged across batches.
**WARNING:** `logit` must be unscaled, while the `target` should be a
normalized probability prediction. See
`tf.nn.softmax_cross_entropy_with_logits` for more details.
Args:
logit: Tensor of actual values. Shape must have rank 2, generally
(batch, num_classes). num_classes must be > 1. For single-class
regression, use `logistic`. Type must be `tf.float32` or `tf.float64`.
target: A `Tensor` of shape `[batch_size, dim_1, ..., dim_n]` of
target values. The shape of the target tensor should match the
`logit` tensor.
name: A name for the operation (optional).
Returns:
A scalar `tensor` of the softmax cross-entropy loss, averaged across
batches.
Raises:
ValueError: If `logit` and `target` shapes do not match.
"""
with ops.op_scope([logit, target], name, "softmax_loss") as scope:
shape = logit.get_shape().with_rank(2)
if shape.dims[1] and shape.dims[1] < 2:
raise ValueError(
"Invalid shape %s; use logistic() instead for only 1 class." %
shape)
return _reduce_to_scalar(
nn.softmax_cross_entropy_with_logits(logit, target), name=scope)
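# Usage sketch (assumes the functions above are importable in a graph-mode
# TensorFlow session; the tensor values are hypothetical). It mirrors the
# reduction documented above: a per-example sum of elementwise losses,
# then a mean across the batch:
#
#     predicted = tf.constant([[0.9, 2.1], [1.2, 0.3]])
#     target = tf.constant([[1.0, 2.0], [1.0, 0.0]])
#     loss = squared(predicted, target)  # scalar: mean over the batch of
#                                        # per-example sums of (t - p)^2 / 2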
| |
import asyncio
import os
import traceback
from functools import partial
from inspect import isawaitable
from multiprocessing import Process
from signal import (
SIGTERM, SIGINT,
signal as signal_func,
Signals
)
from socket import (
socket,
SOL_SOCKET,
SO_REUSEADDR,
)
from time import time
from httptools import HttpRequestParser
from httptools.parser.errors import HttpParserError
try:
import uvloop as async_loop
except ImportError:
async_loop = asyncio
from sanic.log import log, netlog
from sanic.response import HTTPResponse
from sanic.request import Request
from sanic.exceptions import (
RequestTimeout, PayloadTooLarge, InvalidUsage, ServerError)
current_time = None
class Signal:
stopped = False
class CIDict(dict):
"""Case Insensitive dict where all keys are converted to lowercase
This does not preserve the original case of keys in items() or keys(),
in favor of speed, since HTTP headers are case-insensitive
"""
def get(self, key, default=None):
return super().get(key.casefold(), default)
def __getitem__(self, key):
return super().__getitem__(key.casefold())
def __setitem__(self, key, value):
return super().__setitem__(key.casefold(), value)
def __contains__(self, key):
return super().__contains__(key.casefold())
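# Usage sketch: CIDict lookups are case-insensitive no matter which case was
# used at insertion time (example values are hypothetical):
#
#     headers = CIDict()
#     headers['Content-Type'] = 'text/html'
#     assert headers['content-type'] == 'text/html'
#     assert 'CONTENT-TYPE' in headers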
class HttpProtocol(asyncio.Protocol):
__slots__ = (
# event loop, connection
'loop', 'transport', 'connections', 'signal',
# request params
'parser', 'request', 'url', 'headers',
# request config
'request_handler', 'request_timeout', 'request_max_size',
'request_class', 'is_request_stream', 'router',
# enable or disable access log / error log purpose
'has_log',
# connection management
'_total_request_size', '_timeout_handler', '_last_communication_time',
'_is_stream_handler')
def __init__(self, *, loop, request_handler, error_handler,
signal=Signal(), connections=set(), request_timeout=60,
request_max_size=None, request_class=None, has_log=True,
keep_alive=True, is_request_stream=False, router=None,
state=None, debug=False, **kwargs):
self.loop = loop
self.transport = None
self.request = None
self.parser = None
self.url = None
self.headers = None
self.router = router
self.signal = signal
self.has_log = has_log
self.connections = connections
self.request_handler = request_handler
self.error_handler = error_handler
self.request_timeout = request_timeout
self.request_max_size = request_max_size
self.request_class = request_class or Request
self.is_request_stream = is_request_stream
self._is_stream_handler = False
self._total_request_size = 0
self._timeout_handler = None
self._last_request_time = None
self._request_handler_task = None
self._request_stream_task = None
self._keep_alive = keep_alive
self._header_fragment = b''
self.state = state if state else {}
if 'requests_count' not in self.state:
self.state['requests_count'] = 0
self._debug = debug
@property
def keep_alive(self):
return (
self._keep_alive and
not self.signal.stopped and
self.parser.should_keep_alive())
# -------------------------------------------- #
# Connection
# -------------------------------------------- #
def connection_made(self, transport):
self.connections.add(self)
self._timeout_handler = self.loop.call_later(
self.request_timeout, self.connection_timeout)
self.transport = transport
self._last_request_time = current_time
def connection_lost(self, exc):
self.connections.discard(self)
self._timeout_handler.cancel()
def connection_timeout(self):
# Check whether the timeout has really elapsed since the last
# request; if not, reschedule the check for the remaining time
time_elapsed = current_time - self._last_request_time
if time_elapsed < self.request_timeout:
time_left = self.request_timeout - time_elapsed
self._timeout_handler = (
self.loop.call_later(time_left, self.connection_timeout))
else:
if self._request_stream_task:
self._request_stream_task.cancel()
if self._request_handler_task:
self._request_handler_task.cancel()
try:
raise RequestTimeout('Request Timeout')
except RequestTimeout as exception:
self.write_error(exception)
# -------------------------------------------- #
# Parsing
# -------------------------------------------- #
def data_received(self, data):
# Check for the request itself getting too large and exceeding
# memory limits
self._total_request_size += len(data)
if self._total_request_size > self.request_max_size:
exception = PayloadTooLarge('Payload Too Large')
self.write_error(exception)
# Create parser if this is the first time we're receiving data
if self.parser is None:
assert self.request is None
self.headers = []
self.parser = HttpRequestParser(self)
# requests count
self.state['requests_count'] = self.state['requests_count'] + 1
# Parse request chunk or close connection
try:
self.parser.feed_data(data)
except HttpParserError:
message = 'Bad Request'
if self._debug:
message += '\n' + traceback.format_exc()
exception = InvalidUsage(message)
self.write_error(exception)
def on_url(self, url):
if not self.url:
self.url = url
else:
self.url += url
def on_header(self, name, value):
self._header_fragment += name
if value is not None:
if self._header_fragment == b'Content-Length' \
and int(value) > self.request_max_size:
exception = PayloadTooLarge('Payload Too Large')
self.write_error(exception)
self.headers.append(
(self._header_fragment.decode().casefold(),
value.decode()))
self._header_fragment = b''
def on_headers_complete(self):
self.request = self.request_class(
url_bytes=self.url,
headers=CIDict(self.headers),
version=self.parser.get_http_version(),
method=self.parser.get_method().decode(),
transport=self.transport
)
if self.is_request_stream:
self._is_stream_handler = self.router.is_stream_handler(
self.request)
if self._is_stream_handler:
self.request.stream = asyncio.Queue()
self.execute_request_handler()
def on_body(self, body):
if self.is_request_stream and self._is_stream_handler:
self._request_stream_task = self.loop.create_task(
self.request.stream.put(body))
return
self.request.body.append(body)
def on_message_complete(self):
if self.is_request_stream and self._is_stream_handler:
self._request_stream_task = self.loop.create_task(
self.request.stream.put(None))
return
self.request.body = b''.join(self.request.body)
self.execute_request_handler()
def execute_request_handler(self):
self._request_handler_task = self.loop.create_task(
self.request_handler(
self.request,
self.write_response,
self.stream_response))
# -------------------------------------------- #
# Responding
# -------------------------------------------- #
def write_response(self, response):
"""
Writes response content synchronously to the transport.
"""
try:
keep_alive = self.keep_alive
self.transport.write(
response.output(
self.request.version, keep_alive,
self.request_timeout))
if self.has_log:
netlog.info('', extra={
'status': response.status,
'byte': len(response.body),
'host': '{0}:{1}'.format(self.request.ip[0],
self.request.ip[1]),
'request': '{0} {1}'.format(self.request.method,
self.request.url)
})
except AttributeError:
log.error(
('Invalid response object for url {}, '
'Expected Type: HTTPResponse, Actual Type: {}').format(
self.url, type(response)))
self.write_error(ServerError('Invalid response type'))
except RuntimeError:
log.error(
'Connection lost before response written @ {}'.format(
self.request.ip))
except Exception as e:
self.bail_out(
"Writing response failed, connection closed {}".format(
repr(e)))
finally:
if not keep_alive:
self.transport.close()
else:
self._last_request_time = current_time
self.cleanup()
async def stream_response(self, response):
"""
Streams a response to the client asynchronously. Attaches
the transport to the response so the response consumer can
write to the response as needed.
"""
try:
keep_alive = self.keep_alive
response.transport = self.transport
await response.stream(
self.request.version, keep_alive, self.request_timeout)
if self.has_log:
netlog.info('', extra={
'status': response.status,
'byte': -1,
'host': '{0}:{1}'.format(self.request.ip[0],
self.request.ip[1]),
'request': '{0} {1}'.format(self.request.method,
self.request.url)
})
except AttributeError:
log.error(
('Invalid response object for url {}, '
'Expected Type: HTTPResponse, Actual Type: {}').format(
self.url, type(response)))
self.write_error(ServerError('Invalid response type'))
except RuntimeError:
log.error(
'Connection lost before response written @ {}'.format(
self.request.ip))
except Exception as e:
self.bail_out(
"Writing response failed, connection closed {}".format(
repr(e)))
finally:
if not keep_alive:
self.transport.close()
else:
self._last_request_time = current_time
self.cleanup()
def write_error(self, exception):
response = None
try:
response = self.error_handler.response(self.request, exception)
version = self.request.version if self.request else '1.1'
self.transport.write(response.output(version))
except RuntimeError:
log.error(
'Connection lost before error written @ {}'.format(
self.request.ip if self.request else 'Unknown'))
except Exception as e:
self.bail_out(
"Writing error failed, connection closed {}".format(repr(e)),
from_error=True)
finally:
if self.has_log:
extra = dict()
if isinstance(response, HTTPResponse):
extra['status'] = response.status
extra['byte'] = len(response.body)
else:
extra['status'] = 0
extra['byte'] = -1
if self.request:
extra['host'] = '%s:%d' % self.request.ip
extra['request'] = '%s %s' % (self.request.method,
self.url)
else:
extra['host'] = 'UNKNOWN'
extra['request'] = 'nil'
if self.parser and not (self.keep_alive
and extra['status'] == 408):
netlog.info('', extra=extra)
self.transport.close()
def bail_out(self, message, from_error=False):
if from_error or self.transport.is_closing():
log.error(
("Transport closed @ {} and exception "
"experienced during error handling").format(
self.transport.get_extra_info('peername')))
log.debug(
'Exception:\n{}'.format(traceback.format_exc()))
else:
exception = ServerError(message)
self.write_error(exception)
log.error(message)
def cleanup(self):
self.parser = None
self.request = None
self.url = None
self.headers = None
self._request_handler_task = None
self._request_stream_task = None
self._total_request_size = 0
self._is_stream_handler = False
def close_if_idle(self):
"""Close the connection if a request is not being sent or received
:return: boolean - True if closed, False if staying open
"""
if not self.parser:
self.transport.close()
return True
return False
def close(self):
"""
Force close the connection.
"""
if self.transport is not None:
self.transport.close()
self.transport = None
def update_current_time(loop):
"""Cache the current time, since it is needed at the end of every
keep-alive request to update the request timeout time
:param loop:
:return:
"""
global current_time
current_time = time()
loop.call_later(1, partial(update_current_time, loop))
def trigger_events(events, loop):
"""Trigger event callbacks (functions or async)
:param events: one or more sync or async functions to execute
:param loop: event loop
"""
for event in events:
result = event(loop)
if isawaitable(result):
loop.run_until_complete(result)
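# Usage sketch (hypothetical callbacks): trigger_events accepts both plain
# and async callables; awaitable results are run to completion on the loop:
#
#     def log_start(loop):
#         print('starting')
#     async def warm_up(loop):
#         await asyncio.sleep(0)
#     trigger_events([log_start, warm_up], loop)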
def serve(host, port, request_handler, error_handler, before_start=None,
after_start=None, before_stop=None, after_stop=None, debug=False,
request_timeout=60, ssl=None, sock=None, request_max_size=None,
reuse_port=False, loop=None, protocol=HttpProtocol, backlog=100,
register_sys_signals=True, run_async=False, connections=None,
signal=Signal(), request_class=None, has_log=True, keep_alive=True,
is_request_stream=False, router=None, websocket_max_size=None,
websocket_max_queue=None, state=None,
graceful_shutdown_timeout=15.0):
"""Start asynchronous HTTP Server on an individual process.
:param host: Address to host on
:param port: Port to host on
:param request_handler: Sanic request handler with middleware
:param error_handler: Sanic error handler with middleware
:param before_start: function to be executed before the server starts
listening. Takes arguments `app` instance and `loop`
:param after_start: function to be executed after the server starts
listening. Takes arguments `app` instance and `loop`
:param before_stop: function to be executed when a stop signal is
received before it is respected. Takes arguments
`app` instance and `loop`
:param after_stop: function to be executed when a stop signal is
received after it is respected. Takes arguments
`app` instance and `loop`
:param debug: enables debug output (slows server)
:param request_timeout: time in seconds
:param ssl: SSLContext
:param sock: Socket for the server to accept connections from
:param request_max_size: size in bytes, `None` for no limit
:param reuse_port: `True` for multiple workers
:param loop: asyncio compatible event loop
:param protocol: subclass of asyncio protocol class
:param request_class: Request class to use
:param has_log: disable/enable access log and error log
:param is_request_stream: disable/enable Request.stream
:param router: Router object
:return: Nothing
"""
if not run_async:
loop = async_loop.new_event_loop()
asyncio.set_event_loop(loop)
if debug:
loop.set_debug(debug)
connections = connections if connections is not None else set()
server = partial(
protocol,
loop=loop,
connections=connections,
signal=signal,
request_handler=request_handler,
error_handler=error_handler,
request_timeout=request_timeout,
request_max_size=request_max_size,
request_class=request_class,
has_log=has_log,
keep_alive=keep_alive,
is_request_stream=is_request_stream,
router=router,
websocket_max_size=websocket_max_size,
websocket_max_queue=websocket_max_queue,
state=state,
debug=debug,
)
server_coroutine = loop.create_server(
server,
host,
port,
ssl=ssl,
reuse_port=reuse_port,
sock=sock,
backlog=backlog
)
# Instead of pulling the time for each request,
# cache it and refresh once per second
loop.call_soon(partial(update_current_time, loop))
if run_async:
return server_coroutine
trigger_events(before_start, loop)
try:
http_server = loop.run_until_complete(server_coroutine)
except Exception:
log.exception("Unable to start server")
return
trigger_events(after_start, loop)
# Register signals for graceful termination
if register_sys_signals:
for _signal in (SIGINT, SIGTERM):
try:
loop.add_signal_handler(_signal, loop.stop)
except NotImplementedError:
log.warning('Sanic tried to use loop.add_signal_handler but it is'
' not implemented on this platform.')
pid = os.getpid()
try:
log.info('Starting worker [{}]'.format(pid))
loop.run_forever()
finally:
log.info("Stopping worker [{}]".format(pid))
# Run the on_stop function if provided
trigger_events(before_stop, loop)
# Wait for event loop to finish and all connections to drain
http_server.close()
loop.run_until_complete(http_server.wait_closed())
# Complete all tasks on the loop
signal.stopped = True
for connection in connections:
connection.close_if_idle()
# Graceful shutdown timeout:
# honor graceful_shutdown_timeout instead of letting
# connections hang forever, and roughly track the elapsed time.
start_shutdown = 0
while connections and (start_shutdown < graceful_shutdown_timeout):
loop.run_until_complete(asyncio.sleep(0.1))
start_shutdown = start_shutdown + 0.1
# Force close non-idle connection after waiting for
# graceful_shutdown_timeout
coros = []
for conn in connections:
if hasattr(conn, "websocket") and conn.websocket:
coros.append(conn.websocket.close_connection(force=True))
else:
conn.close()
_shutdown = asyncio.gather(*coros, loop=loop)
loop.run_until_complete(_shutdown)
trigger_events(after_stop, loop)
loop.close()
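# Usage sketch (hypothetical handler and error handler; normally a Sanic app
# wires these up itself before calling serve()):
#
#     async def handler(request, write_callback, stream_callback):
#         write_callback(HTTPResponse(body='hello'))
#
#     serve('127.0.0.1', 8000, handler, app_error_handler)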
def serve_multiple(server_settings, workers):
"""Start multiple server processes simultaneously. Stop on interrupt
and terminate signals, and drain connections when complete.
:param server_settings: kw arguments to be passed to the serve function
:param workers: number of workers to launch
:return:
"""
server_settings['reuse_port'] = True
# Handling when custom socket is not provided.
if server_settings.get('sock') is None:
sock = socket()
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
sock.bind((server_settings['host'], server_settings['port']))
sock.set_inheritable(True)
server_settings['sock'] = sock
server_settings['host'] = None
server_settings['port'] = None
def sig_handler(signal, frame):
log.info("Received signal {}. Shutting down.".format(
Signals(signal).name))
for process in processes:
os.kill(process.pid, SIGINT)
signal_func(SIGINT, lambda s, f: sig_handler(s, f))
signal_func(SIGTERM, lambda s, f: sig_handler(s, f))
processes = []
for _ in range(workers):
process = Process(target=serve, kwargs=server_settings)
process.daemon = True
process.start()
processes.append(process)
for process in processes:
process.join()
# the above processes will block this until they're stopped
for process in processes:
process.terminate()
server_settings.get('sock').close()
| |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.forms import ValidationError # noqa
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import validators as utils_validators
from openstack_dashboard.api import cinder
from openstack_dashboard.dashboards.admin.volumes.snapshots.forms \
import populate_status_choices
from openstack_dashboard.dashboards.project.volumes.volumes \
import forms as project_forms
# This set of states was pulled from cinder's admin_actions.py
STATUS_CHOICES = (
('attaching', _('Attaching')),
('available', _('Available')),
('creating', _('Creating')),
('deleting', _('Deleting')),
('detaching', _('Detaching')),
('error', _('Error')),
('error_deleting', _('Error Deleting')),
('in-use', _('In Use')),
)
class ManageVolume(forms.SelfHandlingForm):
identifier = forms.CharField(
max_length=255,
label=_("Identifier"),
help_text=_("Name or other identifier for existing volume"))
id_type = forms.ChoiceField(
label=_("Identifier Type"),
help_text=_("Type of backend device identifier provided"))
host = forms.CharField(
max_length=255,
label=_("Host"),
help_text=_("Cinder host on which the existing volume resides; "
"takes the form: host@backend-name#pool"))
name = forms.CharField(
max_length=255,
label=_("Volume Name"),
required=False,
help_text=_("Volume name to be assigned"))
description = forms.CharField(max_length=255, widget=forms.Textarea(
attrs={'class': 'modal-body-fixed-width', 'rows': 4}),
label=_("Description"), required=False)
metadata = forms.CharField(max_length=255, widget=forms.Textarea(
attrs={'class': 'modal-body-fixed-width', 'rows': 2}),
label=_("Metadata"), required=False,
help_text=_("Comma-separated key=value pairs"),
validators=[utils_validators.validate_metadata])
volume_type = forms.ChoiceField(
label=_("Volume Type"),
required=False)
availability_zone = forms.ChoiceField(
label=_("Availability Zone"),
required=False)
bootable = forms.BooleanField(
label=_("Bootable"),
required=False,
help_text=_("Specifies that the newly created volume "
"should be marked as bootable"))
def __init__(self, request, *args, **kwargs):
super(ManageVolume, self).__init__(request, *args, **kwargs)
self.fields['id_type'].choices = [("source-name", _("Name"))] + \
[("source-id", _("ID"))]
volume_types = cinder.volume_type_list(request)
self.fields['volume_type'].choices = [("", _("No volume type"))] + \
[(type.name, type.name)
for type in volume_types]
self.fields['availability_zone'].choices = \
project_forms.availability_zones(request)
def handle(self, request, data):
try:
az = data.get('availability_zone')
# assume the user enters metadata as "key1=val1,key2=val2";
# convert it to a dictionary
metadataDict = {}
metadata = data.get('metadata')
if metadata:
metadata = metadata.replace(" ", "")
for item in metadata.split(','):
key, value = item.split('=')
metadataDict[key] = value
cinder.volume_manage(request,
host=data['host'],
identifier=data['identifier'],
id_type=data['id_type'],
name=data['name'],
description=data['description'],
volume_type=data['volume_type'],
availability_zone=az,
metadata=metadataDict,
bootable=data['bootable'])
# for success message, use identifier if user does not
# provide a volume name
volume_name = data['name']
if not volume_name:
volume_name = data['identifier']
messages.success(
request,
_('Successfully sent the request to manage volume: %s')
% volume_name)
return True
except Exception:
redirect = reverse("horizon:admin:volumes:index")
exceptions.handle(request, _("Unable to manage volume."),
redirect=redirect)
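# A standalone sketch of the metadata parsing done in handle() above (the
# helper name is illustrative and not part of this module):
#
#     def _parse_metadata(metadata):
#         """Turn "key1=val1, key2=val2" into {'key1': 'val1', 'key2': 'val2'}."""
#         result = {}
#         for item in metadata.replace(" ", "").split(','):
#             key, value = item.split('=')
#             result[key] = value
#         return result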
class UnmanageVolume(forms.SelfHandlingForm):
name = forms.CharField(label=_("Volume Name"),
required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
host = forms.CharField(label=_("Host"),
required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
volume_id = forms.CharField(label=_("ID"),
required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
def handle(self, request, data):
try:
cinder.volume_unmanage(request, self.initial['volume_id'])
messages.success(
request,
_('Successfully sent the request to unmanage volume: %s')
% data['name'])
return True
except Exception:
redirect = reverse("horizon:admin:volumes:index")
exceptions.handle(request, _("Unable to unmanage volume."),
redirect=redirect)
class MigrateVolume(forms.SelfHandlingForm):
name = forms.CharField(label=_("Volume Name"),
required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
current_host = forms.CharField(label=_("Current Host"),
required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
host = forms.ChoiceField(label=_("Destination Host"),
help_text=_("Choose a Host to migrate to."))
force_host_copy = forms.BooleanField(label=_("Force Host Copy"),
initial=False, required=False)
def __init__(self, request, *args, **kwargs):
super(MigrateVolume, self).__init__(request, *args, **kwargs)
initial = kwargs.get('initial', {})
self.fields['host'].choices = self.populate_host_choices(request,
initial)
def populate_host_choices(self, request, initial):
hosts = initial.get('hosts')
current_host = initial.get('current_host')
host_list = [(host.name, host.name)
for host in hosts
if host.name != current_host]
if host_list:
host_list.insert(0, ("", _("Select a new host")))
else:
host_list.insert(0, ("", _("No other hosts available")))
return sorted(host_list)
def handle(self, request, data):
try:
cinder.volume_migrate(request,
self.initial['volume_id'],
data['host'],
data['force_host_copy'])
messages.success(
request,
_('Successfully sent the request to migrate volume: %s')
% data['name'])
return True
except Exception:
redirect = reverse("horizon:admin:volumes:volumes_tab")
exceptions.handle(request, _("Failed to migrate volume."),
redirect=redirect)
class CreateVolumeType(forms.SelfHandlingForm):
name = forms.CharField(max_length=255, label=_("Name"))
vol_type_description = forms.CharField(
max_length=255,
widget=forms.Textarea(
attrs={'class': 'modal-body-fixed-width',
'rows': 4}),
label=_("Description"),
required=False)
def clean_name(self):
cleaned_name = self.cleaned_data['name']
if len(cleaned_name.strip()) == 0:
raise ValidationError(_('Volume type name cannot be empty.'))
return cleaned_name
def handle(self, request, data):
try:
volume_type = cinder.volume_type_create(
request,
data['name'],
data['vol_type_description'])
messages.success(request, _('Successfully created volume type: %s')
% data['name'])
return volume_type
except Exception:
redirect = reverse("horizon:admin:volumes:index")
exceptions.handle(request,
_('Unable to create volume type.'),
redirect=redirect)
class UpdateStatus(forms.SelfHandlingForm):
status = forms.ChoiceField(label=_("Status"))
def __init__(self, request, *args, **kwargs):
super(UpdateStatus, self).__init__(request, *args, **kwargs)
initial = kwargs.get('initial', {})
self.fields['status'].choices = (
populate_status_choices(initial, STATUS_CHOICES))
def handle(self, request, data):
# Obtain the localized status for including in the message
for choice in self.fields['status'].choices:
if choice[0] == data['status']:
new_status = choice[1]
break
else:
new_status = data['status']
try:
cinder.volume_reset_state(request,
self.initial['volume_id'],
data['status'])
messages.success(request,
_('Successfully updated volume status to "%s".') %
new_status)
return True
except Exception:
redirect = reverse("horizon:admin:volumes:index")
exceptions.handle(request,
_('Unable to update volume status to "%s".') %
new_status, redirect=redirect)
class CreateQosSpec(forms.SelfHandlingForm):
name = forms.CharField(max_length=255, label=_("Name"))
consumer = forms.ChoiceField(label=_("Consumer"),
choices=cinder.CONSUMER_CHOICES)
def handle(self, request, data):
try:
qos_spec = cinder.qos_spec_create(request,
data['name'],
{'consumer': data['consumer']})
messages.success(request,
_('Successfully created QoS Spec: %s')
% data['name'])
return qos_spec
except Exception:
redirect = reverse("horizon:admin:volumes:index")
exceptions.handle(request,
_('Unable to create QoS Spec.'),
redirect=redirect)
| |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Django settings for Hue.
#
# Local customizations are done by symlinking a file
# as local_settings.py.
import logging
import os
import pkg_resources
import sys
from guppy import hpy
from django.utils.translation import ugettext_lazy as _
import desktop.log
import desktop.redaction
from desktop.lib.paths import get_desktop_root
from desktop.lib.python_util import force_dict_to_strings
from aws.conf import is_enabled as is_s3_enabled
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)), '..', '..', '..'))
HUE_DESKTOP_VERSION = pkg_resources.get_distribution("desktop").version or "Unknown"
NICE_NAME = "Hue"
ENV_HUE_PROCESS_NAME = "HUE_PROCESS_NAME"
ENV_DESKTOP_DEBUG = "DESKTOP_DEBUG"
############################################################
# Part 1: Logging and imports.
############################################################
# Configure debug mode
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Start basic logging as soon as possible.
if ENV_HUE_PROCESS_NAME not in os.environ:
_proc = os.path.basename(len(sys.argv) > 1 and sys.argv[1] or sys.argv[0])
os.environ[ENV_HUE_PROCESS_NAME] = _proc
desktop.log.basic_logging(os.environ[ENV_HUE_PROCESS_NAME])
logging.info("Welcome to Hue " + HUE_DESKTOP_VERSION)
# Then we can safely import some more stuff
from desktop import appmanager
from desktop.lib import conf
# Add fancy logging
desktop.log.fancy_logging()
############################################################
# Part 2: Generic Configuration
############################################################
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
LANGUAGES = [
('de', _('German')),
('en-us', _('English')),
('es', _('Spanish')),
('fr', _('French')),
('ja', _('Japanese')),
('ko', _('Korean')),
('pt', _('Portuguese')),
('pt_BR', _('Brazilian Portuguese')),
('zh_CN', _('Simplified Chinese')),
]
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
############################################################
# Part 3: Django configuration
############################################################
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'desktop', 'libs', 'indexer', 'src', 'indexer', 'static'),
os.path.join(BASE_DIR, 'desktop', 'libs', 'notebook', 'src', 'notebook', 'static'),
os.path.join(BASE_DIR, 'desktop', 'libs', 'liboauth', 'src', 'liboauth', 'static'),
)
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.CachedStaticFilesStorage'
# For Django admin interface
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'build', 'static')
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader'
)
MIDDLEWARE_CLASSES = [
# The order matters
'desktop.middleware.MetricsMiddleware',
'desktop.middleware.EnsureSafeMethodMiddleware',
'desktop.middleware.AuditLoggingMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'desktop.middleware.SpnegoMiddleware',
'desktop.middleware.HueRemoteUserMiddleware',
'django.middleware.locale.LocaleMiddleware',
'babeldjango.middleware.LocaleMiddleware',
'desktop.middleware.AjaxMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'desktop.middleware.ContentSecurityPolicyMiddleware',
# Must be after Session, Auth, and Ajax. Before everything else.
'desktop.middleware.LoginAndPermissionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'desktop.middleware.NotificationMiddleware',
'desktop.middleware.ExceptionMiddleware',
'desktop.middleware.ClusterMiddleware',
# 'debug_toolbar.middleware.DebugToolbarMiddleware'
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.http.ConditionalGetMiddleware',
'axes.middleware.FailedLoginMiddleware',
'desktop.middleware.MimeTypeJSFileFixStreamingMiddleware',
]
# if os.environ.get(ENV_DESKTOP_DEBUG):
# MIDDLEWARE_CLASSES.append('desktop.middleware.HtmlValidationMiddleware')
# logging.debug("Will try to validate generated HTML.")
ROOT_URLCONF = 'desktop.urls'
# Hue runs its own wsgi applications
WSGI_APPLICATION = None
TEMPLATE_DIRS = (
get_desktop_root("core/templates"),
)
INSTALLED_APPS = [
'django.contrib.auth',
'django_openid_auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'django.contrib.admin',
'django_extensions',
# 'debug_toolbar',
'south', # database migration tool
# i18n support
'babeldjango',
# Desktop injects all the other installed apps into here magically.
'desktop',
# App that keeps track of failed logins.
'axes',
]
LOCALE_PATHS = [
get_desktop_root('core/src/desktop/locale')
]
# Keep default values up to date
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
# Not default
'desktop.context_processors.app_name',
)
# Desktop doesn't use an auth profile module, because
# it doesn't mesh very well with the notion
# of having multiple apps. If your app needs
# to store data related to users, it should
# manage its own table with an appropriate foreign key.
AUTH_PROFILE_MODULE = None
LOGIN_REDIRECT_URL = "/"
LOGOUT_REDIRECT_URL = "/" # For djangosaml2 bug.
PYLINTRC = get_desktop_root('.pylintrc')
# Custom CSRF Failure View
CSRF_FAILURE_VIEW = 'desktop.views.csrf_failure'
############################################################
# Part 4: Installation of apps
############################################################
_config_dir = os.getenv("HUE_CONF_DIR", get_desktop_root("conf"))
# Libraries are loaded and configured before the apps
appmanager.load_libs()
_lib_conf_modules = [dict(module=app.conf, config_key=None) for app in appmanager.DESKTOP_LIBS if app.conf is not None]
LOCALE_PATHS.extend([app.locale_path for app in appmanager.DESKTOP_LIBS])
# Load desktop config
_desktop_conf_modules = [dict(module=desktop.conf, config_key=None)]
conf.initialize(_desktop_conf_modules, _config_dir)
# Register the redaction filters into the root logger as soon as possible.
desktop.redaction.register_log_filtering(desktop.conf.get_redaction_policy())
# Activate l10n
# Install apps
appmanager.load_apps(desktop.conf.APP_BLACKLIST.get())
for app in appmanager.DESKTOP_APPS:
INSTALLED_APPS.extend(app.django_apps)
LOCALE_PATHS.append(app.locale_path)
logging.debug("Installed Django modules: %s" % ",".join(map(str, appmanager.DESKTOP_MODULES)))
# Load app configuration
_app_conf_modules = [dict(module=app.conf, config_key=app.config_key) for app in appmanager.DESKTOP_APPS if app.conf is not None]
conf.initialize(_lib_conf_modules, _config_dir)
conf.initialize(_app_conf_modules, _config_dir)
# Now that we've loaded the desktop conf, set the django DEBUG mode based on the conf.
DEBUG = desktop.conf.DJANGO_DEBUG_MODE.get()
TEMPLATE_DEBUG = DEBUG
if DEBUG: # For simplification, force all DEBUG when django_debug_mode is True and re-apply the loggers
os.environ[ENV_DESKTOP_DEBUG] = 'True'
desktop.log.basic_logging(os.environ[ENV_HUE_PROCESS_NAME])
desktop.log.fancy_logging()
############################################################
# Part 4a: Django configuration that requires bound Desktop
# configs.
############################################################
# Configure allowed hosts
ALLOWED_HOSTS = desktop.conf.ALLOWED_HOSTS.get()
X_FRAME_OPTIONS = desktop.conf.X_FRAME_OPTIONS.get()
# Configure hue admins
ADMINS = []
for admin in desktop.conf.DJANGO_ADMINS.get():
admin_conf = desktop.conf.DJANGO_ADMINS[admin]
if 'name' in admin_conf.bind_to and 'email' in admin_conf.bind_to:
ADMINS.append(((admin_conf.NAME.get(), admin_conf.EMAIL.get())))
ADMINS = tuple(ADMINS)
MANAGERS = ADMINS
# Server Email Address
SERVER_EMAIL = desktop.conf.DJANGO_SERVER_EMAIL.get()
# Email backend
EMAIL_BACKEND = desktop.conf.DJANGO_EMAIL_BACKEND.get()
# Configure database
if os.getenv('DESKTOP_DB_CONFIG'):
conn_string = os.getenv('DESKTOP_DB_CONFIG')
logging.debug("DESKTOP_DB_CONFIG SET: %s" % (conn_string))
default_db = dict(zip(
["ENGINE", "NAME", "TEST_NAME", "USER", "PASSWORD", "HOST", "PORT"],
conn_string.split(':')))
default_db['NAME'] = default_db['NAME'].replace('#', ':') # For is_db_alive command
else:
test_name = os.environ.get('DESKTOP_DB_TEST_NAME', get_desktop_root('desktop-test.db'))
logging.debug("DESKTOP_DB_TEST_NAME SET: %s" % test_name)
test_user = os.environ.get('DESKTOP_DB_TEST_USER', 'hue_test')
logging.debug("DESKTOP_DB_TEST_USER SET: %s" % test_user)
default_db = {
"ENGINE" : desktop.conf.DATABASE.ENGINE.get(),
"NAME" : desktop.conf.DATABASE.NAME.get(),
"USER" : desktop.conf.DATABASE.USER.get(),
"SCHEMA" : desktop.conf.DATABASE.SCHEMA.get(),
"PASSWORD" : desktop.conf.get_database_password(),
"HOST" : desktop.conf.DATABASE.HOST.get(),
"PORT" : str(desktop.conf.DATABASE.PORT.get()),
"OPTIONS": force_dict_to_strings(desktop.conf.DATABASE.OPTIONS.get()),
# DB used for tests
"TEST_NAME" : test_name,
"TEST_USER" : test_user,
# Wrap each request in a transaction.
"ATOMIC_REQUESTS" : True,
"CONN_MAX_AGE" : desktop.conf.DATABASE.CONN_MAX_AGE.get(),
}
DATABASES = {
'default': default_db
}
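# Sketch of the DESKTOP_DB_CONFIG format parsed above (values hypothetical):
#
#     ENGINE:NAME:TEST_NAME:USER:PASSWORD:HOST:PORT
#     e.g. "django.db.backends.mysql:hue:test_hue:hue:secret:localhost:3306"
#
# A '#' inside NAME stands in for ':' and is converted back after the split.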
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-hue'
}
}
# Configure sessions
SESSION_COOKIE_NAME = desktop.conf.SESSION.COOKIE_NAME.get()
SESSION_COOKIE_AGE = desktop.conf.SESSION.TTL.get()
SESSION_COOKIE_SECURE = desktop.conf.SESSION.SECURE.get()
SESSION_EXPIRE_AT_BROWSER_CLOSE = desktop.conf.SESSION.EXPIRE_AT_BROWSER_CLOSE.get()
# HTTP only
SESSION_COOKIE_HTTPONLY = desktop.conf.SESSION.HTTP_ONLY.get()
CSRF_COOKIE_SECURE = desktop.conf.SESSION.SECURE.get()
CSRF_COOKIE_HTTPONLY = desktop.conf.SESSION.HTTP_ONLY.get()
CSRF_COOKIE_NAME='csrftoken'
SECURE_HSTS_SECONDS = desktop.conf.SECURE_HSTS_SECONDS.get()
SECURE_HSTS_INCLUDE_SUBDOMAINS = desktop.conf.SECURE_HSTS_INCLUDE_SUBDOMAINS.get()
SECURE_CONTENT_TYPE_NOSNIFF = desktop.conf.SECURE_CONTENT_TYPE_NOSNIFF.get()
SECURE_BROWSER_XSS_FILTER = desktop.conf.SECURE_BROWSER_XSS_FILTER.get()
SECURE_SSL_REDIRECT = desktop.conf.SECURE_SSL_REDIRECT.get()
SECURE_SSL_HOST = desktop.conf.SECURE_SSL_HOST.get()
SECURE_REDIRECT_EXEMPT = desktop.conf.SECURE_REDIRECT_EXEMPT.get()
# django-nose test specifics
TEST_RUNNER = 'desktop.lib.test_runners.HueTestRunner'
# Turn off cache middleware
if 'test' in sys.argv:
CACHE_MIDDLEWARE_SECONDS = 0
# Limit Nose coverage to Hue apps
NOSE_ARGS = [
'--cover-package=%s' % ','.join([app.name for app in appmanager.DESKTOP_APPS + appmanager.DESKTOP_LIBS]),
'--no-path-adjustment',
'--traverse-namespace'
]
TIME_ZONE = desktop.conf.TIME_ZONE.get()
if desktop.conf.DEMO_ENABLED.get():
AUTHENTICATION_BACKENDS = ('desktop.auth.backend.DemoBackend',)
else:
AUTHENTICATION_BACKENDS = tuple(desktop.conf.AUTH.BACKEND.get())
EMAIL_HOST = desktop.conf.SMTP.HOST.get()
EMAIL_PORT = desktop.conf.SMTP.PORT.get()
EMAIL_HOST_USER = desktop.conf.SMTP.USER.get()
EMAIL_HOST_PASSWORD = desktop.conf.get_smtp_password()
EMAIL_USE_TLS = desktop.conf.SMTP.USE_TLS.get()
DEFAULT_FROM_EMAIL = desktop.conf.SMTP.DEFAULT_FROM.get()
# Used for securely creating sessions. Should be unique and not shared with anybody. Changing auth backends will invalidate all open sessions.
SECRET_KEY = desktop.conf.get_secret_key()
if SECRET_KEY:
SECRET_KEY += str(AUTHENTICATION_BACKENDS)
else:
import uuid
SECRET_KEY = str(uuid.uuid4())
# Axes
AXES_LOGIN_FAILURE_LIMIT = desktop.conf.AUTH.LOGIN_FAILURE_LIMIT.get()
AXES_LOCK_OUT_AT_FAILURE = desktop.conf.AUTH.LOGIN_LOCK_OUT_AT_FAILURE.get()
AXES_COOLOFF_TIME = None
if desktop.conf.AUTH.LOGIN_COOLOFF_TIME.get() and desktop.conf.AUTH.LOGIN_COOLOFF_TIME.get() != 0:
AXES_COOLOFF_TIME = desktop.conf.AUTH.LOGIN_COOLOFF_TIME.get()
AXES_USE_USER_AGENT = desktop.conf.AUTH.LOGIN_LOCK_OUT_USE_USER_AGENT.get()
AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = desktop.conf.AUTH.LOGIN_LOCK_OUT_BY_COMBINATION_USER_AND_IP.get()
AXES_BEHIND_REVERSE_PROXY = desktop.conf.AUTH.BEHIND_REVERSE_PROXY.get()
AXES_REVERSE_PROXY_HEADER = desktop.conf.AUTH.REVERSE_PROXY_HEADER.get()
# SAML
SAML_AUTHENTICATION = 'libsaml.backend.SAML2Backend' in AUTHENTICATION_BACKENDS
if SAML_AUTHENTICATION:
from libsaml.saml_settings import *
INSTALLED_APPS.append('libsaml')
LOGIN_URL = '/saml2/login/'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# Middleware classes.
for middleware in desktop.conf.MIDDLEWARE.get():
MIDDLEWARE_CLASSES.append(middleware)
# OpenId
OPENID_AUTHENTICATION = 'libopenid.backend.OpenIDBackend' in AUTHENTICATION_BACKENDS
if OPENID_AUTHENTICATION:
from libopenid.openid_settings import *
INSTALLED_APPS.append('libopenid')
LOGIN_URL = '/openid/login'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# OAuth
OAUTH_AUTHENTICATION='liboauth.backend.OAuthBackend' in AUTHENTICATION_BACKENDS
if OAUTH_AUTHENTICATION:
INSTALLED_APPS.append('liboauth')
LOGIN_URL = '/oauth/accounts/login'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# URL Redirection white list.
if desktop.conf.REDIRECT_WHITELIST.get():
MIDDLEWARE_CLASSES.append('desktop.middleware.EnsureSafeRedirectURLMiddleware')
# Enable X-Forwarded-Host header if the load balancer requires it
USE_X_FORWARDED_HOST = desktop.conf.USE_X_FORWARDED_HOST.get()
# Support HTTPS load-balancing
if desktop.conf.SECURE_PROXY_SSL_HEADER.get():
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Add last activity tracking and idle session timeout
if 'useradmin' in [app.name for app in appmanager.DESKTOP_APPS]:
MIDDLEWARE_CLASSES.append('useradmin.middleware.LastActivityMiddleware')
################################################################
# Register file upload handlers
# This section must go after the desktop lib modules are loaded
################################################################
# Insert our custom upload handlers
file_upload_handlers = [
'hadoop.fs.upload.HDFSfileUploadHandler',
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]
if is_s3_enabled():
file_upload_handlers.insert(0, 'aws.s3.upload.S3FileUploadHandler')
FILE_UPLOAD_HANDLERS = tuple(file_upload_handlers)
############################################################
# Necessary for South to not fuzz with tests. Fixed in South 0.7.1
SKIP_SOUTH_TESTS = True
# Set up environment variable so Kerberos libraries look at our private
# ticket cache
os.environ['KRB5CCNAME'] = desktop.conf.KERBEROS.CCACHE_PATH.get()
if not os.getenv('SERVER_SOFTWARE'):
os.environ['SERVER_SOFTWARE'] = 'apache'
# If Hue is configured to use a CACERTS truststore, make sure that the
# REQUESTS_CA_BUNDLE is set so that we can use it when we make external requests.
# This is for the REST calls made by Hue with the requests library.
if desktop.conf.SSL_CACERTS.get() and os.environ.get('REQUESTS_CA_BUNDLE') is None:
os.environ['REQUESTS_CA_BUNDLE'] = desktop.conf.SSL_CACERTS.get()
# To prevent local build failures, skip validation when REQUESTS_CA_BUNDLE is left at its default value
if os.environ.get('REQUESTS_CA_BUNDLE') and os.environ.get('REQUESTS_CA_BUNDLE') != desktop.conf.SSL_CACERTS.config.default and not os.path.isfile(os.environ['REQUESTS_CA_BUNDLE']):
raise Exception(_('SSL Certificate pointed by REQUESTS_CA_BUNDLE does not exist: %s') % os.environ['REQUESTS_CA_BUNDLE'])
# Memory
if desktop.conf.MEMORY_PROFILER.get():
MEMORY_PROFILER = hpy()
MEMORY_PROFILER.setrelheap()
if not desktop.conf.DATABASE_LOGGING.get():
def disable_database_logging():
from django.db.backends import BaseDatabaseWrapper
from django.db.backends.util import CursorWrapper
BaseDatabaseWrapper.make_debug_cursor = lambda self, cursor: CursorWrapper(cursor, self)
disable_database_logging()
| |
# -*- coding: utf-8 -*-
from collections import OrderedDict
import six
from fixtures_mongoengine import FixturesMongoengineException
from fixtures_mongoengine.fixture import Fixture, get_fixture_class, BaseFixture
"""
Metaclass idea and parts of code taken from https://github.com/croach/Flask-Fixtures
"""
TEST_SETUP_NAMES = ('setUp',)
TEST_TEARDOWN_NAMES = ('tearDown',)
def setup(obj):
"""
:type obj: FixturesMixin
"""
obj.unload_fixtures()
obj.load_fixtures()
def teardown(obj):
"""
:type obj: FixturesMixin
"""
obj.unload_fixtures()
class MetaFixturesMixin(type):
def __new__(mcs, name, bases, attrs):
fixtures_conf = attrs.get('fixtures_conf', [])
# We only need to do something if there's a set of fixtures,
# otherwise, do nothing. The main reason this is here is because this
# method is called when the FixturesMixin class is created and we
# don't want to do any test setup on that class.
if fixtures_conf:
child_setup_fn = mcs.get_child_fn(attrs, TEST_SETUP_NAMES, bases)
child_teardown_fn = mcs.get_child_fn(attrs, TEST_TEARDOWN_NAMES, bases)
attrs[child_setup_fn.__name__] = mcs.setup_handler(setup, child_setup_fn)
attrs[child_teardown_fn.__name__] = mcs.teardown_handler(teardown, child_teardown_fn)
return super(MetaFixturesMixin, mcs).__new__(mcs, name, bases, attrs)
@staticmethod
def setup_handler(setup_fixtures_fn, setup_fn):
"""Returns a function that adds fixtures handling to the setup method.
Makes sure that fixtures are setup before calling the given setup method.
"""
def handler(obj):
setup_fixtures_fn(obj)
setup_fn(obj)
return handler
@staticmethod
def teardown_handler(teardown_fixtures_fn, teardown_fn):
"""Returns a function that adds fixtures handling to the teardown method.
Calls the given teardown method first before calling the fixtures teardown.
"""
def handler(obj):
teardown_fn(obj)
teardown_fixtures_fn(obj)
return handler
@staticmethod
def get_child_fn(attrs, names, bases):
"""Returns a function from the child class that matches one of the names.
Searches the child class's set of methods (i.e., the attrs dict) for all
the functions matching the given list of names. If more than one is found,
an exception is raised, if one is found, it is returned, and if none are
found, a function that calls the default method on each parent class is
returned.
"""
def call_method(obj, method):
"""Calls a method as either a class method or an instance method.
"""
# The __get__ method takes an instance and an owner which changes
# depending on the calling object. If the calling object is a class,
# the instance is None and the owner will be the object itself. If the
# calling object is an instance, the instance will be the calling object
# and the owner will be its class. For more info on the __get__ method,
# see http://docs.python.org/2/reference/datamodel.html#object.__get__.
if isinstance(obj, type):
instance = None
owner = obj
else:
instance = obj
owner = obj.__class__
method.__get__(instance, owner)()
# Create a default function that calls the default method on each parent
default_name = names[0]
def default_fn(obj):
for cls in bases:
if hasattr(cls, default_name):
call_method(obj, getattr(cls, default_name))
default_fn.__name__ = default_name
# Get all of the functions in the child class that match the list of names
fns = [(name, attrs[name]) for name in names if name in attrs]
# Raise an error if more than one setup/teardown method is found
if len(fns) > 1:
raise RuntimeError("Cannot have more than one setup or teardown method per context (class or test).")
# If one setup/teardown function was found, return it
elif len(fns) == 1:
name, fn = fns[0]
def child_fn(obj):
call_method(obj, fn)
child_fn.__name__ = name
return child_fn
# Otherwise, return the default function
else:
return default_fn
class FixturesMixin(six.with_metaclass(MetaFixturesMixin, object)):
fixtures_conf = {}
"""
Declares the fixtures that are needed by the current test case.
The value of this attribute must be a dict mapping fixture aliases to
fixture classes (or instances). For example,
```python
{
'users': UserFixture,
'articles': ArticleFixture
}
```
"""
__fixtures = None
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
if name in self.get_fixtures():
return self.get_fixtures()[name]
raise AttributeError('Attribute "{}" not found.'.format(name))
def load_fixtures(self):
for fixture in self.get_fixtures().values():
fixture.before_load()
for fixture in self.get_fixtures().values():
fixture.load()
fixtures = list(self.get_fixtures().values())
fixtures.reverse()
for fixture in fixtures:
fixture.after_load()
def unload_fixtures(self):
for fixture in self.get_fixtures().values():
fixture.before_unload()
fixtures = list(self.get_fixtures().values())
fixtures.reverse()
for fixture in fixtures:
fixture.unload()
for fixture in fixtures:
fixture.after_unload()
def get_fixtures(self):
"""
:rtype: OrderedDict[Fixture]
"""
if self.__fixtures is None:
self.__fixtures = self._create_fixtures()
return self.__fixtures
def _create_fixtures(self):
aliases = {}
for name, fixture_class in six.iteritems(self.fixtures_conf):
aliases[fixture_class] = name
instances = OrderedDict()
stack = [fixture_class for name, fixture_class in six.iteritems(self.fixtures_conf)]
stack.reverse()
while len(stack) > 0:
fixture = stack.pop()
if isinstance(fixture, BaseFixture):
fixture_class = fixture.__class__
if fixture_class in instances:
del instances[fixture_class]
instances[fixture_class] = fixture
else:
fixture_class = fixture
if fixture_class not in instances:
instances[fixture_class] = None
fixture = fixture_class()
stack.append(fixture)
for dep in fixture.depends.values():
if isinstance(dep, six.string_types):
dep = get_fixture_class(dep)
stack.append(dep)
elif instances[fixture_class] is None:
msg = 'A circular dependency is detected for fixture {}.'.format(fixture_class.__name__)
raise FixturesMongoengineException(msg)
fixtures = OrderedDict()
for fixture_class, fixture in six.iteritems(instances):
fixture.init_depended_fixtures(instances)
name = aliases[fixture_class] if fixture_class in aliases else fixture_class.__name__
fixtures[name] = fixture
return fixtures
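# Usage sketch (UserFixture and ArticleFixture are hypothetical Fixture
# subclasses): a test case mixes in FixturesMixin and declares fixtures_conf;
# fixtures are then reachable as attributes via __getattr__:
#
#     class ArticleTestCase(unittest.TestCase, FixturesMixin):
#         fixtures_conf = {'users': UserFixture, 'articles': ArticleFixture}
#
#         def test_lookup(self):
#             users = self.users  # loaded in setUp, unloaded in tearDown
#
# Dependencies declared via a fixture's `depends` are instantiated once, in
# dependency order; a cycle raises FixturesMongoengineException.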
| |
#!/usr/bin/env python
#
# svndumpfilter_tests.py: testing the 'svndumpfilter' tool.
#
# Subversion is a tool for revision control.
# See http://subversion.apache.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
# General modules
import os
import sys
import tempfile
# Our testing module
import svntest
from svntest.verify import SVNExpectedStdout, SVNExpectedStderr
# Get some helper routines from svnadmin_tests
from svnadmin_tests import load_and_verify_dumpstream, test_create
# (abbreviation)
Skip = svntest.testcase.Skip_deco
SkipUnless = svntest.testcase.SkipUnless_deco
XFail = svntest.testcase.XFail_deco
Issues = svntest.testcase.Issues_deco
Issue = svntest.testcase.Issue_deco
Wimp = svntest.testcase.Wimp_deco
Item = svntest.wc.StateItem
######################################################################
# Helper routines
def filter_and_return_output(dump, bufsize=0, *varargs):
"""Filter the array of lines passed in 'dump' and return the output
and errput"""
if isinstance(dump, str):
dump = [ dump ]
# Does the caller want the stderr?
try:
varargs.index('-q')
expected_errput = None # Stderr with -q or --quiet is a real error!
except ValueError:
try:
varargs.index('--quiet')
expected_errput = None
except ValueError:
expected_errput = svntest.verify.AnyOutput
## TODO: Should we handle exit_code?
exit_code, output, errput = svntest.main.run_command_stdin(
svntest.main.svndumpfilter_binary, expected_errput, bufsize, 1,
dump, *varargs)
# Since we call svntest.main.run_command_stdin() in binary mode,
# normalize the stderr line endings on Windows ourselves.
if sys.platform == 'win32':
errput = map(lambda x : x.replace('\r\n', '\n'), errput)
return output, errput
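# Usage sketch, mirroring the call sites below:
#
#     output, errput = filter_and_return_output(dumpfile, 0,
#                                               'include', 'trunk', '--quiet')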
######################################################################
# Tests
@Issue(2982)
def reflect_dropped_renumbered_revs(sbox):
"reflect dropped renumbered revs in svn:mergeinfo"
## See http://subversion.tigris.org/issues/show_bug.cgi?id=2982. ##
# Test svndumpfilter with include option
test_create(sbox)
dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]),
'svndumpfilter_tests_data',
'with_merges.dump')
dumpfile = open(dumpfile_location).read()
filtered_out, filtered_err = filter_and_return_output(
dumpfile, 0, "include",
"trunk", "branch1",
"--skip-missing-merge-sources",
"--drop-empty-revs",
"--renumber-revs", "--quiet")
load_and_verify_dumpstream(sbox, [], [], None, filtered_out,
"--ignore-uuid")
# Verify the svn:mergeinfo properties
url = sbox.repo_url
expected_output = svntest.verify.UnorderedOutput([
url + "/trunk - /branch1:4-5\n",
])
svntest.actions.run_and_verify_svn(None, expected_output, [],
'propget', 'svn:mergeinfo', '-R',
sbox.repo_url)
# Test svndumpfilter with exclude option
test_create(sbox)
filtered_out, filtered_err = filter_and_return_output(
dumpfile, 0, "exclude", "branch1",
"--skip-missing-merge-sources",
"--drop-empty-revs",
"--renumber-revs", "--quiet")
load_and_verify_dumpstream(sbox, [], [], None, filtered_out,
"--ignore-uuid")
# Verify the svn:mergeinfo properties
expected_output = svntest.verify.UnorderedOutput([
url + "/trunk - \n",
])
svntest.actions.run_and_verify_svn(None, expected_output, [],
'propget', 'svn:mergeinfo', '-R',
sbox.repo_url)
@Issue(3181)
def svndumpfilter_loses_mergeinfo(sbox):
"svndumpfilter loses mergeinfo"
#svndumpfilter loses mergeinfo if invoked without --renumber-revs
## See http://subversion.tigris.org/issues/show_bug.cgi?id=3181. ##
test_create(sbox)
dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]),
'svndumpfilter_tests_data',
'with_merges.dump')
dumpfile = open(dumpfile_location).read()
filtered_out, filtered_err = filter_and_return_output(dumpfile, 0, "include",
"trunk", "branch1",
"--quiet")
load_and_verify_dumpstream(sbox, [], [], None, filtered_out)
# Verify the svn:mergeinfo properties
url = sbox.repo_url
expected_output = svntest.verify.UnorderedOutput([
url + "/trunk - /branch1:4-8\n",
])
svntest.actions.run_and_verify_svn(None, expected_output, [],
'propget', 'svn:mergeinfo', '-R',
sbox.repo_url)
def _simple_dumpfilter_test(sbox, dumpfile, *dumpargs):
wc_dir = sbox.wc_dir
filtered_output, filtered_err = filter_and_return_output(dumpfile, 0,
'--quiet',
*dumpargs)
# Setup our expectations
load_and_verify_dumpstream(sbox, [], [], None, filtered_output,
'--ignore-uuid')
expected_disk = svntest.main.greek_state.copy()
expected_disk.remove('A/B/E/alpha')
expected_disk.remove('A/B/E/beta')
expected_disk.remove('A/B/E')
expected_disk.remove('A/D/H/chi')
expected_disk.remove('A/D/H/psi')
expected_disk.remove('A/D/H/omega')
expected_disk.remove('A/D/H')
expected_disk.remove('A/D/G/pi')
expected_disk.remove('A/D/G/rho')
expected_disk.remove('A/D/G/tau')
expected_disk.remove('A/D/G')
expected_output = svntest.wc.State(wc_dir, {
'A' : Item(status='A '),
'A/B' : Item(status='A '),
'A/B/lambda' : Item(status='A '),
'A/B/F' : Item(status='A '),
'A/mu' : Item(status='A '),
'A/C' : Item(status='A '),
'A/D' : Item(status='A '),
'A/D/gamma' : Item(status='A '),
'iota' : Item(status='A '),
})
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.remove('A/B/E/alpha')
expected_status.remove('A/B/E/beta')
expected_status.remove('A/B/E')
expected_status.remove('A/D/H/chi')
expected_status.remove('A/D/H/psi')
expected_status.remove('A/D/H/omega')
expected_status.remove('A/D/H')
expected_status.remove('A/D/G/pi')
expected_status.remove('A/D/G/rho')
expected_status.remove('A/D/G/tau')
expected_status.remove('A/D/G')
# Check that our paths really were excluded
svntest.actions.run_and_verify_update(wc_dir,
expected_output,
expected_disk,
expected_status)
@Issue(2697)
def dumpfilter_with_targets(sbox):
"svndumpfilter --targets blah"
## See http://subversion.tigris.org/issues/show_bug.cgi?id=2697. ##
test_create(sbox)
dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]),
'svndumpfilter_tests_data',
'greek_tree.dump')
dumpfile = open(dumpfile_location).read()
(fd, targets_file) = tempfile.mkstemp(dir=svntest.main.temp_dir)
try:
targets = open(targets_file, 'w')
targets.write('/A/D/H\n')
targets.write('/A/D/G\n')
targets.close()
_simple_dumpfilter_test(sbox, dumpfile,
'exclude', '/A/B/E', '--targets', targets_file)
finally:
os.close(fd)
os.remove(targets_file)
def dumpfilter_with_patterns(sbox):
"svndumpfilter --pattern PATH_PREFIX"
test_create(sbox)
dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]),
'svndumpfilter_tests_data',
'greek_tree.dump')
dumpfile = open(dumpfile_location).read()
_simple_dumpfilter_test(sbox, dumpfile,
'exclude', '--pattern', '/A/D/[GH]*', '/A/[B]/E*')
#----------------------------------------------------------------------
# More testing for issue #3020 'Reflect dropped/renumbered revisions in
# svn:mergeinfo data during svnadmin load'
#
# Specifically, test that svndumpfilter, when used with the
# --skip-missing-merge-sources option, removes mergeinfo that refers to
# revisions that are older than the oldest revision in the dump stream.
@Issue(3020)
def filter_mergeinfo_revs_outside_of_dump_stream(sbox):
"filter mergeinfo revs outside of dump stream"
test_create(sbox)
# Load a partial dump into an existing repository.
#
# Picture == 1k words:
#
# The dump file we filter in this test, 'mergeinfo_included_partial.dump', is
# a dump of r6:HEAD of the following repos:
#
# __________________________________________
# | |
# | ____________________________|_____
# | | | |
# trunk---r2---r3-----r5---r6-------r8---r9---------------> | |
# r1 | | | | | |
# intial | | | |______ | |
# import copy | copy | merge merge
# | | | merge (r5) (r8)
# | | | (r9) | |
# | | | | | |
# | | V V | |
# | | branches/B2-------r11---r12----> | |
# | | r7 |____| | |
# | | | | |
# | merge |___ | |
# | (r6) | | |
# | |_________________ | | |
# | | merge | |
# | | (r11-12) | |
# | | | | |
# V V V | |
# branches/B1-------------------r10--------r13--> | |
# r4 | |
# | V V
# branches/B1/B/E------------------------------r14---r15->
#
#
# The mergeinfo on the complete repos would look like this:
#
# Properties on 'branches/B1':
# svn:mergeinfo
# /branches/B2:11-12
# /trunk:6,9
# Properties on 'branches/B1/B/E':
# svn:mergeinfo
# /branches/B2/B/E:11-12
# /trunk/B/E:5-6,8-9
# Properties on 'branches/B2':
# svn:mergeinfo
# /trunk:9
#
  # We will run the partial dump through svndumpfilter using the
  # --skip-missing-merge-sources option, which should strip out any revisions < 6.
# Then we'll load the filtered result into an empty repository. This
# should offset the incoming mergeinfo by -5. In addition, any mergeinfo
# revisions that are adjusted to r1 should be removed because that implies
# a merge of -r0:1, which is impossible. The resulting mergeinfo should
# look like this:
#
# Properties on 'branches/B1':
# svn:mergeinfo
# /branches/B2:6-7
# /trunk:4
# Properties on 'branches/B1/B/E':
# svn:mergeinfo
# /branches/B2/B/E:6-7
# /trunk/B/E:3-4
# Properties on 'branches/B2':
# svn:mergeinfo
# /trunk:4
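  # A minimal sketch (not part of the original test) of the offset rule just
  # described: shift each mergeinfo source revision by the offset and drop
  # anything landing at or below r1.  The helper is hypothetical, for
  # exposition only.
  def _offset_mergeinfo_revs(revs, offset):
    # A merge of -r0:1 is impossible, so revisions that reach r1 are removed.
    return [r + offset for r in revs if r + offset > 1]
  assert _offset_mergeinfo_revs([11, 12], -5) == [6, 7]      # /branches/B2:11-12
  assert _offset_mergeinfo_revs([6, 9], -5) == [4]           # /trunk:6,9 (r6 -> r1, dropped)
  assert _offset_mergeinfo_revs([5, 6, 8, 9], -5) == [3, 4]  # /trunk/B/E:5-6,8-9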
partial_dump = os.path.join(os.path.dirname(sys.argv[0]),
'svndumpfilter_tests_data',
'mergeinfo_included_partial.dump')
partial_dump_contents = open(partial_dump).read()
filtered_dumpfile2, filtered_out = filter_and_return_output(
partial_dump_contents,
8192, # Set a sufficiently large bufsize to avoid a deadlock
"include", "trunk", "branches",
"--skip-missing-merge-sources",
"--quiet")
load_and_verify_dumpstream(sbox, [], [], None, filtered_dumpfile2,
'--ignore-uuid')
# Check the resulting mergeinfo.
url = sbox.repo_url + "/branches"
expected_output = svntest.verify.UnorderedOutput([
url + "/B1 - /branches/B2:6-7\n",
"/trunk:4\n",
url + "/B2 - /trunk:4\n",
url + "/B1/B/E - /branches/B2/B/E:6-7\n",
"/trunk/B/E:3-4\n"])
svntest.actions.run_and_verify_svn(None, expected_output, [],
'propget', 'svn:mergeinfo', '-R',
sbox.repo_url)
# Blow away the current repos, create an empty one in its place, and
# then load this skeleton repos into the empty target:
#
# Projects/ (Added r1)
# README (Added r2)
# Project-X (Added r3)
# Project-Y (Added r4)
# Project-Z (Added r5)
# docs/ (Added r6)
# README (Added r6).
test_create(sbox)
skeleton_dumpfile = open(os.path.join(os.path.dirname(sys.argv[0]),
'svnadmin_tests_data',
'skeleton_repos.dump')).read()
load_and_verify_dumpstream(sbox, [], [], None, skeleton_dumpfile,
'--ignore-uuid')
partial_dump2 = os.path.join(os.path.dirname(sys.argv[0]),
'svndumpfilter_tests_data',
'mergeinfo_included_partial.dump')
partial_dump_contents2 = open(partial_dump2).read()
# Now use the partial dump file we used above, but this time exclude
# the B2 branch. Load the filtered dump into the /Projects/Project-X
# subtree of the skeleton repos.
filtered_dumpfile2, filtered_err = filter_and_return_output(
partial_dump_contents2,
8192, # Set a sufficiently large bufsize to avoid a deadlock
"exclude", "branches/B2",
"--skip-missing-merge-sources",
"--drop-empty-revs",
"--renumber-revs")
# Starting with the same expectation we had when loading into an empty
  # repository, adjust each revision by +6 to account for the six revisions
  # already present in the target repos, which gives:
#
# Properties on 'branches/B1':
# svn:mergeinfo
# /branches/B2:12-13
# /trunk:10
# Properties on 'branches/B1/B/E':
# svn:mergeinfo
# /branches/B2/B/E:12-13
# /trunk/B/E:9-10
# Properties on 'branches/B2':
# svn:mergeinfo
# /trunk:10
#
# ...But /branches/B2 has been filtered out, so all references to
# that branch should be gone, leaving:
#
# Properties on 'branches/B1':
# svn:mergeinfo
# /trunk:10
# Properties on 'branches/B1/B/E':
# svn:mergeinfo
# /trunk/B/E:9-10
#
# ...But wait, there's more! Because we use the '--drop-empty-revs'
  # option, when filtering out 'branches/B2' all the revisions that affect
  # only that branch should be dropped (i.e. original revs r7, r11, and r12).
  # In and of itself that has no effect, but we also specify the
# '--renumber-revs' option, so when r7 is dropped, r8 should map to r7,
# r9 to r8, and r10 to r9 (and so on). That should finally leave us with:
#
# Properties on 'branches/B1':
# svn:mergeinfo
# /trunk:9
# Properties on 'branches/B1/B/E':
# svn:mergeinfo
# /trunk/B/E:8-9
#
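  # A minimal sketch (not part of the original test) of the renumbering rule
  # just described: surviving revisions are renumbered contiguously, dropped
  # ones map to nothing.  The helper is hypothetical, for exposition only.
  def _renumber(revs, dropped):
    mapping, next_rev = {}, revs[0]
    for r in revs:
      if r in dropped:
        mapping[r] = None        # dropped
      else:
        mapping[r] = next_rev    # committed as next_rev
        next_rev += 1
    return mapping
  assert _renumber(range(6, 16), set([7, 11, 12])) == {
    6: 6, 7: None, 8: 7, 9: 8, 10: 9, 11: None, 12: None,
    13: 10, 14: 11, 15: 12}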
# Check that all the blather above really happens. First does
# svndumpfilter report what we expect to stderr?
expected_err = [
"Excluding (and dropping empty revisions for) prefixes:\n",
" '/branches/B2'\n",
"\n",
"Revision 6 committed as 6.\n",
"Revision 7 skipped.\n", # <-- DROP!
"Revision 8 committed as 7.\n",
"Revision 9 committed as 8.\n",
"Revision 10 committed as 9.\n",
"Revision 11 skipped.\n", # <-- DROP!
"Revision 12 skipped.\n", # <-- DROP!
"Revision 13 committed as 10.\n",
"Revision 14 committed as 11.\n",
"Revision 15 committed as 12.\n",
"\n",
"Dropped 3 revisions.\n",
"\n",
"Revisions renumbered as follows:\n",
" 15 => 12\n",
" 14 => 11\n",
" 13 => 10\n",
" 12 => (dropped)\n", # <-- DROP!
" 11 => (dropped)\n", # <-- DROP!
" 10 => 9\n",
" 9 => 8\n",
" 8 => 7\n",
" 7 => (dropped)\n", # <-- DROP!
" 6 => 6\n",
"\n",
"Dropped 2 nodes:\n",
" '/branches/B2'\n",
" '/branches/B2/D/H/chi'\n",
"\n"]
svntest.verify.verify_outputs(
"Actual svndumpfilter stderr does not agree with expected stderr",
None, filtered_err, None, expected_err)
# Now actually load the filtered dump into the skeleton repository
# and then check the resulting mergeinfo.
load_and_verify_dumpstream(sbox, [], [], None, filtered_dumpfile2,
'--parent-dir', '/Projects/Project-X',
'--ignore-uuid')
url = sbox.repo_url + "/Projects/Project-X/branches"
expected_output = svntest.verify.UnorderedOutput([
url + "/B1 - /Projects/Project-X/trunk:9\n",
url + "/B1/B/E - /Projects/Project-X/trunk/B/E:8-9\n"])
svntest.actions.run_and_verify_svn(None, expected_output, [],
'propget', 'svn:mergeinfo', '-R',
sbox.repo_url)
#----------------------------------------------------------------------
# More testing for issue #3020 'Reflect dropped/renumbered revisions in
# svn:mergeinfo data during svnadmin load'
#
# Using svndumpfilter with the --drop-empty-revs option, but without the
# --renumber-revs option, can create a dump with non-contiguous revisions.
# Such dumps should not interfere with the correct remapping of mergeinfo
# source revisions.
@Issue(3020)
def dropped_but_not_renumbered_empty_revs(sbox):
"mergeinfo maps correctly when dropping revs"
test_create(sbox)
# The dump file mergeinfo_included_full.dump represents this repository:
#
#
# __________________________________________
# | |
# | ____________________________|_____
# | | | |
# trunk---r2---r3-----r5---r6-------r8---r9---------------> | |
# r1 | | | | | |
# intial | | | |______ | |
# import copy | copy | merge merge
# | | | merge (r5) (r8)
# | | | (r9) | |
# | | | | | |
# | | V V | |
# | | branches/B2-------r11---r12----> | |
# | | r7 |____| | |
# | | | | |
# | merge |___ | |
# | (r6) | | |
# | |_________________ | | |
# | | merge | |
# | | (r11-12) | |
# | | | | |
# V V V | |
# branches/B1-------------------r10--------r13--> | |
# r4 | |
# | V V
# branches/B1/B/E------------------------------r14---r15->
#
#
# The mergeinfo on mergeinfo_included_full.dump is:
#
# Properties on 'branches/B1':
# svn:mergeinfo
# /branches/B2:11-12
# /trunk:6,9
# Properties on 'branches/B1/B/E':
# svn:mergeinfo
# /branches/B2/B/E:11-12
# /trunk/B/E:5-6,8-9
# Properties on 'branches/B2':
# svn:mergeinfo
# /trunk:9
#
# Use svndumpfilter to filter mergeinfo_included_full.dump, excluding
# branches/B2, while dropping, but not renumbering, empty revisions.
#
# Load the filtered dump into an empty repository. Since we are excluding
# /branches/B2 and dropping empty revs, revisions 7, 11, and 12 won't be
# included in the filtered dump.
full_dump = os.path.join(os.path.dirname(sys.argv[0]),
'svnadmin_tests_data',
'mergeinfo_included_full.dump')
full_dump_contents = open(full_dump).read()
filtered_dumpfile, filtered_out = filter_and_return_output(
full_dump_contents,
8192, # Set a sufficiently large bufsize to avoid a deadlock
"exclude", "branches/B2",
"--skip-missing-merge-sources", "--drop-empty-revs")
# Now load the filtered dump into an empty repository.
load_and_verify_dumpstream(sbox, [], [], None, filtered_dumpfile,
'--ignore-uuid')
# The mergeinfo in the newly loaded repos should have no references to the
# dropped branch and the remaining merge source revs should be remapped to
# reflect the fact that the loaded repository no longer has any empty
# revisions:
#
# Properties on 'branches/B1':
# svn:mergeinfo
# /trunk:6,8
# ^
# With r7 dropped, r9 in the incoming
# dump becomes r8 in the loaded repos.
#
# Properties on 'branches/B1/B/E':
# svn:mergeinfo
# /trunk/B/E:5-8
# ^
  #                       With r7 dropped, r8 and r9 in the incoming
  #                       dump become r7 and r8 in the loaded repos.
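  # A minimal sketch (not part of the original test) of that remapping: each
  # source rev shifts down by the number of dropped revisions below it.
  dropped = (7,)
  remap = dict((old, old - len([d for d in dropped if d < old]))
               for old in (5, 6, 8, 9))
  assert remap == {5: 5, 6: 6, 8: 7, 9: 8}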
# Check the resulting mergeinfo.
url = sbox.repo_url + "/branches"
expected_output = svntest.verify.UnorderedOutput([
url + "/B1 - /trunk:6,8\n",
url + "/B1/B/E - /trunk/B/E:5-8\n"])
svntest.actions.run_and_verify_svn(None, expected_output, [],
'propget', 'svn:mergeinfo', '-R',
sbox.repo_url)
@Issue(4234)
def dumpfilter_targets_expect_leading_slash_prefixes(sbox):
"dumpfilter targets expect leading '/' in prefixes"
## See http://subversion.tigris.org/issues/show_bug.cgi?id=4234. ##
test_create(sbox)
dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]),
'svndumpfilter_tests_data',
'greek_tree.dump')
dumpfile = open(dumpfile_location).read()
(fd, targets_file) = tempfile.mkstemp(dir=svntest.main.temp_dir)
try:
targets = open(targets_file, 'w')
# Removing the leading slash in path prefixes should work.
targets.write('A/D/H\n')
targets.write('A/D/G\n')
targets.close()
_simple_dumpfilter_test(sbox, dumpfile,
'exclude', '/A/B/E', '--targets', targets_file)
finally:
os.close(fd)
os.remove(targets_file)
########################################################################
# Run the tests
# list all tests here, starting with None:
test_list = [ None,
reflect_dropped_renumbered_revs,
svndumpfilter_loses_mergeinfo,
dumpfilter_with_targets,
dumpfilter_with_patterns,
filter_mergeinfo_revs_outside_of_dump_stream,
dropped_but_not_renumbered_empty_revs,
dumpfilter_targets_expect_leading_slash_prefixes,
]
if __name__ == '__main__':
svntest.main.run_tests(test_list)
# NOTREACHED
### End of file.
| |
#!/usr/bin/env python3
"""
Periscope API for the masses
"""
import os
from dateutil.parser import parse as dt_parse
class BroadcastDownloadInfo:
"""Contains information about the broadcast's download but not about the broadcast itself"""
def __init__(self):
self.dl_info = dict()
self.dl_info['dl_times'] = list()
self.dl_info['dl_failures'] = 0
self.dl_info['wait_for_replay'] = False
self.dl_info['replay_downloaded'] = False
self.dl_info['last_failure_reason'] = None
@property
def dl_times(self):
"""List of timestamps broadcast download was started or restarted"""
return self.dl_info['dl_times']
@property
def dl_failures(self):
"""Counter for how many times download has failed"""
return self.dl_info['dl_failures']
@dl_failures.setter
def dl_failures(self, value):
"""Sets download failure count"""
self.dl_info['dl_failures'] = value
@property
def failure_reason(self):
"""Gets exception object from the last failure, if any"""
return self.dl_info['last_failure_reason']
@failure_reason.setter
def failure_reason(self, raised_exception):
"""Stores exception object from last failure, if any"""
self.dl_info['last_failure_reason'] = raised_exception
@property
def wait_for_replay(self):
"""Check if broadcast live should be skipped and replay should be waited for"""
return self.dl_info['wait_for_replay']
@wait_for_replay.setter
def wait_for_replay(self, boolean):
"""Return whether or not live download should be skipped and replay should be waited for"""
self.dl_info['wait_for_replay'] = bool(boolean)
@property
def replay_downloaded(self):
"""Boolean indicating whether or not a replay of the broadcast has been downloaded"""
return self.dl_info['replay_downloaded']
@replay_downloaded.setter
def replay_downloaded(self, boolean):
"""Set indicator for whether or not a replay of the broadcast has been downloaded"""
self.dl_info['replay_downloaded'] = bool(boolean)
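# The sketch below (not part of the original module) shows how the
# bookkeeping above is meant to be used; the function name and the error
# value are hypothetical.
def _download_info_sketch():
    """Record one failed download attempt, then flag the broadcast so the
    replay is waited for instead of the live stream."""
    import time
    info = BroadcastDownloadInfo()
    info.dl_times.append(time.time())                     # download started
    info.dl_failures += 1                                 # ...and failed once
    info.failure_reason = RuntimeError("stream stalled")  # hypothetical error
    info.wait_for_replay = True   # skip live, wait for the replay instead
    return info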
class Broadcast(BroadcastDownloadInfo):
"""Broadcast object"""
def __init__(self, api, broadcast):
super().__init__()
self.api = api
self.info = broadcast
self.cookie = self.api.session.config.get('cookie')[:]
self.lock_name = False
self._original_title = self.title
self._original_filetitle = self.filetitle
self.dl_info['download_directory'] = self.api.session.config.get('download_directory')[:]
self.dl_info['separate_folders'] = self.api.session.config.get('separate_folders')
def update_info(self):
"""Updates broadcast object with latest info from periscope"""
updates = self.api.get_broadcast_info(self.id)
if not updates:
self.info['available_for_replay'] = False
self.info['state'] = "DELETED"
else:
self.info = updates
self._original_title = self.title
self._original_filetitle = self.filetitle
def num_restarts(self, span=10):
"""Gets number of times download has been started within past span seconds"""
if len(self.dl_times) > 0:
return len([i for i in self.dl_times if i > self.dl_times[-1] - span])
return 0
@property
def download_directory(self):
"""Returns broadcast download directory"""
if self.dl_info['separate_folders']:
return os.path.join(self.dl_info['download_directory'], self.username)
return self.dl_info['download_directory']
@property
def id(self):
"""Returns broadcast id"""
return self.info['id']
@property
def username(self):
"""Returns broadcaster username"""
return self.info['username']
@property
def start(self):
"""Returns ATOM string indicating when broadcast started"""
return self.info['start']
@property
def start_dt(self):
"""Datetime object version of broadcast start time"""
return dt_parse(self.info['start'])
@property
def startdate(self):
"""Human-readable date string of when broadcast started"""
return self.start_dt.strftime('%m/%d/%Y')
@property
def starttime(self):
"""Human-readable time string of when broadcast started"""
return self.start_dt.strftime('%H:%M:%S')
@property
def title(self):
"""Title of broadcast (in the context of the downloader)"""
if not self.lock_name:
suffix = []
if not self.islive:
suffix.append('REPLAY')
if self.private:
suffix.append('PRIVATE')
self._original_title = ' '.join(
[self.username, self.startdate, self.starttime, self.id, ' '.join(suffix)])
return self._original_title.strip()
@property
def filepathname(self):
"""Get filename for broadcast, including path, without extension"""
return os.path.join(self.download_directory, self.filetitle)
@property
def filetitle(self):
"""Version of title safe for use as a filename"""
if not self.lock_name:
if self.islive:
self._original_filetitle = self.title.replace('/', '-').replace(':', '-') + '.live'
else:
self._original_filetitle = self.title.replace('/', '-').replace(':', '-')
return self._original_filetitle
@property
def islive(self):
"""Check if broadcast is running or not"""
if self.info['state'] == 'RUNNING':
return True
return False
@property
def isreplay(self):
"""Check if broadcast is replay or not"""
if self.available and not self.islive:
return True
return False
@property
def isnewer(self):
"""Check if broadcast is newer than last broadcast time"""
last_broadcast = self.api.session.config.get('last_check')
if not last_broadcast:
return None
elif self.start_dt > dt_parse(last_broadcast):
return True
else:
return False
@property
def state(self):
"""Get broadcast state string"""
return self.info['state']
@property
def available(self):
"""Check if broadcast is available for replay"""
return self.info['available_for_replay']
@property
def private(self):
"""Boolean indicating if broadcast is private or not"""
return self.info['is_locked']
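# A sketch (not part of the original module) of how the title/filetitle
# properties above compose, using hypothetical broadcast values: a private
# replay started 2016-01-31 18:30:00 by 'some_user' with id '1aBcD' yields
#
#   title:     'some_user 01/31/2016 18:30:00 1aBcD REPLAY PRIVATE'
#   filetitle: 'some_user 01-31-2016 18-30-00 1aBcD REPLAY PRIVATE'
#
# filetitle replaces '/' and ':' so the result is filesystem-safe; a live
# broadcast would instead carry a '.live' suffix.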
| |
# orm/interfaces.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""
Contains various base classes used throughout the ORM.
Defines some key base classes prominent within the internals.
This module and the classes within are mostly private, though some attributes
are exposed when inspecting mappings.
"""
from __future__ import absolute_import
import collections
from . import exc as orm_exc
from . import path_registry
from .base import _MappedAttribute # noqa
from .base import EXT_CONTINUE
from .base import EXT_SKIP
from .base import EXT_STOP
from .base import InspectionAttr # noqa
from .base import InspectionAttrInfo # noqa
from .base import MANYTOMANY
from .base import MANYTOONE
from .base import NOT_EXTENSION
from .base import ONETOMANY
from .. import inspect
from .. import inspection
from .. import util
from ..sql import operators
from ..sql import roles
from ..sql import visitors
from ..sql.base import ExecutableOption
from ..sql.traversals import HasCacheKey
__all__ = (
"EXT_CONTINUE",
"EXT_STOP",
"EXT_SKIP",
"ONETOMANY",
"MANYTOMANY",
"MANYTOONE",
"NOT_EXTENSION",
"LoaderStrategy",
"MapperOption",
"LoaderOption",
"MapperProperty",
"PropComparator",
"StrategizedProperty",
)
class ORMStatementRole(roles.StatementRole):
_role_name = (
"Executable SQL or text() construct, including ORM " "aware objects"
)
class ORMColumnsClauseRole(roles.ColumnsClauseRole):
_role_name = "ORM mapped entity, aliased entity, or Column expression"
class ORMEntityColumnsClauseRole(ORMColumnsClauseRole):
_role_name = "ORM mapped or aliased entity"
class ORMFromClauseRole(roles.StrictFromClauseRole):
_role_name = "ORM mapped entity, aliased entity, or FROM expression"
@inspection._self_inspects
class MapperProperty(
HasCacheKey, _MappedAttribute, InspectionAttr, util.MemoizedSlots
):
"""Represent a particular class attribute mapped by :class:`_orm.Mapper`.
The most common occurrences of :class:`.MapperProperty` are the
mapped :class:`_schema.Column`, which is represented in a mapping as
an instance of :class:`.ColumnProperty`,
and a reference to another class produced by :func:`_orm.relationship`,
represented in the mapping as an instance of
:class:`.RelationshipProperty`.
"""
__slots__ = (
"_configure_started",
"_configure_finished",
"parent",
"key",
"info",
)
_cache_key_traversal = [
("parent", visitors.ExtendedInternalTraversal.dp_has_cache_key),
("key", visitors.ExtendedInternalTraversal.dp_string),
]
cascade = frozenset()
"""The set of 'cascade' attribute names.
This collection is checked before the 'cascade_iterator' method is called.
The collection typically only applies to a RelationshipProperty.
"""
is_property = True
"""Part of the InspectionAttr interface; states this object is a
mapper property.
"""
def _memoized_attr_info(self):
"""Info dictionary associated with the object, allowing user-defined
data to be associated with this :class:`.InspectionAttr`.
The dictionary is generated when first accessed. Alternatively,
it can be specified as a constructor argument to the
:func:`.column_property`, :func:`_orm.relationship`, or
:func:`.composite`
functions.
.. versionchanged:: 1.0.0 :attr:`.MapperProperty.info` is also
available on extension types via the
:attr:`.InspectionAttrInfo.info` attribute, so that it can apply
to a wider variety of ORM and extension constructs.
.. seealso::
:attr:`.QueryableAttribute.info`
:attr:`.SchemaItem.info`
"""
return {}
def setup(self, context, query_entity, path, adapter, **kwargs):
"""Called by Query for the purposes of constructing a SQL statement.
Each MapperProperty associated with the target mapper processes the
statement referenced by the query context, adding columns and/or
criterion as appropriate.
"""
def create_row_processor(
self, context, query_entity, path, mapper, result, adapter, populators
):
"""Produce row processing functions and append to the given
set of populators lists.
"""
def cascade_iterator(
self, type_, state, dict_, visited_states, halt_on=None
):
"""Iterate through instances related to the given instance for
a particular 'cascade', starting with this MapperProperty.
        Return an iterator of 3-tuples (instance, mapper, state).
Note that the 'cascade' collection on this MapperProperty is
checked first for the given type before cascade_iterator is called.
This method typically only applies to RelationshipProperty.
"""
return iter(())
def set_parent(self, parent, init):
"""Set the parent mapper that references this MapperProperty.
This method is overridden by some subclasses to perform extra
setup when the mapper is first known.
"""
self.parent = parent
def instrument_class(self, mapper):
"""Hook called by the Mapper to the property to initiate
instrumentation of the class attribute managed by this
MapperProperty.
The MapperProperty here will typically call out to the
attributes module to set up an InstrumentedAttribute.
This step is the first of two steps to set up an InstrumentedAttribute,
and is called early in the mapper setup process.
The second step is typically the init_class_attribute step,
called from StrategizedProperty via the post_instrument_class()
hook. This step assigns additional state to the InstrumentedAttribute
(specifically the "impl") which has been determined after the
MapperProperty has determined what kind of persistence
management it needs to do (e.g. scalar, object, collection, etc).
"""
def __init__(self):
self._configure_started = False
self._configure_finished = False
def init(self):
"""Called after all mappers are created to assemble
relationships between mappers and perform other post-mapper-creation
initialization steps.
"""
self._configure_started = True
self.do_init()
self._configure_finished = True
@property
def class_attribute(self):
"""Return the class-bound descriptor corresponding to this
:class:`.MapperProperty`.
This is basically a ``getattr()`` call::
return getattr(self.parent.class_, self.key)
I.e. if this :class:`.MapperProperty` were named ``addresses``,
and the class to which it is mapped is ``User``, this sequence
is possible::
>>> from sqlalchemy import inspect
>>> mapper = inspect(User)
>>> addresses_property = mapper.attrs.addresses
>>> addresses_property.class_attribute is User.addresses
True
>>> User.addresses.property is addresses_property
True
"""
return getattr(self.parent.class_, self.key)
def do_init(self):
"""Perform subclass-specific initialization post-mapper-creation
steps.
This is a template method called by the ``MapperProperty``
object's init() method.
"""
def post_instrument_class(self, mapper):
"""Perform instrumentation adjustments that need to occur
after init() has completed.
The given Mapper is the Mapper invoking the operation, which
may not be the same Mapper as self.parent in an inheritance
scenario; however, Mapper will always at least be a sub-mapper of
self.parent.
This method is typically used by StrategizedProperty, which delegates
it to LoaderStrategy.init_class_attribute() to perform final setup
on the class-bound InstrumentedAttribute.
"""
def merge(
self,
session,
source_state,
source_dict,
dest_state,
dest_dict,
load,
_recursive,
_resolve_conflict_map,
):
"""Merge the attribute represented by this ``MapperProperty``
from source to destination object.
"""
def __repr__(self):
return "<%s at 0x%x; %s>" % (
self.__class__.__name__,
id(self),
getattr(self, "key", "no key"),
)
@inspection._self_inspects
class PropComparator(operators.ColumnOperators):
r"""Defines SQL operators for :class:`.MapperProperty` objects.
SQLAlchemy allows for operators to
be redefined at both the Core and ORM level. :class:`.PropComparator`
is the base class of operator redefinition for ORM-level operations,
including those of :class:`.ColumnProperty`,
:class:`.RelationshipProperty`, and :class:`.CompositeProperty`.
.. note:: With the advent of Hybrid properties introduced in SQLAlchemy
0.7, as well as Core-level operator redefinition in
SQLAlchemy 0.8, the use case for user-defined :class:`.PropComparator`
instances is extremely rare. See :ref:`hybrids_toplevel` as well
as :ref:`types_operators`.
User-defined subclasses of :class:`.PropComparator` may be created. The
built-in Python comparison and math operator methods, such as
:meth:`.operators.ColumnOperators.__eq__`,
:meth:`.operators.ColumnOperators.__lt__`, and
:meth:`.operators.ColumnOperators.__add__`, can be overridden to provide
new operator behavior. The custom :class:`.PropComparator` is passed to
the :class:`.MapperProperty` instance via the ``comparator_factory``
argument. In each case,
the appropriate subclass of :class:`.PropComparator` should be used::
# definition of custom PropComparator subclasses
from sqlalchemy.orm.properties import \
ColumnProperty,\
CompositeProperty,\
RelationshipProperty
class MyColumnComparator(ColumnProperty.Comparator):
def __eq__(self, other):
return self.__clause_element__() == other
class MyRelationshipComparator(RelationshipProperty.Comparator):
def any(self, expression):
"define the 'any' operation"
# ...
class MyCompositeComparator(CompositeProperty.Comparator):
def __gt__(self, other):
"redefine the 'greater than' operation"
return sql.and_(*[a>b for a, b in
zip(self.__clause_element__().clauses,
other.__composite_values__())])
# application of custom PropComparator subclasses
from sqlalchemy.orm import column_property, relationship, composite
from sqlalchemy import Column, String
class SomeMappedClass(Base):
some_column = column_property(Column("some_column", String),
comparator_factory=MyColumnComparator)
some_relationship = relationship(SomeOtherClass,
comparator_factory=MyRelationshipComparator)
some_composite = composite(
Column("a", String), Column("b", String),
comparator_factory=MyCompositeComparator
)
Note that for column-level operator redefinition, it's usually
simpler to define the operators at the Core level, using the
:attr:`.TypeEngine.comparator_factory` attribute. See
:ref:`types_operators` for more detail.
.. seealso::
:class:`.ColumnProperty.Comparator`
:class:`.RelationshipProperty.Comparator`
:class:`.CompositeProperty.Comparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
__slots__ = "prop", "property", "_parententity", "_adapt_to_entity"
__visit_name__ = "orm_prop_comparator"
def __init__(
self,
prop,
parentmapper,
adapt_to_entity=None,
):
self.prop = self.property = prop
self._parententity = adapt_to_entity or parentmapper
self._adapt_to_entity = adapt_to_entity
def __clause_element__(self):
raise NotImplementedError("%r" % self)
def _bulk_update_tuples(self, value):
"""Receive a SQL expression that represents a value in the SET
clause of an UPDATE statement.
Return a tuple that can be passed to a :class:`_expression.Update`
construct.
"""
return [(self.__clause_element__(), value)]
def adapt_to_entity(self, adapt_to_entity):
"""Return a copy of this PropComparator which will use the given
:class:`.AliasedInsp` to produce corresponding expressions.
"""
return self.__class__(self.prop, self._parententity, adapt_to_entity)
@property
def _parentmapper(self):
"""legacy; this is renamed to _parententity to be
compatible with QueryableAttribute."""
return inspect(self._parententity).mapper
@property
def _propagate_attrs(self):
# this suits the case in coercions where we don't actually
# call ``__clause_element__()`` but still need to get
# resolved._propagate_attrs. See #6558.
return util.immutabledict(
{
"compile_state_plugin": "orm",
"plugin_subject": self._parentmapper,
}
)
@property
def adapter(self):
"""Produce a callable that adapts column expressions
to suit an aliased version of this comparator.
"""
if self._adapt_to_entity is None:
return None
else:
return self._adapt_to_entity._adapt_element
@property
def info(self):
return self.property.info
@staticmethod
def any_op(a, b, **kwargs):
return a.any(b, **kwargs)
@staticmethod
def has_op(a, b, **kwargs):
return a.has(b, **kwargs)
@staticmethod
def of_type_op(a, class_):
return a.of_type(class_)
def of_type(self, class_):
r"""Redefine this object in terms of a polymorphic subclass,
:func:`.with_polymorphic` construct, or :func:`.aliased` construct.
Returns a new PropComparator from which further criterion can be
evaluated.
e.g.::
query.join(Company.employees.of_type(Engineer)).\
filter(Engineer.name=='foo')
:param \class_: a class or mapper indicating that criterion will be
against this specific subclass.
.. seealso::
:ref:`inheritance_of_type`
"""
return self.operate(PropComparator.of_type_op, class_)
def and_(self, *criteria):
"""Add additional criteria to the ON clause that's represented by this
relationship attribute.
E.g.::
stmt = select(User).join(
User.addresses.and_(Address.email_address != 'foo')
)
stmt = select(User).options(
joinedload(User.addresses.and_(Address.email_address != 'foo'))
)
.. versionadded:: 1.4
.. seealso::
:ref:`orm_queryguide_join_on_augmented`
:ref:`loader_option_criteria`
:func:`.with_loader_criteria`
"""
return self.operate(operators.and_, *criteria)
def any(self, criterion=None, **kwargs):
r"""Return true if this collection contains any member that meets the
given criterion.
The usual implementation of ``any()`` is
:meth:`.RelationshipProperty.Comparator.any`.
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class
attribute names which will be compared via equality to the
corresponding values.
"""
return self.operate(PropComparator.any_op, criterion, **kwargs)
def has(self, criterion=None, **kwargs):
r"""Return true if this element references a member which meets the
given criterion.
The usual implementation of ``has()`` is
:meth:`.RelationshipProperty.Comparator.has`.
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class
attribute names which will be compared via equality to the
corresponding values.
"""
return self.operate(PropComparator.has_op, criterion, **kwargs)
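# A usage sketch (not part of the SQLAlchemy source) tying the operators
# above together; User, Address, Company and Engineer are hypothetical
# mapped classes:
#
#     # collection membership via any():
#     session.query(User).filter(
#         User.addresses.any(Address.email_address == 'foo'))
#
#     # scalar reference via has(), using the kwargs-equality form:
#     session.query(Address).filter(Address.user.has(name='ed'))
#
#     # narrowing criteria to a subclass via of_type():
#     session.query(Company).join(Company.employees.of_type(Engineer))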
class StrategizedProperty(MapperProperty):
"""A MapperProperty which uses selectable strategies to affect
loading behavior.
There is a single strategy selected by default. Alternate
strategies can be selected at Query time through the usage of
``StrategizedOption`` objects via the Query.options() method.
The mechanics of StrategizedProperty are used for every Query
invocation for every mapped attribute participating in that Query,
to determine first how the attribute will be rendered in SQL
and secondly how the attribute will retrieve a value from a result
row and apply it to a mapped object. The routines here are very
performance-critical.
"""
__slots__ = (
"_strategies",
"strategy",
"_wildcard_token",
"_default_path_loader_key",
)
inherit_cache = True
strategy_wildcard_key = None
def _memoized_attr__wildcard_token(self):
return (
"%s:%s"
% (self.strategy_wildcard_key, path_registry._WILDCARD_TOKEN),
)
def _memoized_attr__default_path_loader_key(self):
return (
"loader",
(
"%s:%s"
% (self.strategy_wildcard_key, path_registry._DEFAULT_TOKEN),
),
)
def _get_context_loader(self, context, path):
load = None
search_path = path[self]
# search among: exact match, "attr.*", "default" strategy
# if any.
for path_key in (
search_path._loader_key,
search_path._wildcard_path_loader_key,
search_path._default_path_loader_key,
):
if path_key in context.attributes:
load = context.attributes[path_key]
break
return load
def _get_strategy(self, key):
try:
return self._strategies[key]
except KeyError:
pass
# run outside to prevent transfer of exception context
cls = self._strategy_lookup(self, *key)
# this previously was setting self._strategies[cls], that's
# a bad idea; should use strategy key at all times because every
# strategy has multiple keys at this point
self._strategies[key] = strategy = cls(self, key)
return strategy
def setup(self, context, query_entity, path, adapter, **kwargs):
loader = self._get_context_loader(context, path)
if loader and loader.strategy:
strat = self._get_strategy(loader.strategy)
else:
strat = self.strategy
strat.setup_query(
context, query_entity, path, loader, adapter, **kwargs
)
def create_row_processor(
self, context, query_entity, path, mapper, result, adapter, populators
):
loader = self._get_context_loader(context, path)
if loader and loader.strategy:
strat = self._get_strategy(loader.strategy)
else:
strat = self.strategy
strat.create_row_processor(
context,
query_entity,
path,
loader,
mapper,
result,
adapter,
populators,
)
def do_init(self):
self._strategies = {}
self.strategy = self._get_strategy(self.strategy_key)
def post_instrument_class(self, mapper):
if (
not self.parent.non_primary
and not mapper.class_manager._attr_has_impl(self.key)
):
self.strategy.init_class_attribute(mapper)
_all_strategies = collections.defaultdict(dict)
@classmethod
def strategy_for(cls, **kw):
def decorate(dec_cls):
# ensure each subclass of the strategy has its
# own _strategy_keys collection
if "_strategy_keys" not in dec_cls.__dict__:
dec_cls._strategy_keys = []
key = tuple(sorted(kw.items()))
cls._all_strategies[cls][key] = dec_cls
dec_cls._strategy_keys.append(key)
return dec_cls
return decorate
@classmethod
def _strategy_lookup(cls, requesting_property, *key):
requesting_property.parent._with_polymorphic_mappers
for prop_cls in cls.__mro__:
if prop_cls in cls._all_strategies:
strategies = cls._all_strategies[prop_cls]
try:
return strategies[key]
except KeyError:
pass
for property_type, strats in cls._all_strategies.items():
if key in strats:
intended_property_type = property_type
actual_strategy = strats[key]
break
else:
intended_property_type = None
actual_strategy = None
raise orm_exc.LoaderStrategyException(
cls,
requesting_property,
intended_property_type,
actual_strategy,
key,
)
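def _strategy_registry_sketch():
    """A minimal sketch (not part of the SQLAlchemy source) of the
    strategy_for() registry above: a strategy class is stored under its
    sorted keyword tuple and can be looked up by the same key.  Both
    subclasses below are hypothetical, for exposition only."""
    class DemoProperty(StrategizedProperty):
        strategy_wildcard_key = "demo"
        inherit_cache = True
    @DemoProperty.strategy_for(lazy="noop")
    class NoopStrategy(LoaderStrategy):
        pass
    # strategy_for() stored the class under the sorted-kwargs key...
    key = (("lazy", "noop"),)
    assert DemoProperty._all_strategies[DemoProperty][key] is NoopStrategy
    # ...and recorded the key on the strategy class itself.
    assert NoopStrategy._strategy_keys == [key]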
class ORMOption(ExecutableOption):
"""Base class for option objects that are passed to ORM queries.
These options may be consumed by :meth:`.Query.options`,
:meth:`.Select.options`, or in a more general sense by any
:meth:`.Executable.options` method. They are interpreted at
statement compile time or execution time in modern use. The
deprecated :class:`.MapperOption` is consumed at ORM query construction
time.
.. versionadded:: 1.4
"""
__slots__ = ()
_is_legacy_option = False
propagate_to_loaders = False
"""if True, indicate this option should be carried along
to "secondary" SELECT statements that occur for relationship
lazy loaders as well as attribute load / refresh operations.
"""
_is_compile_state = False
_is_criteria_option = False
_is_strategy_option = False
class LoaderOption(ORMOption):
"""Describe a loader modification to an ORM statement at compilation time.
.. versionadded:: 1.4
"""
_is_compile_state = True
def process_compile_state_replaced_entities(
self, compile_state, mapper_entities
):
"""Apply a modification to a given :class:`.CompileState`,
given entities that were replaced by with_only_columns() or
with_entities().
.. versionadded:: 1.4.19
"""
self.process_compile_state(compile_state)
def process_compile_state(self, compile_state):
"""Apply a modification to a given :class:`.CompileState`."""
class CriteriaOption(ORMOption):
"""Describe a WHERE criteria modification to an ORM statement at
compilation time.
.. versionadded:: 1.4
"""
_is_compile_state = True
_is_criteria_option = True
def process_compile_state(self, compile_state):
"""Apply a modification to a given :class:`.CompileState`."""
def get_global_criteria(self, attributes):
"""update additional entity criteria options in the given
attributes dictionary.
"""
class UserDefinedOption(ORMOption):
"""Base class for a user-defined option that can be consumed from the
:meth:`.SessionEvents.do_orm_execute` event hook.
"""
_is_legacy_option = False
propagate_to_loaders = False
"""if True, indicate this option should be carried along
to "secondary" Query objects produced during lazy loads
or refresh operations.
"""
def __init__(self, payload=None):
self.payload = payload
@util.deprecated_cls(
"1.4",
"The :class:`.MapperOption class is deprecated and will be removed "
"in a future release. For "
"modifications to queries on a per-execution basis, use the "
":class:`.UserDefinedOption` class to establish state within a "
":class:`.Query` or other Core statement, then use the "
":meth:`.SessionEvents.before_orm_execute` hook to consume them.",
constructor=None,
)
class MapperOption(ORMOption):
"""Describe a modification to a Query"""
_is_legacy_option = True
propagate_to_loaders = False
"""if True, indicate this option should be carried along
to "secondary" Query objects produced during lazy loads
or refresh operations.
"""
def process_query(self, query):
"""Apply a modification to the given :class:`_query.Query`."""
def process_query_conditionally(self, query):
"""same as process_query(), except that this option may not
apply to the given query.
This is typically applied during a lazy load or scalar refresh
operation to propagate options stated in the original Query to the
new Query being used for the load. It occurs for those options that
specify propagate_to_loaders=True.
"""
self.process_query(query)
class LoaderStrategy(object):
"""Describe the loading behavior of a StrategizedProperty object.
The ``LoaderStrategy`` interacts with the querying process in three
ways:
* it controls the configuration of the ``InstrumentedAttribute``
placed on a class to handle the behavior of the attribute. this
may involve setting up class-level callable functions to fire
off a select operation when the attribute is first accessed
(i.e. a lazy load)
* it processes the ``QueryContext`` at statement construction time,
where it can modify the SQL statement that is being produced.
For example, simple column attributes will add their represented
column to the list of selected columns, a joined eager loader
may establish join clauses to add to the statement.
* It produces "row processor" functions at result fetching time.
These "row processor" functions populate a particular attribute
on a particular mapped instance.
"""
__slots__ = (
"parent_property",
"is_class_level",
"parent",
"key",
"strategy_key",
"strategy_opts",
)
def __init__(self, parent, strategy_key):
self.parent_property = parent
self.is_class_level = False
self.parent = self.parent_property.parent
self.key = self.parent_property.key
self.strategy_key = strategy_key
self.strategy_opts = dict(strategy_key)
def init_class_attribute(self, mapper):
pass
def setup_query(
self, compile_state, query_entity, path, loadopt, adapter, **kwargs
):
"""Establish column and other state for a given QueryContext.
This method fulfills the contract specified by MapperProperty.setup().
StrategizedProperty delegates its setup() method
directly to this method.
"""
def create_row_processor(
self,
context,
query_entity,
path,
loadopt,
mapper,
result,
adapter,
populators,
):
"""Establish row processing functions for a given QueryContext.
This method fulfills the contract specified by
MapperProperty.create_row_processor().
StrategizedProperty delegates its create_row_processor() method
directly to this method.
"""
def __str__(self):
return str(self.parent_property)
| |
# Copyright (C) 2004 Anthony Baxter
from xshtoom.rtp.formats import PT_PCMU, PT_GSM, PT_SPEEX, PT_DVI4, PT_RAW
from xshtoom.rtp.formats import PT_PCMA, PT_ILBC
from xshtoom.rtp.formats import PT_CN, PT_xCN
from xshtoom.avail import codecs
from xshtoom.audio import aufile, playout
from zope.interface import Interface, implements
from twisted.python import log
import sets, struct
try:
import audioop
except ImportError:
audioop = None
class NullEncoder:
def handle_audio(self, data):
pass
nullencoder = NullEncoder()
class MediaSample:
def __init__(self, ct, data):
self.ct = ct
self.data = data
def __repr__(self):
return "<%s/%s, %s>" % (self.__class__.__name__, self.ct, `self.data`,)
class NullConv:
# XXX Should be refactored away - MediaLayer is the only derived class
def __init__(self, device):
self._d = device
def getDevice(self):
return self._d
def setDevice(self, d):
self._d = d
def getFormats(self):
if self._d:
return self._d.getFormats()
def write(self, data):
if self._d:
return self._d.write(data)
def close(self):
if self._d:
log.msg("audio device %r close"%(self._d,), system="audio")
return self._d.close()
def reopen(self):
if self._d:
log.msg("audio device %r reopen ..."%(self._d,), system="audio")
return self._d.reopen()
def isOpen(self):
if self._d:
return self._d.isOpen()
def __repr__(self):
return '<%s wrapped around %r>'%(self.__class__.__name__, self._d)
def isLittleEndian():
    p = struct.pack('H', 1)
if p == '\x01\x00':
return True
elif p == '\x00\x01':
return False
else:
raise ValueError("insane endian-check result %r"%(p))
class IAudioCodec(Interface):
def buffer_and_encode(self, bytes):
"encode bytes, a string of audio"
def decode(self, bytes):
"decode bytes, a string of audio"
class _Codec:
"Base class for codecs"
implements(IAudioCodec)
def __init__(self, samplesize):
self.samplesize = samplesize
self.b = ''
def buffer_and_encode(self, bytes):
self.b += bytes
res = []
while len(self.b) >= self.samplesize:
sample, self.b = self.b[:self.samplesize], self.b[self.samplesize:]
res.append(self._encode(sample))
return res
class GSMCodec(_Codec):
def __init__(self):
_Codec.__init__(self, 320)
if isLittleEndian():
self.enc = codecs.gsm.gsm(codecs.gsm.LITTLE)
self.dec = codecs.gsm.gsm(codecs.gsm.LITTLE)
else:
self.enc = codecs.gsm.gsm(codecs.gsm.BIG)
self.dec = codecs.gsm.gsm(codecs.gsm.BIG)
def _encode(self, bytes):
assert isinstance(bytes, str), bytes
return self.enc.encode(bytes)
def decode(self, bytes):
assert isinstance(bytes, str), bytes
if len(bytes) != 33:
log.msg("GSM: short read on decode, %d != 33"%len(bytes),
system="codec")
return None
return self.dec.decode(bytes)
class SpeexCodec(_Codec):
"A codec for Speex"
def __init__(self):
self.enc = codecs.speex.new(8)
self.dec = codecs.speex.new(8)
_Codec.__init__(self, 320)
def _encode(self, bytes, unpack=struct.unpack):
frames = list(unpack('160h', bytes))
return self.enc.encode(frames)
def decode(self, bytes):
if len(bytes) != 40:
log.msg("speex: short read on decode %d != 40"%len(bytes),
system="codec")
return None
frames = self.dec.decode(bytes)
ostr = struct.pack('160h', *frames)
return ostr
class MulawCodec(_Codec):
"A codec for mulaw encoded audio (G.711U, PCMU)"
def __init__(self):
_Codec.__init__(self, 320)
self.buf = ''
def _encode(self, bytes):
return audioop.lin2ulaw(bytes, 2)
def decode(self, bytes):
if len(bytes) != 160:
log.msg("mulaw: short read on decode, %d != 160"%len(bytes),
system="codec")
        if 0:  # disabled experiment: rebuffer decoded audio into 160-byte chunks
bytes = audioop.ulaw2lin(bytes, 2)
self.buf += bytes
if len(self.buf) > 159:
out, self.buf = self.buf[:160], self.buf[160:]
return out
else:
return audioop.ulaw2lin(bytes, 2)
class AlawCodec(_Codec):
"A codec for alaw encoded audio (G.711A, PCMA)"
def __init__(self):
_Codec.__init__(self, 320)
def _encode(self, bytes):
return audioop.lin2alaw(bytes, 2)
def decode(self, bytes):
if len(bytes) != 160:
log.msg("alaw: short read on decode, %d != 160"%len(bytes),
system="codec")
return audioop.alaw2lin(bytes, 2)
class NullCodec(_Codec):
"A codec that consumes/emits nothing (e.g. for confort noise)"
def __init__(self):
_Codec.__init__(self, 1)
def _encode(self, bytes):
return None
def decode(self, bytes):
return None
class PassthruCodec(_Codec):
"A codec that leaves it's input alone"
def __init__(self):
_Codec.__init__(self, None)
decode = lambda self, bytes: bytes
buffer_and_encode = lambda self, bytes: [bytes]
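# A toy sketch (not part of the original module) showing how
# _Codec.buffer_and_encode above splits arbitrary-length writes into
# fixed-size samples; the 4-byte sample size is hypothetical.
class _UpperCodec(_Codec):
    "Toy codec: upper-cases fixed 4-byte samples."
    def __init__(self):
        _Codec.__init__(self, 4)
    def _encode(self, sample):
        return sample.upper()
# e.g. _UpperCodec().buffer_and_encode('abcdefghij') returns
# ['ABCD', 'EFGH'] and keeps the trailing 'ij' buffered for the next call.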
def make_codec_set():
format_to_codec = {}
format_to_codec[PT_CN] = NullCodec()
format_to_codec[PT_xCN] = NullCodec()
format_to_codec[PT_RAW] = PassthruCodec()
assert codecs.mulaw
if codecs.mulaw is not None:
format_to_codec[PT_PCMU] = MulawCodec()
if codecs.alaw is not None:
format_to_codec[PT_PCMA] = AlawCodec()
if codecs.gsm is not None:
format_to_codec[PT_GSM] = GSMCodec()
if codecs.speex is not None:
format_to_codec[PT_SPEEX] = SpeexCodec()
#if codecs.dvi4 is not None:
# format_to_codec[PT_DVI4] = DVI4Codec()
#if codecs.ilbc is not None:
# format_to_codec[PT_ILBC] = ILBCCodec()
return format_to_codec
known_formats = (sets.ImmutableSet(make_codec_set().keys()) -
sets.ImmutableSet([PT_CN, PT_xCN,]))
class Codecker:
def __init__(self, format):
self.format_to_codec = make_codec_set()
if not format in known_formats:
raise ValueError("Can't handle codec %r"%format)
self.format = format
self.handler = None
def set_handler(self, handler):
"""
        handler will subsequently be called with each encoded MediaSample.
"""
self.handler = handler
def getDefaultFormat(self):
return self.format
def handle_audio(self, bytes):
"Accept audio as bytes, emits MediaSamples."
if not bytes:
return None
codec = self.format_to_codec.get(self.format)
if not codec:
raise ValueError("can't encode format %r"%self.format)
encaudios = codec.buffer_and_encode(bytes)
for encaudio in encaudios:
samp = MediaSample(self.format, encaudio)
if self.handler is not None:
self.handler(samp)
else:
return samp
def decode(self, packet):
"Accepts an RTPPacket, emits audio as bytes"
if not packet.data:
return None
codec = self.format_to_codec.get(packet.header.ct)
if not codec:
raise ValueError("can't decode format %r"%packet.header.ct)
encaudio = codec.decode(packet.data)
return encaudio
class MediaLayer(NullConv):
""" The MediaLayer sits between the network and the raw
audio device. It converts the audio to/from the codec on
the network to the format used by the lower-level audio
devices (16 bit signed ints at an integer multiple of 8KHz).
"""
_playfile_LC = None
_playfile_fp = None
def __init__(self, device, *args, **kwargs):
self.playout = None
self.codecker = None
self.defaultFormat = None
# this sets self._d = device
NullConv.__init__(self, device, *args, **kwargs)
def getFormat(self):
return self.defaultFormat
def write(self, packet):
if self.playout is None:
log.msg("write before reopen, discarding")
return 0
audio = self.codecker.decode(packet)
if audio:
return self.playout.write(audio, packet.header.seq)
else:
self.playout.write('', packet.header.seq)
return 0
def selectDefaultFormat(self, fmts=[PT_PCMU,]):
assert isinstance(fmts, (list, tuple,)), fmts
assert not self._d or not self._d.isOpen(), \
"close device %r before calling selectDefaultFormat()" % (self._d,)
for f in fmts:
if f in known_formats:
self.defaultFormat = f
break
else:
raise ValueError("No working formats!")
def reopen(self, mediahandler=None):
"""
        mediahandler, if not None, is a callable that will be called
        whenever a media sample is available.
This flushes codec buffers. The audio playout buffers and microphone
readin buffers *ought* to be flushed by the lower-layer audio device
when we call reopen() on it.
"""
        assert self.defaultFormat, "must call selectDefaultFormat() "+\
            "before (re-)opening the device."
self.codecker = Codecker(self.defaultFormat)
self._d.reopen()
if mediahandler:
self.codecker.set_handler(mediahandler)
self._d.set_encoder(self.codecker)
else:
self._d.set_encoder(nullencoder)
if self.playout:
log.msg("playout already started")
else:
self.playout = playout.Playout(self)
def playWaveFile(self, fname):
from twisted.internet.task import LoopingCall
# stop any existing wave file playback
self.stopWaveFile()
if not self._d.isOpen():
self.selectDefaultFormat([PT_PCMU,])
self.reopen()
else:
self.close()
self.selectDefaultFormat([PT_PCMU,])
self.reopen()
self._playfile_fp = aufile.WavReader(fname)
self._playfile_LC = LoopingCall(self._playWaveFileLoopingCall)
self._playfile_LC.start(0.020)
def _playWaveFileLoopingCall(self):
if self._playfile_fp is None:
return
data = self._playfile_fp.read(160)
if data:
self._d.write(data)
else:
self._playfile_fp.reset()
def stopWaveFile(self):
if self._playfile_LC is not None:
self._playfile_LC.stop()
self._playfile_LC = None
self._playfile_fp = None
def close(self):
self.playout = None
self.codecker = None
self._d.set_encoder(nullencoder)
NullConv.close(self)
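# A usage sketch (not part of the original module); 'audio_dev' and
# 'send_to_rtp' are hypothetical:
#
#     layer = MediaLayer(audio_dev)
#     layer.selectDefaultFormat([PT_PCMU])     # pick a codec we can handle
#     layer.reopen(mediahandler=send_to_rtp)   # encoded samples -> handler
#     layer.playWaveFile('ring.wav')           # inject a wave file
#     layer.stopWaveFile()
#     layer.close()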
class DougConverter(MediaLayer):
"Specialised converter for Doug."
# XXX should be refactored away to just use a Codecker directly
def __init__(self, defaultFormat=PT_PCMU, *args, **kwargs):
self.codecker = Codecker(defaultFormat)
self.convertInbound = self.codecker.decode
self.convertOutbound = self.codecker.handle_audio
self.set_handler = self.codecker.set_handler
if not kwargs.get('device'):
kwargs['device'] = None
NullConv.__init__(self, *args, **kwargs)
| |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class l3param(base_resource) :
""" Configuration for Layer 3 related parameter resource. """
def __init__(self) :
self._srcnat = ""
self._icmpgenratethreshold = 0
self._overridernat = ""
self._dropdfflag = ""
self._miproundrobin = ""
self._externalloopback = ""
self._tnlpmtuwoconn = ""
self._usipserverstraypkt = ""
self._forwardicmpfragments = ""
self._dropipfragments = ""
self._acllogtime = 0
self._icmperrgenerate = ""
@property
def srcnat(self) :
ur"""Perform NAT if only the source is in the private network.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._srcnat
except Exception as e:
raise e
@srcnat.setter
def srcnat(self, srcnat) :
ur"""Perform NAT if only the source is in the private network.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._srcnat = srcnat
except Exception as e:
raise e
@property
def icmpgenratethreshold(self) :
ur"""NS generated ICMP pkts per 10ms rate threshold.<br/>Default value: 100.
"""
try :
return self._icmpgenratethreshold
except Exception as e:
raise e
@icmpgenratethreshold.setter
def icmpgenratethreshold(self, icmpgenratethreshold) :
ur"""NS generated ICMP pkts per 10ms rate threshold.<br/>Default value: 100
"""
try :
self._icmpgenratethreshold = icmpgenratethreshold
except Exception as e:
raise e
@property
def overridernat(self) :
ur"""USNIP/USIP settings override RNAT settings for configured
service/virtual server traffic.. .<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._overridernat
except Exception as e:
raise e
@overridernat.setter
def overridernat(self, overridernat) :
ur"""USNIP/USIP settings override RNAT settings for configured
service/virtual server traffic.. .<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._overridernat = overridernat
except Exception as e:
raise e
@property
def dropdfflag(self) :
ur"""Enable dropping the IP DF flag.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._dropdfflag
except Exception as e:
raise e
@dropdfflag.setter
def dropdfflag(self, dropdfflag) :
ur"""Enable dropping the IP DF flag.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._dropdfflag = dropdfflag
except Exception as e:
raise e
@property
def miproundrobin(self) :
ur"""Enable round robin usage of mapped IPs.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._miproundrobin
except Exception as e:
raise e
@miproundrobin.setter
def miproundrobin(self, miproundrobin) :
ur"""Enable round robin usage of mapped IPs.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._miproundrobin = miproundrobin
except Exception as e:
raise e
@property
def externalloopback(self) :
ur"""Enable external loopback.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._externalloopback
except Exception as e:
raise e
@externalloopback.setter
def externalloopback(self, externalloopback) :
ur"""Enable external loopback.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._externalloopback = externalloopback
except Exception as e:
raise e
@property
def tnlpmtuwoconn(self) :
ur"""Enable external loopback.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._tnlpmtuwoconn
except Exception as e:
raise e
@tnlpmtuwoconn.setter
def tnlpmtuwoconn(self, tnlpmtuwoconn) :
ur"""Enable external loopback.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._tnlpmtuwoconn = tnlpmtuwoconn
except Exception as e:
raise e
@property
def usipserverstraypkt(self) :
ur"""Enable detection of stray server side pkts in USIP mode.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._usipserverstraypkt
except Exception as e:
raise e
@usipserverstraypkt.setter
def usipserverstraypkt(self, usipserverstraypkt) :
ur"""Enable detection of stray server side pkts in USIP mode.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._usipserverstraypkt = usipserverstraypkt
except Exception as e:
raise e
@property
def forwardicmpfragments(self) :
ur"""Enable forwarding of ICMP fragments.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._forwardicmpfragments
except Exception as e:
raise e
@forwardicmpfragments.setter
def forwardicmpfragments(self, forwardicmpfragments) :
ur"""Enable forwarding of ICMP fragments.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._forwardicmpfragments = forwardicmpfragments
except Exception as e:
raise e
@property
def dropipfragments(self) :
ur"""Enable dropping of IP fragments.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._dropipfragments
except Exception as e:
raise e
@dropipfragments.setter
def dropipfragments(self, dropipfragments) :
ur"""Enable dropping of IP fragments.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._dropipfragments = dropipfragments
except Exception as e:
raise e
@property
def acllogtime(self) :
ur"""Parameter to tune acl logging time.<br/>Default value: 5000.
"""
try :
return self._acllogtime
except Exception as e:
raise e
@acllogtime.setter
def acllogtime(self, acllogtime) :
ur"""Parameter to tune acl logging time.<br/>Default value: 5000
"""
try :
self._acllogtime = acllogtime
except Exception as e:
raise e
@property
def icmperrgenerate(self) :
ur"""Enable/Disable fragmentation required icmp error generation, before encapsulating a packet with vPath header. This knob is only functional for vPath Environment.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._icmperrgenerate
except Exception as e:
raise e
@icmperrgenerate.setter
def icmperrgenerate(self, icmperrgenerate) :
ur"""Enable/Disable fragmentation required icmp error generation, before encapsulating a packet with vPath header. This knob is only functional for vPath Environment.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._icmperrgenerate = icmperrgenerate
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(l3param_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.l3param
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
return 0
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
ur""" Use this API to update l3param.
"""
try :
if type(resource) is not list :
updateresource = l3param()
updateresource.srcnat = resource.srcnat
updateresource.icmpgenratethreshold = resource.icmpgenratethreshold
updateresource.overridernat = resource.overridernat
updateresource.dropdfflag = resource.dropdfflag
updateresource.miproundrobin = resource.miproundrobin
updateresource.externalloopback = resource.externalloopback
updateresource.tnlpmtuwoconn = resource.tnlpmtuwoconn
updateresource.usipserverstraypkt = resource.usipserverstraypkt
updateresource.forwardicmpfragments = resource.forwardicmpfragments
updateresource.dropipfragments = resource.dropipfragments
updateresource.acllogtime = resource.acllogtime
updateresource.icmperrgenerate = resource.icmperrgenerate
return updateresource.update_resource(client)
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
ur""" Use this API to unset the properties of l3param resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = l3param()
return unsetresource.unset_resource(client, args)
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the l3param resources that are configured on netscaler.
"""
try :
if not name :
obj = l3param()
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
class Icmperrgenerate:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Dropipfragments:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Overridernat:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Tnlpmtuwoconn:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Usipserverstraypkt:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Srcnat:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Externalloopback:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Forwardicmpfragments:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Dropdfflag:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Miproundrobin:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class l3param_response(base_response) :
def __init__(self, length=1) :
self.l3param = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.l3param = [l3param() for _ in range(length)]
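
# A minimal usage sketch (assumptions: a reachable NetScaler appliance,
# valid credentials, and the standard SDK import path below -- adjust
# per install; this is illustrative, not part of the generated module):
#
#     from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#
#     client = nitro_service("10.0.0.1", "http")
#     client.login("nsroot", "nsroot")
#     conf = l3param.get(client)[0]          # fetch current L3 parameters
#     conf.dropipfragments = l3param.Dropipfragments.ENABLED
#     l3param.update(client, conf)           # push the change
#     client.logout()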
| |
# -*- coding: latin-1 -*-
"HTML Renderer for FPDF.py"
__author__ = "Mariano Reingart <reingart@gmail.com>"
__copyright__ = "Copyright (C) 2010 Mariano Reingart"
__license__ = "LGPL 3.0"
# Inspired by tuto5.py and several examples from fpdf.org, html2fpdf, etc.
from fpdf import FPDF
from HTMLParser import HTMLParser
DEBUG = False
def px2mm(px):
return int(px)*25.4/72.0
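# hex2dec parses an "#RRGGBB" color attribute into an (r, g, b) tuple of
# ints, e.g. hex2dec("#ff8000") == (255, 128, 0); an empty color string
# falls through and implicitly returns None.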
def hex2dec(color = "#000000"):
if color:
r = int(color[1:3], 16)
g = int(color[3:5], 16)
b = int(color[5:7], 16)
return r, g, b
class HTML2FPDF(HTMLParser):
"Render basic HTML to FPDF"
def __init__(self, pdf):
HTMLParser.__init__(self)
self.style = {}
self.pre = False
self.href = ''
self.align = ''
self.page_links = {}
self.font_list = ("times","courier", "helvetica")
self.pdf = pdf
        self.r = self.g = self.b = 0
        self.color = None  # current <font> color override (None = default)
        self.font = None   # current <font> face override (None = default)
self.indent = 0
self.bullet = []
self.set_font("times", 12)
self.table = None # table attributes
self.table_col_width = None # column (header) widths
self.table_col_index = None # current column index
self.td = None # cell attributes
self.th = False # header enabled
self.tr = None
self.theader = None # table header cells
self.tfooter = None # table footer cells
self.thead = None
self.tfoot = None
self.theader_out = self.tfooter_out = False
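    # width2mm resolves an HTML length to millimetres: "NN%" is taken as
    # a fraction of the printable page width (further scaled when the
    # table itself has a percent width), while a bare number is treated
    # as pixel-ish units divided by a fixed fudge factor of 6.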
def width2mm(self, length):
if length[-1]=='%':
total = self.pdf.w - self.pdf.r_margin - self.pdf.l_margin
if self.table['width'][-1]=='%':
total *= int(self.table['width'][:-1])/100.0
            return int(length[:-1]) * total / 100.0
else:
return int(length) / 6.0
def handle_data(self, txt):
if self.td is not None: # drawing a table?
if 'width' not in self.td and 'colspan' not in self.td:
l = [self.table_col_width[self.table_col_index]]
elif 'colspan' in self.td:
i = self.table_col_index
colspan = int(self.td['colspan'])
l = self.table_col_width[i:i+colspan]
else:
l = [self.td.get('width','240')]
            w = sum([self.width2mm(length) for length in l])
h = int(self.td.get('height', 0)) / 4 or self.h*1.30
self.table_h = h
border = int(self.table.get('border', 0))
if not self.th:
align = self.td.get('align', 'L')[0].upper()
border = border and 'LR'
else:
self.set_style('B',True)
border = border or 'B'
align = 'C'
bgcolor = hex2dec(self.td.get('bgcolor', self.tr.get('bgcolor', '')))
# parsing table header/footer (drawn later):
if self.thead is not None:
self.theader.append(((w,h,txt,border,0,align), bgcolor))
if self.tfoot is not None:
self.tfooter.append(((w,h,txt,border,0,align), bgcolor))
# check if reached end of page, add table footer and header:
height = h + (self.tfooter and self.tfooter[0][0][1] or 0)
if self.pdf.y+height>self.pdf.page_break_trigger and not self.th:
self.output_table_footer()
self.pdf.add_page()
self.theader_out = self.tfooter_out = False
if self.tfoot is None and self.thead is None:
if not self.theader_out:
self.output_table_header()
self.box_shadow(w, h, bgcolor)
if DEBUG: print "td cell", self.pdf.x, w, txt, "*"
self.pdf.cell(w,h,txt,border,0,align)
elif self.table is not None:
# ignore anything else than td inside a table
pass
elif self.align:
if DEBUG: print "cell", txt, "*"
self.pdf.cell(0,self.h,txt,0,1,self.align[0].upper(), self.href)
else:
txt = txt.replace("\n"," ")
if self.href:
self.put_link(self.href,txt)
else:
if DEBUG: print "write", txt, "*"
self.pdf.write(self.h,txt)
def box_shadow(self, w, h, bgcolor):
if DEBUG: print "box_shadow", w, h, bgcolor
if bgcolor:
fill_color = self.pdf.fill_color
self.pdf.set_fill_color(*bgcolor)
self.pdf.rect(self.pdf.x, self.pdf.y, w, h, 'F')
self.pdf.fill_color = fill_color
def output_table_header(self):
if self.theader:
b = self.b
x = self.pdf.x
self.pdf.set_x(self.table_offset)
self.set_style('B',True)
for cell, bgcolor in self.theader:
self.box_shadow(cell[0], cell[1], bgcolor)
self.pdf.cell(*cell)
self.set_style('B',b)
self.pdf.ln(self.theader[0][0][1])
self.pdf.set_x(self.table_offset)
#self.pdf.set_x(x)
self.theader_out = True
def output_table_footer(self):
if self.tfooter:
x = self.pdf.x
self.pdf.set_x(self.table_offset)
#TODO: self.output_table_sep()
for cell, bgcolor in self.tfooter:
self.box_shadow(cell[0], cell[1], bgcolor)
self.pdf.cell(*cell)
self.pdf.ln(self.tfooter[0][0][1])
self.pdf.set_x(x)
if int(self.table.get('border', 0)):
self.output_table_sep()
self.tfooter_out = True
def output_table_sep(self):
self.pdf.set_x(self.table_offset)
x1 = self.pdf.x
y1 = self.pdf.y
        w = sum([self.width2mm(length) for length in self.table_col_width])
self.pdf.line(x1,y1,x1+w,y1)
def handle_starttag(self, tag, attrs):
attrs = dict(attrs)
if DEBUG: print "STARTTAG", tag, attrs
if tag=='b' or tag=='i' or tag=='u':
self.set_style(tag,1)
if tag=='a':
self.href=attrs['href']
if tag=='br':
self.pdf.ln(5)
if tag=='p':
self.pdf.ln(5)
if attrs:
                self.align = attrs.get('align', '').lower()
if tag in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6'):
            k = (2, 1.5, 1.17, 1, 0.83, 0.67)[int(tag[1]) - 1]
self.pdf.ln(5*k)
self.pdf.set_text_color(150,0,0)
self.pdf.set_font_size(12 * k)
if attrs: self.align = attrs.get('align')
if tag=='hr':
self.put_line()
if tag=='pre':
self.pdf.set_font('Courier','',11)
self.pdf.set_font_size(11)
self.set_style('B',False)
self.set_style('I',False)
self.pre = True
if tag=='blockquote':
self.set_text_color(100,0,45)
self.pdf.ln(3)
if tag=='ul':
self.indent+=1
self.bullet.append('\x95')
if tag=='ol':
self.indent+=1
self.bullet.append(0)
if tag=='li':
self.pdf.ln(self.h+2)
self.pdf.set_text_color(190,0,0)
bullet = self.bullet[self.indent-1]
if not isinstance(bullet, basestring):
bullet += 1
self.bullet[self.indent-1] = bullet
bullet = "%s. " % bullet
self.pdf.write(self.h,'%s%s ' % (' '*5*self.indent, bullet))
self.set_text_color()
if tag=='font':
if 'color' in attrs:
                color = hex2dec(attrs['color'])
                self.set_text_color(*color)
                self.color = color
if 'face' in attrs and attrs['face'].lower() in self.font_list:
face = attrs.get('face').lower()
self.pdf.set_font(face)
self.font_face = face
if 'size' in attrs:
                size = int(attrs.get('size'))
                self.pdf.set_font_size(size)
                self.font_size = size
if tag=='table':
self.table = dict([(k.lower(), v) for k,v in attrs.items()])
if not 'width' in self.table:
self.table['width'] = '100%'
if self.table['width'][-1]=='%':
w = self.pdf.w - self.pdf.r_margin - self.pdf.l_margin
w *= int(self.table['width'][:-1])/100.0
self.table_offset = (self.pdf.w-w)/2.0
self.table_col_width = []
self.theader_out = self.tfooter_out = False
self.theader = []
self.tfooter = []
self.thead = None
self.tfoot = None
self.pdf.ln()
if tag=='tr':
self.tr = dict([(k.lower(), v) for k,v in attrs.items()])
self.table_col_index = 0
self.pdf.set_x(self.table_offset)
if tag=='td':
self.td = dict([(k.lower(), v) for k,v in attrs.items()])
if tag=='th':
self.td = dict([(k.lower(), v) for k,v in attrs.items()])
self.th = True
            if 'width' in self.td:
self.table_col_width.append(self.td['width'])
if tag=='thead':
self.thead = {}
if tag=='tfoot':
self.tfoot = {}
if tag=='img':
if 'src' in attrs:
x = self.pdf.get_x()
y = self.pdf.get_y()
w = px2mm(attrs.get('width', 0))
h = px2mm(attrs.get('height',0))
if self.align and self.align[0].upper() == 'C':
                    x = (self.pdf.w - w)/2.0
self.pdf.image(attrs['src'], x, y, w, h, link=self.href)
self.pdf.set_x(x+w)
self.pdf.set_y(y+h)
if tag=='b' or tag=='i' or tag=='u':
self.set_style(tag, True)
if tag=='center':
self.align = 'Center'
def handle_endtag(self, tag):
#Closing tag
if DEBUG: print "ENDTAG", tag
        if tag in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6'):
self.pdf.ln(6)
self.set_font()
self.set_style()
self.align = None
if tag=='pre':
self.pdf.set_font(self.font or 'Times','',12)
self.pdf.set_font_size(12)
self.pre=False
if tag=='blockquote':
self.set_text_color(0,0,0)
self.pdf.ln(3)
if tag=='strong':
tag='b'
if tag=='em':
tag='i'
if tag=='b' or tag=='i' or tag=='u':
self.set_style(tag, False)
if tag=='a':
self.href=''
if tag=='p':
self.align=''
if tag in ('ul', 'ol'):
self.indent-=1
self.bullet.pop()
if tag=='table':
if not self.tfooter_out:
self.output_table_footer()
self.table = None
self.th = False
self.theader = None
self.tfooter = None
self.pdf.ln()
if tag=='thead':
self.thead = None
if tag=='tfoot':
self.tfoot = None
if tag=='tbody':
# draw a line separator between table bodies
self.pdf.set_x(self.table_offset)
self.output_table_sep()
if tag=='tr':
h = self.table_h
if self.tfoot is None:
self.pdf.ln(h)
self.tr = None
if tag=='td' or tag=='th':
if self.th:
if DEBUG: print "revert style"
self.set_style('B', False) # revert style
self.table_col_index += int(self.td.get('colspan','1'))
self.td = None
self.th = False
if tag=='font':
if self.color:
self.pdf.set_text_color(0,0,0)
self.color = None
            if self.font:
                self.pdf.set_font('times', '', 12)
                self.font = None
if tag=='center':
self.align = None
def set_font(self, face=None, size=None):
if face:
self.font_face = face
if size:
self.font_size = size
self.h = size / 72.0*25.4
if DEBUG: print "H", self.h
self.pdf.set_font(self.font_face or 'times','',12)
self.pdf.set_font_size(self.font_size or 12)
self.set_style('u', False)
self.set_style('b', False)
self.set_style('i', False)
self.set_text_color()
def set_style(self, tag=None, enable=None):
#Modify style and select corresponding font
if tag:
t = self.style.get(tag.lower())
self.style[tag.lower()] = enable
style=''
for s in ('b','i','u'):
if self.style.get(s):
style+=s
if DEBUG: print "SET_FONT_STYLE", style
self.pdf.set_font('',style)
def set_text_color(self, r=None, g=0, b=0):
if r is None:
self.pdf.set_text_color(self.r,self.g,self.b)
else:
self.pdf.set_text_color(r, g, b)
self.r = r
self.g = g
self.b = b
def put_link(self, url, txt):
#Put a hyperlink
self.set_text_color(0,0,255)
self.set_style('u', True)
self.pdf.write(5,txt,url)
self.set_style('u', False)
self.set_text_color(0)
def put_line(self):
self.pdf.ln(2)
self.pdf.line(self.pdf.get_x(),self.pdf.get_y(),self.pdf.get_x()+187,self.pdf.get_y())
self.pdf.ln(3)
class HTMLMixin():
def write_html(self, text):
"Parse HTML and convert it to PDF"
h2p = HTML2FPDF(self)
h2p.feed(text)
if __name__=='__main__':
html="""
<H1 align="center">html2fpdf</H1>
<h2>Basic usage</h2>
<p>You can now easily print text mixing different
styles : <B>bold</B>, <I>italic</I>, <U>underlined</U>, or
<B><I><U>all at once</U></I></B>!<BR>You can also insert links
on text, such as <A HREF="http://www.fpdf.org">www.fpdf.org</A>,
or on an image: click on the logo.<br>
<center>
<A HREF="http://www.fpdf.org"><img src="tutorial/logo.png" width="104" height="71"></A>
</center>
<h3>Sample List</h3>
<ul><li>option 1</li>
<ol><li>option 2</li></ol>
<li>option 3</li></ul>
<table border="0" align="center" width="50%">
<thead><tr><th width="30%">Header 1</th><th width="70%">header 2</th></tr></thead>
<tbody>
<tr><td>cell 1</td><td>cell 2</td></tr>
<tr><td>cell 2</td><td>cell 3</td></tr>
</tbody>
</table>
<table border="1">
<thead><tr bgcolor="#A0A0A0"><th width="30%">Header 1</th><th width="70%">header 2</th></tr></thead>
<tfoot><tr bgcolor="#E0E0E0"><td>footer 1</td><td>footer 2</td></tr></tfoot>
<tbody>
<tr><td>cell 1</td><td>cell 2</td></tr>
<tr>
<td width="30%">cell 1</td><td width="70%" bgcolor="#D0D0FF" align='right'>cell 2</td>
</tr>
</tbody>
<tbody><tr><td colspan="2">cell spanned</td></tr></tbody>
<tbody>
""" + """<tr bgcolor="#F0F0F0">
<td>cell 3</td><td>cell 4</td>
</tr><tr bgcolor="#FFFFFF">
<td>cell 5</td><td>cell 6</td>
</tr>""" * 200 + """
</tbody>
</table>
"""
class MyFPDF(FPDF, HTMLMixin):
def header(self):
self.image('tutorial/logo_pb.png',10,8,33)
self.set_font('Arial','B',15)
self.cell(80)
self.cell(30,10,'Title',1,0,'C')
self.ln(20)
def footer(self):
self.set_y(-15)
self.set_font('Arial','I',8)
txt = 'Page %s of %s' % (self.page_no(), self.alias_nb_pages())
self.cell(0,10,txt,0,0,'C')
pdf=MyFPDF()
#First page
pdf.add_page()
pdf.write_html(html)
pdf.output('html.pdf','F')
import os
os.system("evince html.pdf")
| |
#!/usr/bin/env python
import numpy as np
import collections
import logging
numStrips = 8
numLEDperStrip = 64 # no. of LEDs per strip
numLEDs = numStrips * numLEDperStrip
#np.random.seed(137)
learning_rate = 1e-9 # rmsprop step size; good values are application-dependent, so tune empirically
decay_rate = 1 - 1e-9
# how much history of prox sensors to hold
t_prox_hist = 20
fsample = 1
N = int(t_prox_hist * fsample)
Naudio = 8 # number of audio bands
Nprox = 4 # number of proximity sensors
Ntime = 4 # day, hour, min, sec
Nsensors = Naudio + Nprox + Ntime
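# Layout of one flattened input vector: each of the N buffered samples
# contributes Nsensors values ordered as [audio bands 0:8, proximity
# 8:12, datetime 12:16], matching the logging slices in
# ProcessData.__call__ below.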
prox = np.zeros((N * Nsensors, ))
audio_target = np.array([0, 0, 1, 1.5, 1.5, 1, 1, 0]) # roughly human
prox_target = np.array([1, 1, 1, 1]) # 1 meter
time_target = np.array([0, 0, 0, 0]) # dummy
def act_fun(x):
#y = 1/(1 + np.exp(-x)) # sigmoid function
#y = np.tanh(x) # tanh function
    y = x.astype(float)  # work on a copy so the caller's array is not mutated
m = np.where(x <= 0)
lam = 1
y[m] = (np.exp(x[m]) - 1)
y *= lam
return y
def dact_fun(x):
#y = x * (1-x) # derivative of sigmoid
#y = 1 - (np.tanh(x))**2 # derivative of tanh
y = np.ones_like(x)
m = np.where(x <= 0)
lam = 1
y[m] = np.exp(x[m])
y *= lam
return y
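# A quick finite-difference sanity check for the ELU-style pair above (a
# sketch; _check_activation_gradient is not called by the control flow
# below and is only meant to be run by hand):
def _check_activation_gradient(eps=1e-6, tol=1e-4):
    x = np.array([-2.0, -0.5, 0.5, 2.0])  # stay away from the kink at 0
    numeric = (act_fun(x + eps) - act_fun(x - eps)) / (2 * eps)
    analytic = dact_fun(x)
    assert np.allclose(numeric, analytic, atol=tol), (numeric, analytic)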
# forward propagate the input data through the matrices and thresholds
def forward_prop(input_data, weights):
b = input_data
neuron_inputs = {}
neuron_outputs = {}
for layer_name in sorted(weights.keys()):
a = np.dot(weights[layer_name].T, b) # apply the weight matrices
b = act_fun(a) # nonlinear thresholding
v = {layer_name: a}
w = {layer_name: b}
neuron_inputs.update(v)
neuron_outputs.update(w) # store the neuron outputs
output_layer_outputs = b
return neuron_inputs, neuron_outputs, output_layer_outputs
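# compute_gradient is an unbound helper: it takes the ProcessData
# instance explicitly as `self` (see the call in ProcessData.__call__)
# and backpropagates the scalar error through the per-layer neuron
# inputs/outputs cached by forward_prop.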
def compute_gradient(self, error_signal, input_data):
""" See here: http://neuralnetworksanddeeplearning.com/chap2.html"""
# do the gradients for the last layer first
delta = error_signal
layers = sorted(self.weights.keys())
    delta *= dact_fun(self.neuron_inputs[layers[-1]])
gradients = { layers[-1] :
np.outer(self.neuron_outputs[layers[-2]], delta)}
for k in range(2, self.num_layers):
dz = dact_fun(self.neuron_inputs[layers[-k]])
delta = np.dot(self.weights[layers[-k+1]], delta) * dz
if k == (self.num_layers - 1):
dw = { layers[-k]:
np.outer(input_data, delta)}
else:
dw = { layers[-k]:
np.outer(self.neuron_outputs[layers[-k-1]], delta)}
gradients.update(dw)
return gradients
# use the computed gradients to update the weights
def update_weights(weights, expectation_g_squared, g_dict,
decay_rate, learning_rate):
epsilon = 1e-5
for layer_name in sorted(weights.keys()):
g = g_dict[layer_name]
expectation_g_squared[layer_name] = decay_rate * \
expectation_g_squared[layer_name] + \
(1 - decay_rate) * g**2
weights[layer_name] += (learning_rate * g) / \
(np.sqrt(expectation_g_squared[layer_name] + epsilon))
# reset batch gradient buffer
g_dict[layer_name] = np.zeros_like(weights[layer_name])
return weights, expectation_g_squared, g_dict
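# A toy, self-contained rmsprop step over a single 2x3 "layer" (an
# illustrative sketch: the shapes and the constant gradient are made up
# and unrelated to the LED network below):
def _demo_rmsprop_step():
    w = {'0': np.zeros((2, 3))}
    eg2 = {'0': np.zeros((2, 3))}
    g = {'0': np.ones((2, 3))}  # pretend accumulated batch gradient
    w, eg2, g = update_weights(w, eg2, g, decay_rate, learning_rate)
    assert not g['0'].any()  # the gradient buffer is cleared after the step
    return w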
class DataBuffer(object):
def __init__(self, sources, samples):
self.sources = sources
self.samples = samples
self.collected = collections.OrderedDict().fromkeys(sources, None)
for s in self.collected:
self.collected[s] = collections.deque(maxlen = samples)
def append(self, packet):
for source in self.sources:
data = packet[source]['data']
self.collected[source].append(data)
def get(self):
for s,q in self.collected.iteritems():
if len(q) < self.samples:
return None
output = []
for source, queue in self.collected.iteritems():
output.append(np.concatenate(queue))
out = np.concatenate(output)
logging.debug(out)
return out
class ProcessData(object):
def __init__(self, sources, samples):
# input data ring buffer
self.db = DataBuffer(sources, samples)
self.weights = {}
self.neuron_inputs = {}
self.neuron_outputs = {}
# To be used with rmsprop algorithm
# (http://sebastianruder.com/optimizing-gradient-descent/index.html#rmsprop)
self.expectation_g_squared = {}
self.g_dict = {}
self.data = np.zeros((N, Nsensors))
def __call__(self, packet):
self.db.append(packet)
        x = self.db.get()
        if x is None:
            return None  # ring buffer not yet full; nothing to process
# update data matrix with fresh samples
        self.data[1:, :] = self.data[0:-1, :]
self.data[0,:] = x
data = self.data.flatten()
logging.debug(data.shape)
#logging.info(data)
logging.info("audio_blrms = " +
np.array_str(data[0:8], precision = 2))
logging.info("proximity = " +
np.array_str(data[8:12], precision = 2))
logging.debug("datetime = " +
np.array_str(data[12:16], precision = 2))
audio_mask = [1, 2, 3, 4, 5, 6]
audio_err = 1 * np.sum(data[audio_mask] - audio_target[audio_mask])
#l1_error = 0.1 * np.sum(data[0:8] - audio_target)
p_mask = [0, 1, 2, 3]
prox_dat = data[8:12]
prox_err = np.amin(np.abs(prox_dat[p_mask]
- prox_target[p_mask]))
#l1_prox_error = 0.1 * np.sum(data[8:12] - prox_target)
error_signal = audio_err + prox_err
nL = len(data)
numNeurons = [nL, 4*nL, nL, 3*numLEDs]
self.num_layers = len(numNeurons)
# build the initial weight matrices
if len(self.weights) == 0:
for j in range(self.num_layers - 1):
w = {str(j):np.random.randn(numNeurons[j],
numNeurons[j+1])/np.sqrt(numNeurons[j])}
self.weights.update(w)
for layer_name in self.weights.keys():
#print layer_name
self.expectation_g_squared[layer_name] = \
np.zeros_like(self.weights[layer_name])
self.g_dict[layer_name] = \
np.zeros_like(self.weights[layer_name])
# go forward through the network
self.neuron_inputs, self.neuron_outputs, output = \
forward_prop(data, self.weights)
self.g_dict = compute_gradient(self, error_signal, data)
self.weights, self.expectation_g_squared, self.g_dict = \
update_weights(self.weights,
self.expectation_g_squared,
self.g_dict,
decay_rate, learning_rate)
bias_noise = np.random.randn(numLEDs * 3)
# scale to output range of 100 (lowered from 255 for power)
output = 100 * (output + 1) / 2
output += 10*bias_noise
output = np.round(output)
output = np.clip(output, 0, 100)
try:
logging.info("out = " + np.array_str(output[7:15],
precision = 0))
        except Exception:
logging.info("Weird value in output to LEDs...")
output = output.reshape(numLEDs, 3)
# increase brightness of jelly dome head to compensate
# for bright room
# -- selects the first 8 LEDs in each strip
j0 = np.array(range(8))
j = j0
for k in np.arange(1,8):
j = np.concatenate((j, j0 + k*64))
# scale the brightness of the 'j' LEDs by a factor of 255/100
        output[j] *= 255.0 / 100.0
# out = bias_noise
# return 512 x 3 matrix for LEDs
return output
| |
from __future__ import unicode_literals, absolute_import
import os
import six
import base64
import json
import time
import uuid
import requests
from ..utils import utils
AUTH_TYPES = ('registry_rubber', 'basic')
# TODO: update to support new docker configuration file path.
class Auth(object):
def __init__(self, registry, registry_version, **kwargs):
"""
:param auth_type:
:param registry:
:param kwargs:
:return:
"""
self._registry = utils.validate_uri(registry)
self._registry_version = registry_version
self._ssl_cert_path = kwargs.get('ssl_cert_path', None)
self._config_path = os.path.join(os.environ.get('HOME'), '.dockercfg')
self.user = None
self.passwd = None
self.auth_type = kwargs.get('type')
self.address = kwargs.get('address', None)
self.verify = kwargs.get('verify', False)
def clean_up(self):
"""
:return:
"""
if self.auth_type == 'registry_rubber' and self.user:
self._registry_rubber_uonce('delete')
# clean up old docker configs.
user_home = os.environ.get('HOME')
for file_name in os.listdir(user_home):
if 'dockercfg' in file_name:
                if file_name.count('.') == 2:
try:
parts = file_name.split('.')
delta = int(time.time()) - int(parts[1])
                        # only remove temporary configs older than 30 seconds
if delta > 30:
os.remove(os.path.realpath(os.path.join(user_home, file_name)))
except Exception:
pass
@property
def address(self):
return self._address
@address.setter
def address(self, value):
if value is None:
self._address = None
else:
self._address = utils.validate_uri(value)
@property
def auth_type(self):
return self._auth_type
@auth_type.setter
def auth_type(self, value):
        if value is not None:
            if not isinstance(value, six.string_types):
                raise LookupError("auth_type must be a string or None. {0} was passed.".format(value))
            if value not in AUTH_TYPES:
                raise ValueError("auth_type must be one of the following values: {0}, None.".format(', '.join(AUTH_TYPES)))
        self._auth_type = value
@property
def config_path(self):
return self._config_path
@property
def registry(self):
return self._registry
@property
def registry_version(self):
return self._registry_version
@property
def ssl_cert_path(self):
return self._ssl_cert_path
@property
def verify(self):
return self._verify
@verify.setter
def verify(self, value):
if not isinstance(value, bool):
raise TypeError("verify must be bool. {0} was passed.".format(value))
self._verify = value
##
# public methods
##
def load_dockercfg(self):
"""
:return:
"""
if self.ssl_cert_path:
self._validate_ssl_certs()
if self.auth_type == 'registry_rubber':
self.user, self.passwd = self._registry_rubber_uonce('add')
self._config_path = self._create_dockercfg(
self.user,
self.passwd,
os.path.join(os.environ.get('HOME'), '.{0}.dockercfg'.format(self.user))
)
else:
if not os.path.isfile(self.config_path):
raise ValueError("Couldn't find dockercfg file: {0}".format(self.config_path))
with open(self.config_path, 'r') as f:
try:
config = json.loads(f.read())
except Exception:
raise SyntaxError("{0} doesn't container valid json.".format(self.config_path))
if self.registry not in config:
raise LookupError("Was unable to find {0} in {1}".format(self.registry, self.config_path))
registry_config = config[self.registry]
if 'auth' not in registry_config:
raise LookupError("Was unable to find 'auth' obj for {0} in {1}".format(self.registry, self.config_path))
            credentials = base64.decodestring(registry_config['auth']).split(':', 1)
            self.user = credentials[0]
            self.passwd = credentials[1]
##
# private methods
##
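    # _registry_rubber_uonce provisions (action='add') or removes
    # (action='delete') a short-lived, use-once credential pair via the
    # registry rubber service: the user name is the current epoch time,
    # the password a random uuid4 hex string.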
@utils.retry
def _registry_rubber_uonce(self, action):
if not isinstance(action, six.string_types):
raise TypeError("action must be a string. {0} was passed.".format(action))
user = "{0}".format(int(time.time()))
passwd = six.text_type(uuid.uuid4()).replace('-', '')
params = {"user": user, "passwd": passwd}
url = "{0}/dregister_users/{1}".format(self.address, action)
if self.ssl_cert_path:
certs = (os.path.join(self.ssl_cert_path, 'cert.pem'), os.path.join(self.ssl_cert_path, 'key.pem'))
response = requests.get(url, params=params, cert=certs, verify=self.verify)
else:
response = requests.get(url, params=params)
if response.ok:
return user, passwd
else:
raise LookupError("Was unable to create new user with registry rubber: {0}".format(url))
# TODO: need to make this python three compatible.
def _create_dockercfg(self, user, passwd, config_path, email=""):
registry = "{0}/{1}/".format(self.registry, self.registry_version)
encoded_data = base64.encodestring("{0}:{1}".format(user, passwd).encode()).decode().strip()
config = {}
if os.path.exists(config_path):
with open(config_path, 'r') as f:
try:
config = json.loads(f.read())
except Exception:
                    raise SyntaxError("{0} doesn't contain valid JSON.".format(config_path))
config[registry] = {
"auth": encoded_data,
"email": email
}
with open(config_path, 'w') as f:
f.write(json.dumps(config))
return config_path
def _validate_ssl_certs(self):
"""
:return:
"""
if not os.path.exists(self.ssl_cert_path):
raise LookupError("ssl_cert_path '{0}' don't exist".format(self.ssl_cert_path))
for file_name in ('key', 'cert', 'ca'):
path_to_file = os.path.join(self.ssl_cert_path, '{0}.pem'.format(file_name))
if not os.path.isfile(path_to_file):
raise LookupError("ssl_cert_path: was unable to find {0}.".format(path_to_file))
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Component tests for user data and password reset functionality with
Nuage VSP SDN plugin
"""
# Import Local Modules
from nuageTestCase import nuageTestCase
from marvin.lib.base import (Account,
Template,
VirtualMachine,
Volume)
from marvin.lib.common import list_templates
from marvin.cloudstackAPI import updateTemplate
# Import System Modules
from nose.plugins.attrib import attr
import base64
class TestNuagePasswordReset(nuageTestCase):
"""Test user data and password reset functionality with
Nuage VSP SDN plugin
"""
@classmethod
def setUpClass(cls):
super(TestNuagePasswordReset, cls).setUpClass()
return
def setUp(self):
# Create an account
self.account = Account.create(self.api_client,
self.test_data["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup = [self.account]
return
# create_template - Creates guest VM template with the given VM object
def create_template(self, vm):
self.debug("Creating guest VM template")
list_volume = Volume.list(self.api_client,
virtualmachineid=vm.id,
type='ROOT',
listall=True
)
if isinstance(list_volume, list):
self.volume = list_volume[0]
else:
raise Exception("Exception: Unable to find root volume for VM "
"with ID - %s" % vm.id)
self.pw_enabled_template = Template.create(
self.api_client,
self.test_data["template"],
self.volume.id,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(self.pw_enabled_template.passwordenabled, True,
"Template is not password enabled"
)
self.cleanup.append(self.pw_enabled_template)
self.debug("Created guest VM template")
# updateTemplate - Updates value of the guest VM template's password
# enabled setting
def updateTemplate(self, value):
self.debug("Updating value of guest VM template's password enabled "
"setting")
cmd = updateTemplate.updateTemplateCmd()
cmd.id = self.template.id
cmd.passwordenabled = value
self.api_client.updateTemplate(cmd)
list_template_response = list_templates(self.api_client,
templatefilter="all",
id=self.template.id
)
self.template = list_template_response[0]
self.debug("Updated guest VM template")
# get_userdata_url - Returns user data URL for the given VM object
def get_userdata_url(self, vm):
self.debug("Getting user data url")
nic = vm.nic[0]
gateway = str(nic.gateway)
self.debug("Gateway: " + gateway)
user_data_url = 'curl "http://' + gateway + ':80/latest/user-data"'
return user_data_url
# create_and_verify_fw - Creates and verifies (Ingress) firewall rule with
# a Static NAT rule enabled public IP
def create_and_verify_fw(self, vm, public_ip, network):
self.debug("Creating and verifying firewall rule")
self.create_StaticNatRule_For_VM(vm, public_ip, network)
# VSD verification
self.verify_vsd_floating_ip(network, vm, public_ip.ipaddress)
fw_rule = self.create_FirewallRule(
public_ip, self.test_data["ingress_rule"])
# VSD verification
self.verify_vsd_firewall_rule(fw_rule)
self.debug("Successfully created and verified firewall rule")
# stop_vm - Stops the given VM, and verifies its state
def stop_vm(self, vm):
self.debug("Stoping VM")
vm.stop(self.api_client)
list_vm_response = VirtualMachine.list(self.api_client,
id=vm.id
)
if isinstance(list_vm_response, list):
vm = list_vm_response[0]
if vm.state != 'Stopped':
raise Exception("Failed to stop VM (ID: %s) " % self.vm.id)
else:
raise Exception("Invalid response from list_virtual_machines VM "
"(ID: %s) " % self.vm.id)
self.debug("Stopped VM")
# install_cloud_set_guest_password_script - Installs the
# cloud-set-guest-password script from people.apache.org in the given VM
# (SSH client)
def install_cloud_set_guest_password_script(self, ssh_client):
if self.isSimulator:
self.debug( "Simulator Environment: Skipping installing"
" cloud-set-guest-password script")
return
self.debug("Installing cloud-set-guest-password script")
cmd = "cd /etc/init.d;wget http://people.apache.org/~tsp/" \
"cloud-set-guest-password"
result = self.execute_cmd(ssh_client, cmd)
self.debug("wget file cloud-set-guest-password: " + result)
if "200 OK" not in result:
self.fail("failed to wget file cloud-set-guest-password")
cmds = ["chmod +x /etc/init.d/cloud-set-guest-password",
"chkconfig --add cloud-set-guest-password"
]
for c in cmds:
result = self.execute_cmd(ssh_client, c)
self.debug("get_set_password_file cmd " + c)
self.debug("get_set_password_file result " + result)
self.debug("Installed cloud-set-guest-password script")
@attr(tags=["advanced", "nuagevsp"], required_hardware="true")
def test_nuage_UserDataPasswordReset(self):
"""Test user data and password reset functionality with
Nuage VSP SDN plugin
"""
# 1. Create an Isolated Network with Nuage VSP Isolated Network
# offering, check if it is successfully created and is in the
# "Allocated" state.
# 2. Set password enabled to false in the guest VM template.
# 3. Deploy a VM in the created Isolated network with user data, check
# if the Isolated network state is changed to "Implemented", and
# both the VM & VR are successfully deployed and are in the
# "Running" state.
# 4. Verify that the guest VM template is not password enabled by
# checking the deployed VM's password (password == "password").
# 5. SSH into the deployed VM and verify its user data
# (expected user data == actual user data).
# 6. Check for cloud-set-guest-password script in the deployed VM for
# testing password reset functionality.
# 7. if cloud-set-guest-password script does not exist in the deployed
# VM:
# 7.1 Install the cloud-set-guest-password script from
# people.apache.org in the deployed VM.
# 7.2 Stop the deployed VM, and create a new password enabled
# guest VM template with it.
# 7.3 Deploy a new VM in the created Isolated network with the
# newly created guest VM template, check if the VM is
# successfully deployed and is in the "Running" state.
# 7.4 Verify that the new guest VM template is password enabled
# by checking the newly deployed VM's password
# (password != "password").
# 7.5 SSH into the newly deployed VM for verifying its password.
# 8. else cloud-set-guest-password script exists in the deployed VM:
# 8.1 Change password enabled to true in the guest VM template.
# 8.2 Verify that the guest VM template is password enabled.
# 9. Reset VM password, and start the VM.
# 10. Verify that the new guest VM template is password enabled by
# checking the VM's password (password != "password").
# 11. SSH into the VM for verifying its new password after its password
# reset.
# 12. Set password enabled to the default value in the guest VM
# template.
# 13. Delete all the created objects (cleanup).
for zone in self.zones:
self.debug("Zone - %s" % zone.name)
# Get Zone details
self.getZoneDetails(zone=zone)
# Configure VSD sessions
self.configureVSDSessions()
self.debug("Testing user data & password reset functionality in "
"an Isolated network...")
self.debug("Creating an Isolated network...")
net_off = self.create_NetworkOffering(
self.test_data["nuagevsp"]["isolated_network_offering"])
self.network = self.create_Network(net_off)
self.validate_Network(self.network, state="Allocated")
self.debug("Setting password enabled to false in the guest VM "
"template...")
self.defaultTemplateVal = self.template.passwordenabled
if self.template.passwordenabled:
self.updateTemplate(False)
self.debug("Deploying a VM in the created Isolated network with "
"user data...")
expected_user_data = "hello world vm1"
user_data = base64.b64encode(expected_user_data)
self.test_data["virtual_machine_userdata"]["userdata"] = user_data
self.vm_1 = self.create_VM(
self.network,
testdata=self.test_data["virtual_machine_userdata"])
self.validate_Network(self.network, state="Implemented")
vr = self.get_Router(self.network)
self.check_Router_state(vr, state="Running")
self.check_VM_state(self.vm_1, state="Running")
# VSD verification
self.verify_vsd_network(self.domain.id, self.network)
self.verify_vsd_router(vr)
self.verify_vsd_vm(self.vm_1)
self.debug("verifying that the guest VM template is not password "
"enabled...")
self.debug("VM - %s password - %s !" %
(self.vm_1.name, self.vm_1.password))
self.assertEqual(
self.vm_1.password,
self.test_data["virtual_machine_userdata"]["password"],
"Password is enabled for the VM (vm_1)"
)
self.debug("SSHing into the VM for verifying its user data...")
public_ip_1 = self.acquire_PublicIPAddress(self.network)
self.create_and_verify_fw(self.vm_1, public_ip_1, self.network)
ssh = self.ssh_into_VM(self.vm_1, public_ip_1)
user_data_cmd = self.get_userdata_url(self.vm_1)
if self.isSimulator:
self.debug("Simulator Environment: ending test early "
"because we don't have real vms")
return
self.debug("Getting user data with command: " + user_data_cmd)
actual_user_data = base64.b64decode(self.execute_cmd
(ssh, user_data_cmd))
self.debug("Actual user data - " + actual_user_data +
", Expected user data - " + expected_user_data)
self.assertEqual(actual_user_data, expected_user_data,
"Un-expected VM (VM_1) user data"
)
self.debug("Checking for cloud-set-guest-password script in the "
"VM for testing password reset functionality...")
ls_cmd = "ls /etc/init.d/cloud-set-guest-password"
ls_result = self.execute_cmd(ssh, ls_cmd)
ls_result = ls_result.lower()
self.debug("Response from ls_cmd: " + ls_result)
if "no such file" in ls_result:
self.debug("No cloud-set-guest-password script in the VM")
self.debug("Installing the cloud-set-guest-password script "
"from people.apache.org in the VM...")
self.install_cloud_set_guest_password_script(ssh)
self.debug("Stopping the VM, and creating a new password "
"enabled guest VM template with it...")
self.stop_vm(self.vm_1)
self.create_template(self.vm_1)
self.debug("Deploying a new VM in the created Isolated "
"network with the newly created guest VM "
"template...")
self.vm_2 = self.create_VM(
self.network,
testdata=self.test_data["virtual_machine_userdata"])
self.debug("Starting the VM...")
vm_2a = self.vm_2.start(self.api_client)
self.vm_2.password = vm_2a.password.strip()
self.vm_2.nic = vm_2a.nic
# VSD verification
self.verify_vsd_vm(self.vm_2)
self.debug("verifying that the guest VM template is password "
"enabled...")
self.debug("VM - %s password - %s !" %
(self.vm_2.name, self.vm_2.password))
self.assertNotEqual(
self.vm_2.password,
self.test_data["virtual_machine_userdata"]["password"],
"Password is not enabled for the VM"
)
self.debug("SSHing into the VM for verifying its password...")
public_ip_2 = self.acquire_PublicIPAddress(self.network)
self.create_and_verify_fw(self.vm_2, public_ip_2, self.network)
self.ssh_into_VM(self.vm_2, public_ip_2)
vm_test = self.vm_2
vm_test_public_ip = public_ip_2
else:
self.debug("Updating the guest VM template to password "
"enabled")
self.updateTemplate(True)
self.assertEqual(self.template.passwordenabled, True,
"Guest VM template is not password enabled"
)
vm_test = self.vm_1
vm_test_public_ip = public_ip_1
self.debug("Resetting password for VM - %s" % vm_test.name)
vm_test.password = vm_test.resetPassword(self.api_client)
self.debug("Password reset to - %s" % vm_test.password)
self.debug("Starting the VM")
vm_test.start(self.api_client)
self.debug("verifying that the guest VM template is password "
"enabled...")
self.debug("VM - %s password - %s !" %
(vm_test.name, vm_test.password))
self.assertNotEqual(
vm_test.password,
self.test_data["virtual_machine_userdata"]["password"],
"Password is not enabled for the VM"
)
self.debug("SSHing into the VM for verifying its new password "
"after its password reset...")
self.ssh_into_VM(vm_test, vm_test_public_ip)
self.debug("Setting password enabled to the default value in the "
"guest VM template...")
self.updateTemplate(self.defaultTemplateVal)
| |
#!/usr/bin/env python
# -*- mode: python -*-
# $Id: re_tests.py 275 1999-02-15 14:54:13Z hugunin $
# Re test suite and benchmark suite v1.5
# The 3 possible outcomes for each pattern
[SUCCEED, FAIL, SYNTAX_ERROR] = range(3)
# Benchmark suite (needs expansion)
#
# The benchmark suite does not test correctness, just speed. The
# first element of each tuple is the regex pattern; the second is a
# string to match it against. The benchmarking code will embed the
# second string inside several sizes of padding, to test how regex
# matching performs on large strings.
benchmarks = [
('Python', 'Python'), # Simple text literal
('.*Python', 'Python'), # Bad text literal
('.*Python.*', 'Python'), # Worse text literal
('.*(Python)', 'Python'), # Bad text literal with grouping
    ('Python|Perl|Tcl', 'Perl'),    # Alternation
('(Python|Perl|Tcl)', 'Perl'), # Grouped alternation
('(Python)\\1', 'PythonPython'), # Backreference
('([0a-z][a-z]*,)+', 'a5,b7,c9,'), # Disable the fastmap optimization
('([a-z][a-z0-9]*,)+', 'a5,b7,c9,') # A few sets
]
# Test suite (for verifying correctness)
#
# The test suite is a list of 5- or 3-tuples. The 5 parts of a
# complete tuple are:
# element 0: a string containing the pattern
# 1: the string to match against the pattern
# 2: the expected result (SUCCEED, FAIL, SYNTAX_ERROR)
# 3: a string that will be eval()'ed to produce a test string.
# This is an arbitrary Python expression; the available
# variables are "found" (the whole match), and "g1", "g2", ...
# up to "g99" contain the contents of each group, or the
# string 'None' if the group wasn't given a value, or the
# string 'Error' if the group index was out of range;
# also "groups", the return value of m.group() (a tuple).
# 4: The expected result of evaluating the expression.
# If the two don't match, an error is reported.
#
# If the regex isn't expected to work, the latter two elements can be omitted.
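# A minimal sketch of the harness such a tuple is meant to drive (the
# run_test helper below is illustrative, not part of this module):
import re

def run_test(entry):
    pattern, s, outcome = entry[0], entry[1], entry[2]
    try:
        m = re.search(pattern, s)
    except re.error:
        return outcome == SYNTAX_ERROR
    if m is None:
        return outcome == FAIL
    if outcome != SUCCEED:
        return False
    if len(entry) == 3:
        return True
    expr, expected = entry[3], entry[4]
    names = {'found': m.group(0), 'groups': m.groups()}
    for i in range(1, 100):
        try:
            g = m.group(i)
            names['g%d' % i] = 'None' if g is None else g
        except (IndexError, re.error):
            names['g%d' % i] = 'Error'
    return eval(expr, names) == expected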
tests = [
# Test ?P< and ?P= extensions
('(?P<foo_123', '', SYNTAX_ERROR), # Unterminated group identifier
('(?P<1>a)', '', SYNTAX_ERROR), # Begins with a digit
('(?P<!>a)', '', SYNTAX_ERROR), # Begins with an illegal char
('(?P<foo!>a)', '', SYNTAX_ERROR), # Begins with an illegal char
# Same tests, for the ?P= form
('(?P<foo_123>a)(?P=foo_123', 'aa', SYNTAX_ERROR),
('(?P<foo_123>a)(?P=1)', 'aa', SYNTAX_ERROR),
('(?P<foo_123>a)(?P=!)', 'aa', SYNTAX_ERROR),
('(?P<foo_123>a)(?P=foo_124', 'aa', SYNTAX_ERROR), # Backref to undefined group
('(?P<foo_123>a)', 'a', SUCCEED, 'g1', 'a'),
('(?P<foo_123>a)(?P=foo_123)', 'aa', SUCCEED, 'g1', 'a'),
# Test octal escapes
('\\1', 'a', SYNTAX_ERROR), # Backreference
('[\\1]', '\1', SUCCEED, 'found', '\1'), # Character
('\\09', chr(0) + '9', SUCCEED, 'found', chr(0) + '9'),
('\\141', 'a', SUCCEED, 'found', 'a'),
#('(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)\\119', 'abcdefghijklk9', SUCCEED, 'found+"-"+g11', 'abcdefghijklk9-k'),
# Test \0 is handled everywhere
(r'\0', '\0', SUCCEED, 'found', '\0'),
(r'[\0a]', '\0', SUCCEED, 'found', '\0'),
(r'[a\0]', '\0', SUCCEED, 'found', '\0'),
(r'[^a\0]', '\0', FAIL),
# Test various letter escapes
#(r'\a[\b]\f\n\r\t\v', '\a\b\f\n\r\t\v', SUCCEED, 'found', '\a\b\f\n\r\t\v'),
#(r'[\a][\b][\f][\n][\r][\t][\v]', '\a\b\f\n\r\t\v', SUCCEED, 'found', '\a\b\f\n\r\t\v'),
(r'\u', '', SYNTAX_ERROR), # A Perl escape
#(r'\c\e\g\h\i\j\k\m\o\p\q\y\z', 'ceghijkmopqyz', SUCCEED, 'found', 'ceghijkmopqyz'),
(r'\xff', '\377', SUCCEED, 'found', chr(255)),
#(r'\x00ffffffffffffff', '\377', SUCCEED, 'found', chr(255)),
#(r'\x00f', '\017', SUCCEED, 'found', chr(15)),
#(r'\x00fe', '\376', SUCCEED, 'found', chr(254)),
(r"^\w+=(\\[\000-\277]|[^\n\\])*", "SRC=eval.c g.c blah blah blah \\\\\n\tapes.c",
SUCCEED, 'found', "SRC=eval.c g.c blah blah blah \\\\"),
# Test that . only matches \n in DOTALL mode
('a.b', 'acb', SUCCEED, 'found', 'acb'),
('a.b', 'a\nb', FAIL),
('a.*b', 'acc\nccb', FAIL),
('a.{4,5}b', 'acc\nccb', FAIL),
('a.b', 'a\rb', SUCCEED, 'found', 'a\rb'),
#('a.b(?s)', 'a\nb', SUCCEED, 'found', 'a\nb'),
('a.*(?s)b', 'acc\nccb', SUCCEED, 'found', 'acc\nccb'),
('(?s)a.{4,5}b', 'acc\nccb', SUCCEED, 'found', 'acc\nccb'),
('(?s)a.b', 'a\nb', SUCCEED, 'found', 'a\nb'),
(')', '', SYNTAX_ERROR), # Unmatched right bracket
('', '', SUCCEED, 'found', ''), # Empty pattern
('abc', 'abc', SUCCEED, 'found', 'abc'),
('abc', 'xbc', FAIL),
('abc', 'axc', FAIL),
('abc', 'abx', FAIL),
('abc', 'xabcy', SUCCEED, 'found', 'abc'),
('abc', 'ababc', SUCCEED, 'found', 'abc'),
('ab*c', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab*bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab+bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab+bc', 'abc', FAIL),
('ab+bc', 'abq', FAIL),
('ab+bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab?bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab?bc', 'abc', SUCCEED, 'found', 'abc'),
('ab?bc', 'abbbbc', FAIL),
('ab?c', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abcc', FAIL),
('^abc', 'abcc', SUCCEED, 'found', 'abc'),
('^abc$', 'aabc', FAIL),
('abc$', 'aabc', SUCCEED, 'found', 'abc'),
('^', 'abc', SUCCEED, 'found+"-"', '-'),
('$', 'abc', SUCCEED, 'found+"-"', '-'),
('a.c', 'abc', SUCCEED, 'found', 'abc'),
('a.c', 'axc', SUCCEED, 'found', 'axc'),
('a.*c', 'axyzc', SUCCEED, 'found', 'axyzc'),
('a.*c', 'axyzd', FAIL),
('a[bc]d', 'abc', FAIL),
('a[bc]d', 'abd', SUCCEED, 'found', 'abd'),
('a[b-d]e', 'abd', FAIL),
('a[b-d]e', 'ace', SUCCEED, 'found', 'ace'),
('a[b-d]', 'aac', SUCCEED, 'found', 'ac'),
('a[-b]', 'a-', SUCCEED, 'found', 'a-'),
('a[\\-b]', 'a-', SUCCEED, 'found', 'a-'),
('a[b-]', 'a-', SYNTAX_ERROR),
('a[]b', '-', SYNTAX_ERROR),
('a[', '-', SYNTAX_ERROR),
('a\\', '-', SYNTAX_ERROR),
('abc)', '-', SYNTAX_ERROR),
('(abc', '-', SYNTAX_ERROR),
('a]', 'a]', SUCCEED, 'found', 'a]'),
('a[]]b', 'a]b', SUCCEED, 'found', 'a]b'),
('a[\]]b', 'a]b', SUCCEED, 'found', 'a]b'),
('a[^bc]d', 'aed', SUCCEED, 'found', 'aed'),
('a[^bc]d', 'abd', FAIL),
('a[^-b]c', 'adc', SUCCEED, 'found', 'adc'),
('a[^-b]c', 'a-c', FAIL),
('a[^]b]c', 'a]c', FAIL),
('a[^]b]c', 'adc', SUCCEED, 'found', 'adc'),
('\\ba\\b', 'a-', SUCCEED, '"-"', '-'),
('\\ba\\b', '-a', SUCCEED, '"-"', '-'),
('\\ba\\b', '-a-', SUCCEED, '"-"', '-'),
('\\by\\b', 'xy', FAIL),
('\\by\\b', 'yz', FAIL),
('\\by\\b', 'xyz', FAIL),
('x\\b', 'xyz', FAIL),
('x\\B', 'xyz', SUCCEED, '"-"', '-'),
('\\Bz', 'xyz', SUCCEED, '"-"', '-'),
('z\\B', 'xyz', FAIL),
('\\Bx', 'xyz', FAIL),
('\\Ba\\B', 'a-', FAIL, '"-"', '-'),
('\\Ba\\B', '-a', FAIL, '"-"', '-'),
('\\Ba\\B', '-a-', FAIL, '"-"', '-'),
('\\By\\B', 'xy', FAIL),
('\\By\\B', 'yz', FAIL),
('\\By\\b', 'xy', SUCCEED, '"-"', '-'),
('\\by\\B', 'yz', SUCCEED, '"-"', '-'),
('\\By\\B', 'xyz', SUCCEED, '"-"', '-'),
('ab|cd', 'abc', SUCCEED, 'found', 'ab'),
('ab|cd', 'abcd', SUCCEED, 'found', 'ab'),
('()ef', 'def', SUCCEED, 'found+"-"+g1', 'ef-'),
('$b', 'b', FAIL),
('a\\(b', 'a(b', SUCCEED, 'found+"-"+g1', 'a(b-Error'),
('a\\(*b', 'ab', SUCCEED, 'found', 'ab'),
('a\\(*b', 'a((b', SUCCEED, 'found', 'a((b'),
('a\\\\b', 'a\\b', SUCCEED, 'found', 'a\\b'),
('((a))', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'a-a-a'),
('(a)b(c)', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'abc-a-c'),
('a+b+c', 'aabbabc', SUCCEED, 'found', 'abc'),
('(a+|b)*', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)+', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)?', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
(')(', '-', SYNTAX_ERROR),
('[^ab]*', 'cde', SUCCEED, 'found', 'cde'),
('abc', '', FAIL),
('a*', '', SUCCEED, 'found', ''),
('a|b|c|d|e', 'e', SUCCEED, 'found', 'e'),
('(a|b|c|d|e)f', 'ef', SUCCEED, 'found+"-"+g1', 'ef-e'),
('abcd*efg', 'abcdefg', SUCCEED, 'found', 'abcdefg'),
('ab*', 'xabyabbbz', SUCCEED, 'found', 'ab'),
('ab*', 'xayabbbz', SUCCEED, 'found', 'a'),
('(ab|cd)e', 'abcde', SUCCEED, 'found+"-"+g1', 'cde-cd'),
('[abhgefdc]ij', 'hij', SUCCEED, 'found', 'hij'),
('^(ab|cd)e', 'abcde', FAIL, 'xg1y', 'xy'),
('(abc|)ef', 'abcdef', SUCCEED, 'found+"-"+g1', 'ef-'),
('(a|b)c*d', 'abcd', SUCCEED, 'found+"-"+g1', 'bcd-b'),
('(ab|ab*)bc', 'abc', SUCCEED, 'found+"-"+g1', 'abc-a'),
('a([bc]*)c*', 'abc', SUCCEED, 'found+"-"+g1', 'abc-bc'),
('a([bc]*)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]+)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]*)(c+d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-b-cd'),
('a[bcd]*dcdcde', 'adcdcde', SUCCEED, 'found', 'adcdcde'),
('a[bcd]+dcdcde', 'adcdcde', FAIL),
('(ab|a)b*c', 'abc', SUCCEED, 'found+"-"+g1', 'abc-ab'),
('((a)(b)c)(d)', 'abcd', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'abc-a-b-d'),
('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', SUCCEED, 'found', 'alpha'),
('^a(bc+|b[eh])g|.h$', 'abh', SUCCEED, 'found+"-"+g1', 'bh-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'effgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'ij', SUCCEED, 'found+"-"+g1+"-"+g2', 'ij-ij-j'),
('(bc+d$|ef*g.|h?i(j|k))', 'effg', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('(((((((((a)))))))))', 'a', SUCCEED, 'found', 'a'),
('multiple words of text', 'uh-uh', FAIL),
('multiple words', 'multiple words, yeah', SUCCEED, 'found', 'multiple words'),
('(.*)c(.*)', 'abcde', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcde-ab-de'),
('\\((.*), (.*)\\)', '(a, b)', SUCCEED, 'g2+"-"+g1', 'b-a'),
('[k]', 'ab', FAIL),
('a[-]?c', 'ac', SUCCEED, 'found', 'ac'),
('(abc)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('([a-c]*)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('^(.+)?B', 'AB', SUCCEED, 'g1', 'A'),
('(a+).\\1$', 'aaaaa', SUCCEED, 'found+"-"+g1', 'aaaaa-aa'),
('^(a+).\\1$', 'aaaa', FAIL),
('(abc)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
('([a-c]+)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
('(a)\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
('(a+)\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
('(a+)+\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
('(a).+\\1', 'aba', SUCCEED, 'found+"-"+g1', 'aba-a'),
('(a)ba*\\1', 'aba', SUCCEED, 'found+"-"+g1', 'aba-a'),
('(aa|a)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
('(a|aa)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
('(a+)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
('([abc]*)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
('(a)(b)c|ab', 'ab', SUCCEED, 'found+"-"+g1+"-"+g2', 'ab-None-None'),
('(a)+x', 'aaax', SUCCEED, 'found+"-"+g1', 'aaax-a'),
('([ac])+x', 'aacx', SUCCEED, 'found+"-"+g1', 'aacx-c'),
('([^/]*/)*sub1/', 'd:msgs/tdir/sub1/trial/away.cpp', SUCCEED, 'found+"-"+g1', 'd:msgs/tdir/sub1/-tdir/'),
('([^.]*)\\.([^:]*):[T ]+(.*)', 'track1.title:TBlah blah blah', SUCCEED, 'found+"-"+g1+"-"+g2+"-"+g3', 'track1.title:TBlah blah blah-track1-title-Blah blah blah'),
('([^N]*N)+', 'abNNxyzN', SUCCEED, 'found+"-"+g1', 'abNNxyzN-xyzN'),
('([^N]*N)+', 'abNNxyz', SUCCEED, 'found+"-"+g1', 'abNN-N'),
('([abc]*)x', 'abcx', SUCCEED, 'found+"-"+g1', 'abcx-abc'),
('([abc]*)x', 'abc', FAIL),
('([xyz]*)x', 'abcx', SUCCEED, 'found+"-"+g1', 'x-'),
('(a)+b|aac', 'aac', SUCCEED, 'found+"-"+g1', 'aac-None'),
# Test symbolic groups
('(?P<i d>aaa)a', 'aaaa', SYNTAX_ERROR),
('(?P<id>aaa)a', 'aaaa', SUCCEED, 'found+"-"+id', 'aaaa-aaa'),
('(?P<id>aa)(?P=id)', 'aaaa', SUCCEED, 'found+"-"+id', 'aaaa-aa'),
('(?P<id>aa)(?P=xd)', 'aaaa', SYNTAX_ERROR),
# Test octal escapes/memory references
('\\1', 'a', SYNTAX_ERROR),
('\\09', chr(0) + '9', SUCCEED, 'found', chr(0) + '9'),
('\\141', 'a', SUCCEED, 'found', 'a'),
#('(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)\\119', 'abcdefghijklk9', SUCCEED, 'found+"-"+g11', 'abcdefghijklk9-k'),
# All tests from Perl
('abc', 'abc', SUCCEED, 'found', 'abc'),
('abc', 'xbc', FAIL),
('abc', 'axc', FAIL),
('abc', 'abx', FAIL),
('abc', 'xabcy', SUCCEED, 'found', 'abc'),
('abc', 'ababc', SUCCEED, 'found', 'abc'),
('ab*c', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab*bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{0,}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab+bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab+bc', 'abc', FAIL),
('ab+bc', 'abq', FAIL),
('ab{1,}bc', 'abq', FAIL),
('ab+bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{1,}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{1,3}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{3,4}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{4,5}bc', 'abbbbc', FAIL),
('ab?bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab?bc', 'abc', SUCCEED, 'found', 'abc'),
('ab{0,1}bc', 'abc', SUCCEED, 'found', 'abc'),
('ab?bc', 'abbbbc', FAIL),
('ab?c', 'abc', SUCCEED, 'found', 'abc'),
('ab{0,1}c', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abcc', FAIL),
('^abc', 'abcc', SUCCEED, 'found', 'abc'),
('^abc$', 'aabc', FAIL),
('abc$', 'aabc', SUCCEED, 'found', 'abc'),
('^', 'abc', SUCCEED, 'found', ''),
('$', 'abc', SUCCEED, 'found', ''),
('a.c', 'abc', SUCCEED, 'found', 'abc'),
('a.c', 'axc', SUCCEED, 'found', 'axc'),
('a.*c', 'axyzc', SUCCEED, 'found', 'axyzc'),
('a.*c', 'axyzd', FAIL),
('a[bc]d', 'abc', FAIL),
('a[bc]d', 'abd', SUCCEED, 'found', 'abd'),
('a[b-d]e', 'abd', FAIL),
('a[b-d]e', 'ace', SUCCEED, 'found', 'ace'),
('a[b-d]', 'aac', SUCCEED, 'found', 'ac'),
('a[-b]', 'a-', SUCCEED, 'found', 'a-'),
('a[b-]', 'a-', SUCCEED, 'found', 'a-'),
('a[b-a]', '-', SYNTAX_ERROR),
('a[]b', '-', SYNTAX_ERROR),
('a[', '-', SYNTAX_ERROR),
('a]', 'a]', SUCCEED, 'found', 'a]'),
('a[]]b', 'a]b', SUCCEED, 'found', 'a]b'),
('a[^bc]d', 'aed', SUCCEED, 'found', 'aed'),
('a[^bc]d', 'abd', FAIL),
('a[^-b]c', 'adc', SUCCEED, 'found', 'adc'),
('a[^-b]c', 'a-c', FAIL),
('a[^]b]c', 'a]c', FAIL),
('a[^]b]c', 'adc', SUCCEED, 'found', 'adc'),
('ab|cd', 'abc', SUCCEED, 'found', 'ab'),
('ab|cd', 'abcd', SUCCEED, 'found', 'ab'),
('()ef', 'def', SUCCEED, 'found+"-"+g1', 'ef-'),
('*a', '-', SYNTAX_ERROR),
('(*)b', '-', SYNTAX_ERROR),
('$b', 'b', FAIL),
('a\\', '-', SYNTAX_ERROR),
('a\\(b', 'a(b', SUCCEED, 'found+"-"+g1', 'a(b-Error'),
('a\\(*b', 'ab', SUCCEED, 'found', 'ab'),
('a\\(*b', 'a((b', SUCCEED, 'found', 'a((b'),
('a\\\\b', 'a\\b', SUCCEED, 'found', 'a\\b'),
('abc)', '-', SYNTAX_ERROR),
('(abc', '-', SYNTAX_ERROR),
('((a))', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'a-a-a'),
('(a)b(c)', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'abc-a-c'),
('a+b+c', 'aabbabc', SUCCEED, 'found', 'abc'),
('a{1,}b{1,}c', 'aabbabc', SUCCEED, 'found', 'abc'),
('a**', '-', SYNTAX_ERROR),
('a.+?c', 'abcabc', SUCCEED, 'found', 'abc'),
('(a+|b)*', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b){0,}', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)+', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b){1,}', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)?', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
('(a+|b){0,1}', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
(')(', '-', SYNTAX_ERROR),
('[^ab]*', 'cde', SUCCEED, 'found', 'cde'),
('abc', '', FAIL),
('a*', '', SUCCEED, 'found', ''),
('([abc])*d', 'abbbcd', SUCCEED, 'found+"-"+g1', 'abbbcd-c'),
('([abc])*bcd', 'abcd', SUCCEED, 'found+"-"+g1', 'abcd-a'),
('a|b|c|d|e', 'e', SUCCEED, 'found', 'e'),
('(a|b|c|d|e)f', 'ef', SUCCEED, 'found+"-"+g1', 'ef-e'),
('abcd*efg', 'abcdefg', SUCCEED, 'found', 'abcdefg'),
('ab*', 'xabyabbbz', SUCCEED, 'found', 'ab'),
('ab*', 'xayabbbz', SUCCEED, 'found', 'a'),
('(ab|cd)e', 'abcde', SUCCEED, 'found+"-"+g1', 'cde-cd'),
('[abhgefdc]ij', 'hij', SUCCEED, 'found', 'hij'),
('^(ab|cd)e', 'abcde', FAIL),
('(abc|)ef', 'abcdef', SUCCEED, 'found+"-"+g1', 'ef-'),
('(a|b)c*d', 'abcd', SUCCEED, 'found+"-"+g1', 'bcd-b'),
('(ab|ab*)bc', 'abc', SUCCEED, 'found+"-"+g1', 'abc-a'),
('a([bc]*)c*', 'abc', SUCCEED, 'found+"-"+g1', 'abc-bc'),
('a([bc]*)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]+)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]*)(c+d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-b-cd'),
('a[bcd]*dcdcde', 'adcdcde', SUCCEED, 'found', 'adcdcde'),
('a[bcd]+dcdcde', 'adcdcde', FAIL),
('(ab|a)b*c', 'abc', SUCCEED, 'found+"-"+g1', 'abc-ab'),
('((a)(b)c)(d)', 'abcd', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'abc-a-b-d'),
('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', SUCCEED, 'found', 'alpha'),
('^a(bc+|b[eh])g|.h$', 'abh', SUCCEED, 'found+"-"+g1', 'bh-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'effgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'ij', SUCCEED, 'found+"-"+g1+"-"+g2', 'ij-ij-j'),
('(bc+d$|ef*g.|h?i(j|k))', 'effg', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('((((((((((a))))))))))', 'a', SUCCEED, 'g10', 'a'),
('((((((((((a))))))))))\\10', 'aa', SUCCEED, 'found', 'aa'),
# Python does not have the same rules for \\41 so this is a syntax error
# ('((((((((((a))))))))))\\41', 'aa', FAIL),
# ('((((((((((a))))))))))\\41', 'a!', SUCCEED, 'found', 'a!'),
('((((((((((a))))))))))\\41', '', SYNTAX_ERROR),
('(?i)((((((((((a))))))))))\\41', '', SYNTAX_ERROR),
('(((((((((a)))))))))', 'a', SUCCEED, 'found', 'a'),
('multiple words of text', 'uh-uh', FAIL),
('multiple words', 'multiple words, yeah', SUCCEED, 'found', 'multiple words'),
('(.*)c(.*)', 'abcde', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcde-ab-de'),
('\\((.*), (.*)\\)', '(a, b)', SUCCEED, 'g2+"-"+g1', 'b-a'),
('[k]', 'ab', FAIL),
('a[-]?c', 'ac', SUCCEED, 'found', 'ac'),
('(abc)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('([a-c]*)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('(?i)abc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)abc', 'XBC', FAIL),
('(?i)abc', 'AXC', FAIL),
('(?i)abc', 'ABX', FAIL),
('(?i)abc', 'XABCY', SUCCEED, 'found', 'ABC'),
('(?i)abc', 'ABABC', SUCCEED, 'found', 'ABC'),
('(?i)ab*c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab*bc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab*bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
('(?i)ab*?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{0,}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab+?bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
('(?i)ab+bc', 'ABC', FAIL),
('(?i)ab+bc', 'ABQ', FAIL),
('(?i)ab{1,}bc', 'ABQ', FAIL),
('(?i)ab+bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{1,}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{1,3}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{3,4}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{4,5}?bc', 'ABBBBC', FAIL),
('(?i)ab??bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
('(?i)ab??bc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab{0,1}?bc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab??bc', 'ABBBBC', FAIL),
('(?i)ab??c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab{0,1}?c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)^abc$', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)^abc$', 'ABCC', FAIL),
('(?i)^abc', 'ABCC', SUCCEED, 'found', 'ABC'),
('(?i)^abc$', 'AABC', FAIL),
('(?i)abc$', 'AABC', SUCCEED, 'found', 'ABC'),
('(?i)^', 'ABC', SUCCEED, 'found', ''),
('(?i)$', 'ABC', SUCCEED, 'found', ''),
('(?i)a.c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)a.c', 'AXC', SUCCEED, 'found', 'AXC'),
('(?i)a.*?c', 'AXYZC', SUCCEED, 'found', 'AXYZC'),
('(?i)a.*c', 'AXYZD', FAIL),
('(?i)a[bc]d', 'ABC', FAIL),
('(?i)a[bc]d', 'ABD', SUCCEED, 'found', 'ABD'),
('(?i)a[b-d]e', 'ABD', FAIL),
('(?i)a[b-d]e', 'ACE', SUCCEED, 'found', 'ACE'),
('(?i)a[b-d]', 'AAC', SUCCEED, 'found', 'AC'),
('(?i)a[-b]', 'A-', SUCCEED, 'found', 'A-'),
('(?i)a[b-]', 'A-', SUCCEED, 'found', 'A-'),
('(?i)a[b-a]', '-', SYNTAX_ERROR),
('(?i)a[]b', '-', SYNTAX_ERROR),
('(?i)a[', '-', SYNTAX_ERROR),
('(?i)a]', 'A]', SUCCEED, 'found', 'A]'),
('(?i)a[]]b', 'A]B', SUCCEED, 'found', 'A]B'),
('(?i)a[^bc]d', 'AED', SUCCEED, 'found', 'AED'),
('(?i)a[^bc]d', 'ABD', FAIL),
('(?i)a[^-b]c', 'ADC', SUCCEED, 'found', 'ADC'),
('(?i)a[^-b]c', 'A-C', FAIL),
('(?i)a[^]b]c', 'A]C', FAIL),
('(?i)a[^]b]c', 'ADC', SUCCEED, 'found', 'ADC'),
('(?i)ab|cd', 'ABC', SUCCEED, 'found', 'AB'),
('(?i)ab|cd', 'ABCD', SUCCEED, 'found', 'AB'),
('(?i)()ef', 'DEF', SUCCEED, 'found+"-"+g1', 'EF-'),
('(?i)*a', '-', SYNTAX_ERROR),
('(?i)(*)b', '-', SYNTAX_ERROR),
('(?i)$b', 'B', FAIL),
('(?i)a\\', '-', SYNTAX_ERROR),
('(?i)a\\(b', 'A(B', SUCCEED, 'found+"-"+g1', 'A(B-Error'),
('(?i)a\\(*b', 'AB', SUCCEED, 'found', 'AB'),
('(?i)a\\(*b', 'A((B', SUCCEED, 'found', 'A((B'),
('(?i)a\\\\b', 'A\\B', SUCCEED, 'found', 'A\\B'),
('(?i)abc)', '-', SYNTAX_ERROR),
('(?i)(abc', '-', SYNTAX_ERROR),
('(?i)((a))', 'ABC', SUCCEED, 'found+"-"+g1+"-"+g2', 'A-A-A'),
('(?i)(a)b(c)', 'ABC', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABC-A-C'),
('(?i)a+b+c', 'AABBABC', SUCCEED, 'found', 'ABC'),
('(?i)a{1,}b{1,}c', 'AABBABC', SUCCEED, 'found', 'ABC'),
('(?i)a**', '-', SYNTAX_ERROR),
('(?i)a.+?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
('(?i)a.*?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
('(?i)a.{0,5}?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
('(?i)(a+|b)*', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b){0,}', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b)+', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b){1,}', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b)?', 'AB', SUCCEED, 'found+"-"+g1', 'A-A'),
('(?i)(a+|b){0,1}', 'AB', SUCCEED, 'found+"-"+g1', 'A-A'),
('(?i)(a+|b){0,1}?', 'AB', SUCCEED, 'found+"-"+g1', '-None'),
('(?i))(', '-', SYNTAX_ERROR),
('(?i)[^ab]*', 'CDE', SUCCEED, 'found', 'CDE'),
('(?i)abc', '', FAIL),
('(?i)a*', '', SUCCEED, 'found', ''),
('(?i)([abc])*d', 'ABBBCD', SUCCEED, 'found+"-"+g1', 'ABBBCD-C'),
('(?i)([abc])*bcd', 'ABCD', SUCCEED, 'found+"-"+g1', 'ABCD-A'),
('(?i)a|b|c|d|e', 'E', SUCCEED, 'found', 'E'),
('(?i)(a|b|c|d|e)f', 'EF', SUCCEED, 'found+"-"+g1', 'EF-E'),
('(?i)abcd*efg', 'ABCDEFG', SUCCEED, 'found', 'ABCDEFG'),
('(?i)ab*', 'XABYABBBZ', SUCCEED, 'found', 'AB'),
('(?i)ab*', 'XAYABBBZ', SUCCEED, 'found', 'A'),
('(?i)(ab|cd)e', 'ABCDE', SUCCEED, 'found+"-"+g1', 'CDE-CD'),
('(?i)[abhgefdc]ij', 'HIJ', SUCCEED, 'found', 'HIJ'),
('(?i)^(ab|cd)e', 'ABCDE', FAIL),
('(?i)(abc|)ef', 'ABCDEF', SUCCEED, 'found+"-"+g1', 'EF-'),
('(?i)(a|b)c*d', 'ABCD', SUCCEED, 'found+"-"+g1', 'BCD-B'),
('(?i)(ab|ab*)bc', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-A'),
('(?i)a([bc]*)c*', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-BC'),
('(?i)a([bc]*)(c*d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-BC-D'),
('(?i)a([bc]+)(c*d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-BC-D'),
('(?i)a([bc]*)(c+d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-B-CD'),
('(?i)a[bcd]*dcdcde', 'ADCDCDE', SUCCEED, 'found', 'ADCDCDE'),
('(?i)a[bcd]+dcdcde', 'ADCDCDE', FAIL),
('(?i)(ab|a)b*c', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-AB'),
('(?i)((a)(b)c)(d)', 'ABCD', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'ABC-A-B-D'),
('(?i)[a-zA-Z_][a-zA-Z0-9_]*', 'ALPHA', SUCCEED, 'found', 'ALPHA'),
('(?i)^a(bc+|b[eh])g|.h$', 'ABH', SUCCEED, 'found+"-"+g1', 'BH-None'),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFGZ', SUCCEED, 'found+"-"+g1+"-"+g2', 'EFFGZ-EFFGZ-None'),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'IJ', SUCCEED, 'found+"-"+g1+"-"+g2', 'IJ-IJ-J'),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFG', FAIL),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'BCDD', FAIL),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'REFFGZ', SUCCEED, 'found+"-"+g1+"-"+g2', 'EFFGZ-EFFGZ-None'),
('(?i)((((((((((a))))))))))', 'A', SUCCEED, 'g10', 'A'),
('(?i)((((((((((a))))))))))\\10', 'AA', SUCCEED, 'found', 'AA'),
#('(?i)((((((((((a))))))))))\\41', 'AA', FAIL),
#('(?i)((((((((((a))))))))))\\41', 'A!', SUCCEED, 'found', 'A!'),
('(?i)(((((((((a)))))))))', 'A', SUCCEED, 'found', 'A'),
('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a))))))))))', 'A', SUCCEED, 'g1', 'A'),
('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a|b|c))))))))))', 'C', SUCCEED, 'g1', 'C'),
('(?i)multiple words of text', 'UH-UH', FAIL),
('(?i)multiple words', 'MULTIPLE WORDS, YEAH', SUCCEED, 'found', 'MULTIPLE WORDS'),
('(?i)(.*)c(.*)', 'ABCDE', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCDE-AB-DE'),
('(?i)\\((.*), (.*)\\)', '(A, B)', SUCCEED, 'g2+"-"+g1', 'B-A'),
('(?i)[k]', 'AB', FAIL),
# ('(?i)abcd', 'ABCD', SUCCEED, 'found+"-"+\\found+"-"+\\\\found', 'ABCD-$&-\\ABCD'),
# ('(?i)a(bc)d', 'ABCD', SUCCEED, 'g1+"-"+\\g1+"-"+\\\\g1', 'BC-$1-\\BC'),
('(?i)a[-]?c', 'AC', SUCCEED, 'found', 'AC'),
('(?i)(abc)\\1', 'ABCABC', SUCCEED, 'g1', 'ABC'),
('(?i)([a-c]*)\\1', 'ABCABC', SUCCEED, 'g1', 'ABC'),
('a(?!b).', 'abad', SUCCEED, 'found', 'ad'),
('a(?=d).', 'abad', SUCCEED, 'found', 'ad'),
('a(?=c|d).', 'abad', SUCCEED, 'found', 'ad'),
('a(?:b|c|d)(.)', 'ace', SUCCEED, 'g1', 'e'),
('a(?:b|c|d)*(.)', 'ace', SUCCEED, 'g1', 'e'),
('a(?:b|c|d)+?(.)', 'ace', SUCCEED, 'g1', 'e'),
('a(?:b|(c|e){1,2}?|d)+?(.)', 'ace', SUCCEED, 'g1 + g2', 'ce'),
('^(.+)?B', 'AB', SUCCEED, 'g1', 'A'),
# Comments using the (?#...) syntax
('w(?# comment', 'w', SYNTAX_ERROR),
('w(?# comment 1)xy(?# comment 2)z', 'wxyz', SUCCEED, 'found', 'wxyz'),
# Check odd placement of embedded pattern modifiers
('w(?i)', 'W', SYNTAX_ERROR),
# Comments using the x embedded pattern modifier
("""(?x)w# comment 1
x y
# comment 2
z""", 'wxyz', SUCCEED, 'found', 'wxyz'),
# using the m embedded pattern modifier
#('^abc', """jkl
#abc
#xyz""", FAIL),
('(?m)^abc', """jkl
abc
xyz""", SUCCEED, 'found', 'abc'),
('(?m)abc$', """jkl
xyzabc
123""", SUCCEED, 'found', 'abc'),
# using the s embedded pattern modifier
('a.b', 'a\nb', FAIL),
('(?s)a.b', 'a\nb', SUCCEED, 'found', 'a\nb'),
# test \w, etc. both inside and outside character classes
('\\w+', '--ab_cd0123--', SUCCEED, 'found', 'ab_cd0123'),
('[\\w]+', '--ab_cd0123--', SUCCEED, 'found', 'ab_cd0123'),
('\\D+', '1234abc5678', SUCCEED, 'found', 'abc'),
('[\\D]+', '1234abc5678', SUCCEED, 'found', 'abc'),
('[\\da-fA-F]+', '123abc', SUCCEED, 'found', '123abc'),
('[\\d-x]', '-', SYNTAX_ERROR),
(r'([\s]*)([\S]*)([\s]*)', ' testing!1972', SUCCEED, 'g3+g2+g1', 'testing!1972 '),
(r'(\s*)(\S*)(\s*)', ' testing!1972', SUCCEED, 'g3+g2+g1', 'testing!1972 '),
(r'\xff', '\377', SUCCEED, 'found', chr(255)),
#(r'\x00ff', '\377', SUCCEED, 'found', chr(255)),
#(r'\t\n\v\r\f\a\g', '\t\n\v\r\f\ag', SUCCEED, 'found', '\t\n\v\r\f\ag'),
('\t\n\v\r\f\a\g', '\t\n\v\r\f\ag', SUCCEED, 'found', '\t\n\v\r\f\ag'),
#(r'\t\n\v\r\f\a', '\t\n\v\r\f\a', SUCCEED, 'found', chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)),
#(r'[\t][\n][\v][\r][\f][\b]', '\t\n\v\r\f\b', SUCCEED, 'found', '\t\n\v\r\f\b'),
]
| |
from __future__ import print_function
import numpy as np
import sys
import os
import re
import glob
import time
from zipfile import ZipFile
from PIL import Image
from core import Cache
from core import scipy_diffev
#from core import diffev_function_motmaster as diffev_function
from core import notification
from core import preset
from core import status_of_convergence
from core import create_parameter_list_bounds_and_init
from core import typecast_parameter_values
from core import get_cache
from core import track_and_update_metric
try:
from motmaster_wrapper import single_param_single_shot
from motmaster_wrapper import multi_param_single_shot
except ImportError:  # ModuleNotFoundError is Python 3.6+ only; ImportError covers both
print('Not compatible with motmaster execution')
import py_thorlabs_ctrl.kinesis
py_thorlabs_ctrl.kinesis.init(r'C:\Program Files\Thorlabs\Kinesis')
from py_thorlabs_ctrl.kinesis.motor import TCubeDCServo
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
return [atoi(c) for c in re.split(r'(\d+)', text) ]
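# e.g. natural_keys('img10.tif') -> ['img', 10, '.tif'], so sorting with
# key=natural_keys puts 'img2.tif' before 'img10.tif' (numeric order),
# unlike plain lexicographic sorting.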
def get_images(cache):
dst_filepath = os.path.join(cache.dirpath_timestamp, '{}.zip'.format(cache.successful_run))
zipobj = ZipFile(dst_filepath, 'w')
filenames = glob.glob(os.path.join(cache.dirpath, "*.tif"))
filenames.sort(key=natural_keys)
assert len(filenames) % 2 == 0
images = []
for filename in filenames:
image = np.array(Image.open(filename), dtype=float)
images.append(image)
zipobj.write(filename, os.path.basename(filename))
os.remove(filename)
zipobj.close()
return np.array(images, dtype=np.float64)
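# A note on the cost metric below (an inference from the code, not a spec):
# get_images returns an even number of frames that alternate between two
# probe configurations (the assert above enforces the even count);
# image_processor compares the summed background-subtracted counts of the
# odd-indexed frames to the even-indexed ones (n) and returns 1 - n, so a
# minimizing optimizer drives n upward.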
def image_processor(cache):
bg_sub_images = cache.images - cache.bgs
n = np.sum(bg_sub_images[1::2])/np.sum(bg_sub_images[0::2])
return 1.0-n
def get_shim_currents(theta, phi):
split = 1.4e6 # 1.4 MHz/Gauss is rate of splitting of the lines
Gx = 0.7e3 # kHz/mA
Gy = 1.14e3 # kHz/mA
Gz = 2.93e3 # kHz/mA
B = 300e-3 # Gauss
volt_to_mA = 100 # mA/V
Bx0Volt = -1.35 # Volt
By0Volt = -1.92 # Volt
Bz0Volt = -0.22 # Volt
Bx0 = Bx0Volt*volt_to_mA*Gx/split # Gauss
By0 = By0Volt*volt_to_mA*Gy/split # Gauss
Bz0 = Bz0Volt*volt_to_mA*Gz/split # Gauss
Bxp = B*np.sin(theta)*np.cos(phi) # Gauss
Byp = B*np.sin(theta)*np.sin(phi) # Gauss
Bzp = B*np.cos(theta) # Gauss
Bx = ((Bx0 + Bxp)*split/Gx)/volt_to_mA # Volt
By = ((By0 + Byp)*split/Gy)/volt_to_mA # Volt
Bz = ((Bz0 + Bzp)*split/Gz)/volt_to_mA # Volt
return Bx, By, Bz
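# Round-trip sanity check: for theta=0, phi=0 the bias field points along
# z, so Bxp = Byp = 0 and the x/y outputs collapse (up to rounding) to the
# offset voltages, e.g.
#     Bx, By, Bz = get_shim_currents(0.0, 0.0)  # Bx ~ -1.35 V, By ~ -1.92 V
# because the Gauss -> Volt conversion exactly inverts the Volt -> Gauss one.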
def diffev_function_motmaster(w, *args):
cache = get_cache(args)
cache.w = typecast_parameter_values(w, cache)
# unpack the cache values and pre process for
# field and polarization variation
# phi, angle_servo_0, angle_servo_1, costheta = cache.w
# theta = np.arccos(costheta)
# final_positions = [angle_servo_0, angle_servo_1]
# unpack the cache values and pre process for
# polarization variation only
angle_servo_0, angle_servo_1 = cache.w
final_positions = [angle_servo_0, angle_servo_1]
#Bx, By, Bz = get_shim_currents(theta, phi)
Bx, By, Bz = -1.35, -1.92, -0.22
# set the rotation drivers
for servo, final_position in zip(cache.servos, final_positions):
servo.move_absolute(final_position)
# check if the servos reached the final positions
for servo, final_position in zip(cache.servos, final_positions):
current_pos = servo.get_position()
while np.abs(current_pos - final_position) > 0.1:
time.sleep(1)
current_pos = servo.get_position()
if (cache.iteration % cache.reset_interval) == 0:
if not single_param_single_shot(
cache.script_name,
cache.coil_current_parameter,
float(cache.coil_current_offvalue)):
print('Error occurred during mot master process in bg')
sys.exit(0)
cache.bgs = get_images(cache)
cache.successful_run += 1
if not multi_param_single_shot(
cache.script_name,
["xShimLoadCurrent", "yShimLoadCurrent", "zShimLoadCurrent"],
[float(Bx), float(By), float(Bz)]):
print('Error occurred during mot master process in evolution')
sys.exit(0)
cache.images = get_images(cache)
cache.successful_run += 1
print('No. of successful runs:', cache.successful_run)
cache.output = image_processor(cache)
cache = track_and_update_metric(cache)
return cache.output
def main():
cache = Cache()
# parameter names and their corresponding bounds
parameter_names_and_bounds = {}
#parameter_names_and_bounds["costheta"] = [(-1, 1), 0.25, 'double']
#parameter_names_and_bounds["phi"] = [(0, 6.28), 3.14, 'double']
parameter_names_and_bounds["angle_servo_0"] = [(0, 90), 0, 'int']
parameter_names_and_bounds["angle_servo_1"] = [(0, 90), 0, 'int']
cache.servos = [TCubeDCServo(83817788), TCubeDCServo(83825463)]
for servo in cache.servos:
servo.create()
servo.enable()
servo.set_velocity(max_velocity = 25, acceleration = 25)
# attach dictionary to cache
cache.parameter_names_and_bounds = parameter_names_and_bounds
# MOTMaster execution parameters
cache.script_name = "OptPumpDiffEvol"
cache.coil_current_parameter = "MOTBField"
cache.coil_current_offvalue = 0.0
# scipy differential evolution parameters
cache.diffev_popsize = 15
cache.diffev_tol = 1e-3
cache.diffev_atol = 1e-3
cache.diffev_mutation = 0.9
cache.diffev_recombination = 0.5
cache.diffev_seed = None
cache.diffev_callback = None
cache.diffev_disp = False
cache.diffev_polish = True
cache.diffev_function = diffev_function_motmaster
cache.diffev_args = [cache]
cache.diffev_strategy = 'best1bin'
cache.diffev_maxiter = 10
# fileio parameters
cache.dirpath = "C:\Users\cafmot\Desktop\slowing_chirp_optimization"
cache.filename = 'cache'
# evaluation reset parameters
cache.reset_interval = 2000
cache.checkpoint_interval = 1
# create parameters, bounds and initial vectors from the
# parameter_names_and_bounds variable
cache = create_parameter_list_bounds_and_init(cache)
# initialization of tracking and fileio variables
cache = preset(cache)
# actual differential evolution step
cache = scipy_diffev(cache)
# notification step
cache = notification(cache)
# fileio step
cache.save()
for servo in cache.servos:
servo.disable()
servo.disconnect()
return cache
if __name__ == '__main__':
main()
| |
"""Parsing utilities for Wikipedia database dumps."""
from __future__ import print_function
from os.path import basename
from bz2 import BZ2File
from collections import Counter, namedtuple
import gzip
try:
    from html.parser import HTMLParser  # Python 3
except ImportError:
    from HTMLParser import HTMLParser   # Python 2
from itertools import chain
import logging
import re
import xml.etree.ElementTree as etree # don't use LXML, it's slower (!)
import six
from semanticizest._util import ngrams
from semanticizest._version import __version__
_logger = logging.getLogger(__name__)
Page = namedtuple("Page", ['page_id', 'title', 'content', 'redirect'])
def _get_namespace(tag):
try:
namespace = re.match(r"^{(.*?)}", tag).group(1)
except AttributeError:
namespace = ''
if not namespace.startswith("http://www.mediawiki.org/xml/export-"):
raise ValueError("namespace %r not recognized as MediaWiki dump"
% namespace)
return namespace
if six.PY3:
def _tounicode(s):
return s
else:
def _tounicode(s):
# Convert ASCII strings coming from xml.etree.
if isinstance(s, str):
s = s.decode('ascii')
return s
def extract_pages(f):
"""Extract pages from Wikimedia database dump.
Parameters
----------
f : file-like or str
Handle on Wikimedia article dump. May be any type supported by
etree.iterparse.
Returns
-------
pages : iterable over `Page`s
namedtuples containing the fields (page_id, title, content,
redirect). In Python 2.x, may produce either
str or unicode strings.
"""
elems = etree.iterparse(f, events=["end"])
# We can't rely on a fixed namespace for database dumps, since it changes
# every time a small modification to the format is made. So, determine
# it from the first element we find, which will be part of the metadata,
# and construct element paths.
_, elem = next(elems)
namespace = _get_namespace(elem.tag)
ns_mapping = {"ns": namespace}
ns_path = "./{%(ns)s}ns" % ns_mapping
page_tag = "{%(ns)s}page" % ns_mapping
text_path = "./{%(ns)s}revision/{%(ns)s}text" % ns_mapping
id_path = "./{%(ns)s}id" % ns_mapping
title_path = "./{%(ns)s}title" % ns_mapping
redir_path = "./{%(ns)s}redirect" % ns_mapping
for _, elem in elems:
if elem.tag == page_tag:
if elem.find(ns_path).text != '0':
continue
text = elem.find(text_path).text
if text is None:
# Empty article; these occur in Wikinews dumps.
continue
redir = elem.find(redir_path)
redir = (_tounicode(redir.attrib['title'])
if redir is not None else None)
text = _tounicode(text)
title = _tounicode(elem.find(title_path).text)
yield Page(int(elem.find(id_path).text), title, text, redir)
# Prune the element tree, as per
# http://www.ibm.com/developerworks/xml/library/x-hiperfparse/
# We do this only for <page>s, since we need to inspect the
# ./revision/text element. That shouldn't matter since the pages
# comprise the bulk of the file.
elem.clear()
def _clean_link(l):
"""Clean links (anchor and titles)."""
l = l.strip()
l = re.sub(r'\s+', ' ', l)
return l
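# e.g. _clean_link('  foo\n bar ') == 'foo bar': surrounding whitespace is
# stripped and internal runs (including newlines) collapse to single spaces.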
def extract_links(article):
"""Extract all (or most) links from article text (wiki syntax).
Returns an iterable over (target, anchor) pairs.
"""
links = re.findall(r"(\w*) \[\[ ([^]]+) \]\] (\w*)", article,
re.UNICODE | re.VERBOSE)
r = []
for before, l, after in links:
if '|' in l:
target, anchor = l.split('|', 1)
else:
target, anchor = l, l
# If the target contains a colon, assume it's a file or category link.
if ':' in target:
continue
# Some links contain newlines...
target = _clean_link(target)
anchor = _clean_link(anchor)
# Remove section links and normalize to the format used in <redirect>
# elements: uppercase first character, spaces instead of underscores.
target = target.split('#', 1)[0].replace('_', ' ')
if not target:
continue # section link
if not target[0].isupper():
target = target[0].upper() + target[1:]
anchor = before + anchor + after
r.append((target, anchor))
return r
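# Example: extract_links('the [[train station|station]]s') yields
# [('Train station', 'stations')] -- the target gets an uppercase first
# character, and word characters touching the brackets join the anchor.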
_UNWANTED = re.compile(r"""
(:?
\{\{ .*? \}\}
| \{\| .*? \|\}
| ^[|!] .* $ # table content
| <math> .*? </math>
| <ref .*? > .*? </ref>
| <br\s*/>
| </?su[bp]\s*>
| \[\[ [^][:]* : (\[\[.*?\]\]|.)*? \]\] # media, categories
| =+ .*? =+ # headers
| ''+
| ^\* # list bullets
)
""", re.DOTALL | re.MULTILINE | re.UNICODE | re.VERBOSE)
_unescape_entities = HTMLParser().unescape
def clean_text(page):
"""Return the clean-ish running text parts of a page."""
return re.sub(_UNWANTED, "", _unescape_entities(page))
_LINK_SYNTAX = re.compile(r"""
(?:
\[\[
(?: [^]|]* \|)? # "target|" in [[target|anchor]]
|
\]\]
)
""", re.DOTALL | re.MULTILINE | re.VERBOSE)
def remove_links(page):
"""Remove links from clean_text output."""
page = re.sub(r'\]\]\[\[', ' ', page) # hack hack hack, see test
return re.sub(_LINK_SYNTAX, '', page)
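# e.g. remove_links('foo [[bar|baz]] quux') == 'foo baz quux': the target
# part and the brackets are stripped, leaving only the anchor text.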
def page_statistics(page, N, sentence_splitter=None, tokenizer=None):
"""Gather statistics from a single WP page.
The sentence_splitter should be a callable that splits text into sentences.
It defaults to an unspecified heuristic.
See ``parse_dump`` for the parameters.
Returns
-------
stats : (dict, dict)
The first dict maps (target, anchor) pairs to counts.
The second maps n-grams (up to N) to counts.
"""
if N is not None and not isinstance(N, int):
raise TypeError("expected integer or None for N, got %r" % N)
clean = clean_text(page)
link_counts = Counter(extract_links(clean))
if N:
no_links = remove_links(clean)
if sentence_splitter is None:
sentences = re.split(r'(?:\n{2,}|\.\s+)', no_links,
                     flags=re.MULTILINE | re.UNICODE)
else:
sentences = [sentence
             for paragraph in re.split('\n+', no_links)
             for sentence in sentence_splitter(paragraph)]
if tokenizer is None:
tokenizer = re.compile(r'\w+', re.UNICODE).findall
all_ngrams = chain.from_iterable(ngrams(tokenizer(sentence), N)
for sentence in sentences)
ngram_counts = Counter(all_ngrams)
else:
ngram_counts = None
return link_counts, ngram_counts
def _open(f):
if isinstance(f, six.string_types):
if f.endswith('.gz'):
return gzip.open(f)
elif f.endswith('.bz2'):
return BZ2File(f)
return open(f)
return f
def parse_dump(dump, db, N=7, sentence_splitter=None, tokenizer=None):
"""Parse Wikipedia database dump, return n-gram and link statistics.
Parameters
----------
dump : {file-like, str}
Path to or handle on a Wikipedia page dump, e.g.
'chowiki-20140919-pages-articles.xml.bz2'.
db : SQLite connection
Connection to database that will be used to store statistics.
N : integer
Maximum n-gram length. Set this to a false value to disable
n-gram counting; this disables some of the fancier statistics,
but baseline entity linking will still work.
sentence_splitter : callable, optional
Sentence splitter. Called on output of paragraph splitter
(strings).
tokenizer : callable, optional
Tokenizer. Called on output of sentence splitter (strings).
Must return iterable over strings.
"""
f = _open(dump)
redirects = {}
c = db.cursor()
# Store the semanticizer version for later reference
c.execute('''insert into parameters values ('version', ?);''',
(__version__,))
# Store the dump file name
c.execute('''insert into parameters values ('dump', ?);''',
(basename(dump),))
# Store the maximum ngram length, so we can use it later on
c.execute('''insert into parameters values ('N', ?);''', (str(N),))
# Temporary index to speed up insertion
c.execute('''create unique index target_anchor
on linkstats(ngram_id, target)''')
_logger.info("Processing articles")
for i, page in enumerate(extract_pages(f), 1):
if i % 10000 == 0:
_logger.info("%d articles done", i)
if page.redirect is not None:
redirects[page.title] = page.redirect
continue
link, ngram = page_statistics(page.content, N=N, tokenizer=tokenizer,
sentence_splitter=sentence_splitter)
# We don't count the n-grams within the links, but we need them
# in the table, so add them with zero count.
tokens = chain(six.iteritems(ngram or {}),
((anchor, 0) for _, anchor in six.iterkeys(link)))
tokens = list(tokens)
c.executemany('''insert or ignore into ngrams (ngram) values (?)''',
((g,) for g, _ in tokens))
c.executemany('''update ngrams set tf = tf + ?, df = df + 1
where ngram = ?''',
((count, token) for token, count in tokens))
c.executemany('''insert or ignore into linkstats values
((select id from ngrams where ngram = ?), ?, 0)''',
((anchor, target)
for target, anchor in six.iterkeys(link)))
c.executemany('''update linkstats set count = count + ?
where ngram_id = (select rowid from ngrams
where ngram = ?)''',
((count, anchor)
for (_, anchor), count in six.iteritems(link)))
db.commit()
_logger.info("Processing %d redirects", len(redirects))
for redir, target in redirects.items():
for anchor, count in c.execute('''select ngram_id, count from linkstats
where target = ?''', [redir]):
# TODO: combine the next two execute statements
c.execute('''insert or ignore into linkstats values (?, ?, 0)''',
[anchor, target])
c.execute('''update linkstats
set count = count + ?
where target = ? and ngram_id = ?''',
(count, target, anchor))
c.executemany('delete from linkstats where target = ?',
([redir] for redir in redirects))
_logger.info("Finalizing database")
c.executescript('''drop index target_anchor; vacuum;''')
_logger.info("Dump parsing done: processed %d articles", i)
db.commit()
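# Typical invocation (hypothetical paths; assumes `db` already carries the
# semanticizest schema with the `parameters`, `ngrams` and `linkstats`
# tables referenced above):
#     import sqlite3
#     db = sqlite3.connect('model.sqlite3')
#     parse_dump('enwiki-20140919-pages-articles.xml.bz2', db)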
| |
import unittest
import os
try:
import cPickle as pickle
except ImportError:
import pickle as pickle
import skrf as rf
import numpy as npy
from numpy.random import rand
from nose.tools import nottest
from nose.plugins.skip import SkipTest
from skrf.calibration import OnePort, PHN, SDDL, TRL, SOLT, UnknownThru, EightTerm, TwoPortOnePath, EnhancedResponse,TwelveTerm, terminate
from skrf.networkSet import NetworkSet
# number of frequency points to test calibration at.
# 1 is chosen for speed, but given that many tests employ *random*
# networks, values >100 are better for initial verification
global NPTS
NPTS = 1
WG = rf.RectangularWaveguide(rf.F(75,100,NPTS), a=100*rf.mil,z0=50)
class CalibrationTest(object):
'''
This is the generic Calibration test case which all Calibration
subclasses should be able to pass. Subclasses must implement `setUp`
(defining `self.cal`, `self.wg` and `self.n_ports`) and a `measure`
method.
'''
def test_accuracy_of_dut_correction(self):
a = self.wg.random(n_ports=self.n_ports, name = 'actual')
m = self.measure(a)
c = self.cal.apply_cal(m)
c.name = 'corrected'
self.assertEqual(c,a)
def test_error_ntwk(self):
a= self.cal.error_ntwk
def test_coefs_ntwks(self):
a= self.cal.coefs_ntwks
def test_caled_ntwks(self):
a= self.cal.caled_ntwks
def test_residual_ntwks(self):
a= self.cal.residual_ntwks
def test_embed_then_apply_cal(self):
a = self.wg.random(n_ports=self.n_ports)
self.assertEqual(self.cal.apply_cal(self.cal.embed(a)),a)
def test_embed_equal_measure(self):
a = self.wg.random(n_ports=self.n_ports)
self.assertEqual(self.cal.embed(a),self.measure(a))
def test_from_coefs(self):
cal_from_coefs = self.cal.from_coefs(self.cal.frequency, self.cal.coefs)
ntwk = self.wg.random(n_ports=self.n_ports)
if cal_from_coefs.apply_cal(self.cal.embed(ntwk))!= ntwk:
raise ValueError
self.assertEqual(cal_from_coefs.apply_cal(self.cal.embed(ntwk)),ntwk)
def test_from_coefs_ntwks(self):
cal_from_coefs = self.cal.from_coefs_ntwks(self.cal.coefs_ntwks)
ntwk = self.wg.random(n_ports=self.n_ports)
if cal_from_coefs.apply_cal(self.cal.embed(ntwk))!= ntwk:
raise ValueError
self.assertEqual(cal_from_coefs.apply_cal(self.cal.embed(ntwk)),ntwk)
class OnePortTest(unittest.TestCase, CalibrationTest):
'''
One-port calibration test.
'''
def setUp(self):
self.n_ports = 1
self.wg = WG
wg = self.wg
self.E = wg.random(n_ports =2, name = 'E')
ideals = [
wg.short( name='short'),
wg.delay_short( 45.,'deg',name='ew'),
wg.delay_short( 90.,'deg',name='qw'),
wg.match( name='load'),
]
measured = [self.measure(k) for k in ideals]
self.cal = rf.OnePort(
is_reciprocal = True,
ideals = ideals,
measured = measured,
)
def measure(self, ntwk):
out = self.E**ntwk
out.name = ntwk.name
return out
def test_accuracy_of_directivity(self):
self.assertEqual(
self.E.s11,
self.cal.coefs_ntwks['directivity'],
)
def test_accuracy_of_source_match(self):
self.assertEqual(
self.E.s22,
self.cal.coefs_ntwks['source match'],
)
def test_accuracy_of_reflection_tracking(self):
self.assertEqual(
self.E.s21*self.E.s12,
self.cal.coefs_ntwks['reflection tracking'],
)
class SDDLTest(OnePortTest):
def setUp(self):
#raise SkipTest('Doesnt work yet')
self.n_ports = 1
self.wg = WG
wg = self.wg
self.E = wg.random(n_ports =2, name = 'E')
#self.E.s[0,:,:] = npy.array([[.1j,1],[1j,1j+2]])
#print self.E.s[0]
ideals = [
wg.short( name='short'),
wg.delay_short( 45.,'deg',name='ew'),
wg.delay_short( 90.,'deg',name='qw'),
wg.load(.2+.2j, name='load'),
]
actuals = [
wg.short( name='short'),
wg.delay_short( 10.,'deg',name='ew'),
wg.delay_short( 33.,'deg',name='qw'),
wg.load(.2+.2j, name='load'),
]
measured = [self.measure(k) for k in actuals]
self.cal = rf.SDDL(
is_reciprocal = True,
ideals = ideals,
measured = measured,
)
def test_init_with_nones(self):
wg=self.wg
wg.frequency = rf.F.from_f([100])
self.E = wg.random(n_ports =2, name = 'E')
ideals = [
wg.short( name='short'),
None,
None,
wg.load(.2+.2j, name='load'),
]
actuals = [
wg.short( name='short'),
wg.delay_short( 10.,'deg',name='ew'),
wg.delay_short( 33.,'deg',name='qw'),
wg.load(.2+.2j, name='load'),
]
measured = [self.measure(k) for k in actuals]
self.cal = rf.SDDL(
is_reciprocal = True,
ideals = ideals,
measured = measured,
)
self.cal.run()
def test_from_coefs(self):
raise SkipTest('not applicable ')
def test_from_coefs_ntwks(self):
raise SkipTest('not applicable ')
class SDDLWeikle(OnePortTest):
def setUp(self):
#raise SkipTest('Doesnt work yet')
self.n_ports = 1
self.wg = WG
wg = self.wg
self.E = wg.random(n_ports =2, name = 'E')
#self.E.s[0,:,:] = npy.array([[.1j,1],[1j,1j+2]])
#print self.E.s[0]
ideals = [
wg.short( name='short'),
wg.delay_short( 45.,'deg',name='ew'),
wg.delay_short( 90.,'deg',name='qw'),
wg.load(.2+.2j, name='load'),
]
actuals = [
wg.short( name='short'),
wg.delay_short( 10.,'deg',name='ew'),
wg.delay_short( 80.,'deg',name='qw'),
wg.load(.2+.2j, name='load'),
]
measured = [self.measure(k) for k in actuals]
self.cal = rf.SDDLWeikle(
is_reciprocal = True,
ideals = ideals,
measured = measured,
)
def test_from_coefs(self):
raise SkipTest('not applicable ')
def test_from_coefs_ntwks(self):
raise SkipTest('not applicable ')
class SDDMTest(OnePortTest):
'''
This is a specific test of SDDL to verify that it works when the load is
a matched load. This test has been used to show that the SDDLWeikle
variant fails with a perfectly matched load.
'''
def setUp(self):
self.n_ports = 1
self.wg = WG
wg = self.wg
self.E = wg.random(n_ports =2, name = 'E')
ideals = [
wg.short( name='short'),
wg.delay_short( 45.,'deg',name='ew'),
wg.delay_short( 90.,'deg',name='qw'),
wg.match( name='load'),
]
actuals = [
wg.short( name='short'),
wg.delay_short( 10.,'deg',name='ew'),
wg.delay_short( 80.,'deg',name='qw'),
wg.match(name='load'),
]
measured = [self.measure(k) for k in actuals]
self.cal = rf.SDDL(
is_reciprocal = True,
ideals = ideals,
measured = measured,
)
def test_from_coefs(self):
raise SkipTest('not applicable ')
def test_from_coefs_ntwks(self):
raise SkipTest('not applicable ')
@SkipTest
class PHNTest(OnePortTest):
'''
'''
def setUp(self):
self.n_ports = 1
self.wg = WG
wg = self.wg
self.E = wg.random(n_ports =2, name = 'E')
known1 = wg.random()
known2 = wg.random()
#known1 = wg.short()
#known2 = wg.load(rand() + rand()*1j)
ideals = [
wg.delay_short( 45.,'deg',name='ideal ew'),
wg.delay_short( 90.,'deg',name='ideal qw'),
known1,
known2,
]
actuals = [
wg.delay_short( 33.,'deg',name='true ew'),
wg.delay_short( 110.,'deg',name='true qw'),
known1,
known2,
]
measured = [self.measure(k) for k in actuals]
self.actuals = actuals
self.cal = PHN(
is_reciprocal = True,
ideals = ideals,
measured = measured,
)
def test_determine_ideals(self):
self.cal.run()
self.assertEqual(self.actuals[0], self.cal.ideals[0])
self.assertEqual(self.actuals[1], self.cal.ideals[1])
def test_from_coefs(self):
raise SkipTest('not applicable')
def test_from_coefs_ntwks(self):
raise SkipTest('not applicable ')
class EightTermTest(unittest.TestCase, CalibrationTest):
def setUp(self):
self.n_ports = 2
self.wg =WG
wg= self.wg
self.X = wg.random(n_ports =2, name = 'X')
self.Y = wg.random(n_ports =2, name='Y')
self.gamma_f = wg.random(n_ports =1, name='gamma_f')
self.gamma_r = wg.random(n_ports =1, name='gamma_r')
ideals = [
wg.short(nports=2, name='short'),
wg.open(nports=2, name='open'),
wg.match(nports=2, name='load'),
wg.thru(name='thru'),
]
measured = [self.measure(k) for k in ideals]
self.cal = rf.EightTerm(
ideals = ideals,
measured = measured,
switch_terms = (self.gamma_f, self.gamma_r)
)
def terminate(self, ntwk):
'''
terminate a measured network with the switch terms
'''
return terminate(ntwk,self.gamma_f, self.gamma_r)
def measure(self,ntwk):
out = self.terminate(self.X**ntwk**self.Y)
out.name = ntwk.name
return out
def test_unterminating(self):
a = self.wg.random(n_ports=self.n_ports)
# unterminated measurement
ut = self.X**a**self.Y
# terminated measurement
m = self.measure(a)
self.assertEqual(self.cal.unterminate(m), ut)
def test_forward_directivity_accuracy(self):
self.assertEqual(
self.X.s11,
self.cal.coefs_ntwks['forward directivity'])
def test_forward_source_match_accuracy(self):
self.assertEqual(
self.X.s22 ,
self.cal.coefs_ntwks['forward source match'] )
def test_forward_reflection_tracking_accuracy(self):
self.assertEqual(
self.X.s21 * self.X.s12 ,
self.cal.coefs_ntwks['forward reflection tracking'])
def test_reverse_source_match_accuracy(self):
self.assertEqual(
self.Y.s11 ,
self.cal.coefs_ntwks['reverse source match'] )
def test_reverse_directivity_accuracy(self):
self.assertEqual(
self.Y.s22 ,
self.cal.coefs_ntwks['reverse directivity'] )
def test_reverse_reflection_tracking_accuracy(self):
self.assertEqual(
self.Y.s21 * self.Y.s12 ,
self.cal.coefs_ntwks['reverse reflection tracking'])
def test_k_accuracy(self):
self.assertEqual(
self.X.s21/self.Y.s12 ,
self.cal.coefs_ntwks['k'] )
@nottest
def test_verify_12term(self):
self.assertTrue(self.cal.verify_12term_ntwk.s_mag.max() < 1e-3)
class TRLTest(EightTermTest):
def setUp(self):
self.n_ports = 2
self.wg = WG
wg= self.wg
self.X = wg.random(n_ports =2, name = 'X')
self.Y = wg.random(n_ports =2, name='Y')
self.gamma_f = wg.random(n_ports =1, name='gamma_f')
self.gamma_r = wg.random(n_ports =1, name='gamma_r')
# make error networks have s21,s12 >> s11,s22 so that TRL
# can guess at line length
#self.X.s[:,0,0] *=1e-1
#self.Y.s[:,0,0] *=1e-1
#self.X.s[:,1,1] *=1e-1
#self.Y.s[:,1,1] *=1e-1
actuals = [
wg.thru( name='thru'),
wg.short(nports=2, name='short'),
wg.attenuator(-3,True, 45,'deg')
#wg.line(45,'deg',name='line'),
]
self.actuals=actuals
ideals = [
wg.thru( name='thru'),
wg.short(nports=2, name='short'),
wg.line(90,'deg',name='line'),
]
measured = [self.measure(k) for k in actuals]
self.cal = rf.TRL(
ideals = ideals,
measured = measured,
switch_terms = (self.gamma_f, self.gamma_r)
)
def test_found_line(self):
self.cal.run()
self.assertTrue(self.cal.ideals[2]==self.actuals[2])
class TRLWithNoIdealsTest(EightTermTest):
def setUp(self):
self.n_ports = 2
self.wg = WG
wg= self.wg
self.X = wg.random(n_ports =2, name = 'X')
self.Y = wg.random(n_ports =2, name='Y')
self.gamma_f = wg.random(n_ports =1, name='gamma_f')
self.gamma_r = wg.random(n_ports =1, name='gamma_r')
# make error networks have s21,s12 >> s11,s22 so that TRL
# can guess at line length
#self.X.s[:,0,0] *=1e-1
#self.Y.s[:,0,0] *=1e-1
#self.X.s[:,1,1] *=1e-1
#self.Y.s[:,1,1] *=1e-1
ideals = None
actuals = [
wg.thru( name='thru'),
wg.short(nports=2, name='short'),
wg.attenuator(-3,True, 45,'deg')
]
self.actuals=actuals
measured = [self.measure(k) for k in actuals]
self.cal = rf.TRL(
ideals = ideals,
measured = measured,
switch_terms = (self.gamma_f, self.gamma_r)
)
def test_found_line(self):
self.cal.run()
self.assertTrue(self.cal.ideals[2]==self.actuals[2])
class TRLMultiline(EightTermTest):
def setUp(self):
self.n_ports = 2
self.wg = WG
wg= self.wg
self.X = wg.random(n_ports =2, name = 'X')
self.Y = wg.random(n_ports =2, name='Y')
self.gamma_f = wg.random(n_ports =1, name='gamma_f')
self.gamma_r = wg.random(n_ports =1, name='gamma_r')
# make error networks have s21,s12 >> s11,s22 so that TRL
# can guess at line length
#self.X.s[:,0,0] *=1e-1
#self.Y.s[:,0,0] *=1e-1
#self.X.s[:,1,1] *=1e-1
#self.Y.s[:,1,1] *=1e-1
ideals = None
actuals = [
wg.thru( name='thru'),
wg.short(nports=2, name='short'),
wg.short(nports=2, name='open'),
wg.attenuator(-3,True, 45,'deg'),
wg.attenuator(-6,True, 90,'deg'),
wg.attenuator(-8,True, 145,'deg'),
]
self.actuals=actuals
measured = [self.measure(k) for k in actuals]
self.cal = rf.TRL(
ideals = ideals,
measured = measured,
switch_terms = (self.gamma_f, self.gamma_r),
n_reflects=2,
)
def test_found_line(self):
self.cal.run()
for k in range(2,5):
self.assertTrue(self.cal.ideals[k]==self.actuals[k])
class TREightTermTest(unittest.TestCase, CalibrationTest):
def setUp(self):
raise SkipTest()
self.n_ports = 2
self.wg = WG
wg= self.wg
self.X = wg.random(n_ports =2, name = 'X')
self.Y = wg.random(n_ports =2, name='Y')
ideals = [
wg.short(nports=2, name='short'),
wg.open(nports=2, name='open'),
wg.match(nports=2, name='load'),
wg.thru(name='thru'),
]
measured = [self.measure_std(k) for k in ideals]
cal1 = rf.TwoPortOnePath(
ideals = ideals,
measured = measured
)
switch_terms = (cal1.coefs_ntwks['forward switch term'],
cal1.coefs_ntwks['reverse switch term'])
measured = [self.measure(k) for k in ideals]
self.cal = rf.EightTerm(
ideals = ideals,
measured = measured,
switch_terms = switch_terms,
)
raise ValueError()
def measure_std(self,ntwk):
r= self.wg.random(2)
m = ntwk.copy()
mf = self.X**ntwk**self.Y
m.s[:,1,0] = mf.s[:,1,0]
m.s[:,0,0] = mf.s[:,0,0]
m.s[:,1,1] = r.s[:,1,1]
m.s[:,0,1] = r.s[:,0,1]
return m
def measure(self,ntwk):
m = ntwk.copy()
mf = self.X**ntwk**self.Y
mr = self.X**ntwk.flipped()**self.Y
m.s[:,1,0] = mf.s[:,1,0]
m.s[:,0,0] = mf.s[:,0,0]
m.s[:,1,1] = mr.s[:,0,0]
m.s[:,0,1] = mr.s[:,1,0]
return m
class TwelveTermTest(unittest.TestCase, CalibrationTest):
'''
This test verifies the accuracy of the SOLT calibration. Generating
measured networks requires different error networks for the forward and
reverse excitation states; these are described as follows:
forward excitation
used for S21 and S11
Mf = Xf ** S ** Yf
reverse excitation
used for S12 and S22
Mr = Xr ** S ** Yr
'''
def setUp(self):
self.n_ports = 2
self.wg = WG
wg = self.wg
self.Xf = wg.random(n_ports =2, name = 'Xf')
self.Xr = wg.random(n_ports =2, name = 'Xr')
self.Yf = wg.random(n_ports =2, name='Yf')
self.Yr = wg.random(n_ports =2, name='Yr')
ideals = [
wg.short(nports=2, name='short'),
wg.open(nports=2, name='open'),
wg.match(nports=2, name='load'),
wg.random(2,name='rand1'),
wg.random(2,name='rand2'),
]
measured = [ self.measure(k) for k in ideals]
self.cal = rf.TwelveTerm(
ideals = ideals,
measured = measured,
n_thrus=2,
)
def measure(self,ntwk):
m = ntwk.copy()
mf = self.Xf**ntwk**self.Yf
mr = self.Xr**ntwk**self.Yr
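# stitch the two excitation states together as described in the class
# docstring: S11/S21 are taken from the forward measurement Mf and
# S12/S22 from the reverse measurement Mr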
m.s[:,1,0] = mf.s[:,1,0]
m.s[:,0,0] = mf.s[:,0,0]
m.s[:,0,1] = mr.s[:,0,1]
m.s[:,1,1] = mr.s[:,1,1]
return m
def test_forward_directivity_accuracy(self):
self.assertEqual(
self.Xf.s11,
self.cal.coefs_ntwks['forward directivity'])
def test_forward_source_match_accuracy(self):
self.assertEqual(
self.Xf.s22 ,
self.cal.coefs_ntwks['forward source match'] )
def test_forward_load_match_accuracy(self):
self.assertEqual(
self.Yf.s11 ,
self.cal.coefs_ntwks['forward load match'])
def test_forward_reflection_tracking_accuracy(self):
self.assertEqual(
self.Xf.s21 * self.Xf.s12 ,
self.cal.coefs_ntwks['forward reflection tracking'])
def test_forward_transmission_tracking_accuracy(self):
self.assertEqual(
self.Xf.s21*self.Yf.s21 ,
self.cal.coefs_ntwks['forward transmission tracking'])
def test_reverse_source_match_accuracy(self):
self.assertEqual(
self.Yr.s11 ,
self.cal.coefs_ntwks['reverse source match'] )
def test_reverse_directivity_accuracy(self):
self.assertEqual(
self.Yr.s22 ,
self.cal.coefs_ntwks['reverse directivity'] )
def test_reverse_load_match_accuracy(self):
self.assertEqual(
self.Xr.s22 ,
self.cal.coefs_ntwks['reverse load match'])
def test_reverse_reflection_tracking_accuracy(self):
self.assertEqual(
self.Yr.s21 * self.Yr.s12 ,
self.cal.coefs_ntwks['reverse reflection tracking'])
def test_reverse_transmission_tracking_accuracy(self):
self.assertEqual(
self.Yr.s12*self.Xr.s12 ,
self.cal.coefs_ntwks['reverse transmission tracking'])
@nottest
def test_convert_12term_2_8term(self):
converted = rf.convert_8term_2_12term(
rf.convert_12term_2_8term(self.cal.coefs))
for k in converted:
print(('{}-{}'.format(k,abs(self.cal.coefs[k] - converted[k]))))
for k in converted:
self.assertTrue(abs(self.cal.coefs[k] - converted[k])<1e-9)
@nottest
def test_convert_12term_2_8term_correction_accuracy(self):
converted = rf.convert_8term_2_12term(
rf.convert_12term_2_8term(self.cal.coefs))
self.cal._coefs = converted
a = self.wg.random(n_ports=2)
m = self.measure(a)
c = self.cal.apply_cal(m)
self.assertEqual(a,c)
@nottest
def test_verify_12term(self):
self.assertTrue(self.cal.verify_12term_ntwk.s_mag.max() < 1e-3)
class TwelveTermSloppyInitTest(TwelveTermTest):
'''
Test the TwelveTerm.__init__'s ability to
1) determine the number of thrus (n_thrus) heuristically
2) put the standards in the correct order if they use sloppy_input
It must be an entirely separate test because we want to ensure it
creates an accurate calibration.
'''
def setUp(self):
self.n_ports = 2
self.wg = WG
wg = self.wg
self.Xf = wg.random(n_ports =2, name = 'Xf')
self.Xr = wg.random(n_ports =2, name = 'Xr')
self.Yf = wg.random(n_ports =2, name='Yf')
self.Yr = wg.random(n_ports =2, name='Yr')
ideals = [
wg.short(nports=2, name='short'),
wg.open(nports=2, name='open'),
wg.match(nports=2, name='load'),
wg.attenuator(-20,name='atten'),
wg.line(45,'deg',name = 'line') ,
]
measured = [ self.measure(k) for k in ideals]
self.cal= TwelveTerm(
ideals = NetworkSet(ideals).to_dict(),
measured = NetworkSet(measured).to_dict(),
n_thrus=None,
)
def measure(self,ntwk):
m = ntwk.copy()
mf = self.Xf**ntwk**self.Yf
mr = self.Xr**ntwk**self.Yr
m.s[:,1,0] = mf.s[:,1,0]
m.s[:,0,0] = mf.s[:,0,0]
m.s[:,0,1] = mr.s[:,0,1]
m.s[:,1,1] = mr.s[:,1,1]
return m
class SOLTTest(TwelveTermTest):
def setUp(self):
self.n_ports = 2
self.wg = WG
wg = self.wg
self.Xf = wg.random(n_ports =2, name = 'Xf')
self.Xr = wg.random(n_ports =2, name = 'Xr')
self.Yf = wg.random(n_ports =2, name='Yf')
self.Yr = wg.random(n_ports =2, name='Yr')
ideals = [
wg.short(nports=2, name='short'),
wg.open(nports=2, name='open'),
wg.match(nports=2, name='load'),
None,
]
actuals = [
wg.short(nports=2, name='short'),
wg.open(nports=2, name='open'),
wg.match(nports=2, name='load'),
wg.thru(),
]
measured = [ self.measure(k) for k in actuals]
self.cal = SOLT(
ideals = ideals,
measured = measured,
n_thrus=1,
)
class TwoPortOnePathTest(TwelveTermTest):
def setUp(self):
self.n_ports = 2
self.wg =WG
wg = self.wg
self.Xf = wg.random(n_ports =2, name = 'Xf')
self.Yf = wg.random(n_ports =2, name='Yf')
ideals = [
wg.short(nports=2, name='short'),
wg.open(nports=2, name='open'),
wg.match(nports=2, name='load'),
wg.random(2,name='rand1'),
wg.random(2,name='rand2'),
]
measured = [ self.measure(k) for k in ideals]
self.cal = TwoPortOnePath(
ideals = ideals,
measured = measured,
source_port=1,
#n_thrus=2,
)
def measure(self,ntwk):
r= self.wg.random(2)
m = ntwk.copy()
mf = self.Xf**ntwk**self.Yf
m.s[:,1,0] = mf.s[:,1,0]
m.s[:,0,0] = mf.s[:,0,0]
m.s[:,1,1] = r.s[:,1,1]
m.s[:,0,1] = r.s[:,0,1]
return m
def test_accuracy_of_dut_correction(self):
a = self.wg.random(n_ports=self.n_ports, name = 'actual')
f = self.measure(a)
r = self.measure(a.flipped())
c = self.cal.apply_cal((f,r))
c.name = 'corrected'
self.assertEqual(c,a)
def test_embed_then_apply_cal(self):
a = self.wg.random(n_ports=self.n_ports)
f = self.cal.embed(a)
r = self.cal.embed(a.flipped())
self.assertEqual(self.cal.apply_cal((f,r)),a)
def test_embed_equal_measure(self):
# measurement procedure is different, so this test doesn't apply
raise SkipTest()
def test_from_coefs(self):
cal_from_coefs = self.cal.from_coefs(self.cal.frequency, self.cal.coefs)
ntwk = self.wg.random(n_ports=self.n_ports)
def test_from_coefs_ntwks(self):
cal_from_coefs = self.cal.from_coefs_ntwks(self.cal.coefs_ntwks)
def test_reverse_source_match_accuracy(self):
raise SkipTest()
def test_reverse_directivity_accuracy(self):
raise SkipTest()
def test_reverse_load_match_accuracy(self):
raise SkipTest()
def test_reverse_reflection_tracking_accuracy(self):
raise SkipTest()
def test_reverse_transmission_tracking_accuracy(self):
raise SkipTest()
class UnknownThruTest(EightTermTest):
def setUp(self):
self.n_ports = 2
self.wg = WG
wg= self.wg
self.X = wg.random(n_ports =2, name = 'X')
self.Y = wg.random(n_ports =2, name='Y')
self.gamma_f = wg.random(n_ports =1, name='gamma_f')
self.gamma_r = wg.random(n_ports =1, name='gamma_r')
actuals = [
wg.short(nports=2, name='short'),
wg.open(nports=2, name='open'),
wg.match(nports=2, name='match'),
wg.impedance_mismatch(50,45)**wg.line(20,'deg',name='line')**wg.impedance_mismatch(45,50)
]
ideals = [
wg.short(nports=2, name='short'),
wg.open(nports=2, name='open'),
wg.match(nports=2, name='match'),
wg.thru(name='thru'),
]
measured = [self.measure(k) for k in actuals]
self.cal = rf.UnknownThru(
ideals = ideals,
measured = measured,
switch_terms = [self.gamma_f, self.gamma_r]
)
class MRCTest(EightTermTest):
def setUp(self):
self.n_ports = 2
self.wg = WG
wg= self.wg
self.X = wg.random(n_ports =2, name = 'X')
self.Y = wg.random(n_ports =2, name='Y')
self.gamma_f = wg.random(n_ports =1, name='gamma_f')
self.gamma_r = wg.random(n_ports =1, name='gamma_r')
def delay_shorts(d1,d2):
ds1 = wg.delay_short(d1,'deg')
ds2 = wg.delay_short(d2,'deg')
return rf.two_port_reflect(ds1,ds2)
actuals = [
wg.short(nports=2, name='short'),
delay_shorts(65,130),
delay_shorts(120,75),
wg.load(.2+.2j,nports=2, name='match'),
wg.impedance_mismatch(50,45)**wg.line(20,'deg',name='line')**wg.impedance_mismatch(45,50)
]
ideals = [
wg.short(nports=2, name='short'),
delay_shorts(45,90),
delay_shorts(90,45),
wg.load(.2+.2j,nports=2, name='match'),
wg.thru(name='thru'),
]
measured = [self.measure(k) for k in actuals]
self.cal = rf.MRC(
ideals = ideals,
measured = measured,
switch_terms = [self.gamma_f, self.gamma_r]
)
class TwelveTermToEightTermTest(unittest.TestCase, CalibrationTest):
'''
This test verifies the accuracy of the SOLT calibration, when used
on an error-box (8-term) model.
'''
def setUp(self):
self.n_ports = 2
wg= rf.wr10
wg.frequency = rf.F.from_f([100])
self.wg = wg
self.X = wg.random(n_ports =2, name = 'X')
self.Y = wg.random(n_ports =2, name='Y')
self.gamma_f = wg.random(n_ports =1, name='gamma_f')
self.gamma_r = wg.random(n_ports =1, name='gamma_r')
ideals = [
wg.short(nports=2, name='short'),
wg.open(nports=2, name='open'),
wg.match(nports=2, name='load'),
wg.thru(name='thru'),
]
measured = [ self.measure(k) for k in ideals]
self.cal = rf.TwelveTerm(
ideals = ideals,
measured = measured,
)
coefs = rf.calibration.convert_12term_2_8term(self.cal.coefs, redundant_k=1)
coefs = NetworkSet.from_s_dict(coefs,
frequency=self.cal.frequency).to_dict()
self.coefs= coefs
def measure(self,ntwk):
m = ntwk.copy()
return terminate(m, self.gamma_f, self.gamma_r)
def test_forward_switch_term(self):
self.assertEqual(self.coefs['forward switch term'], self.gamma_f)
def test_reverse_switch_term(self):
self.assertEqual(self.coefs['reverse switch term'], self.gamma_r)
@nottest
def test_k(self):
self.assertEqual(self.coefs['k'], self.X.s21/self.Y.s12 )
def test_verify_12term(self):
self.assertTrue(self.cal.verify_12term_ntwk.s_mag.max() < 1e-3)
if __name__ == "__main__":
unittest.main()
| |
import shutil
import tempfile
import time
from ethereum import spv
import ethereum
# import the submodules aliased below so the attribute lookups succeed
import ethereum.blocks
import ethereum.processblock
import ethereum.transactions
import ethereum.vm
import ethereum.db as db
import ethereum.opcodes as opcodes
import ethereum.abi as abi
from ethereum import slogging
from ethereum.slogging import LogRecorder, configure_logging, set_level
from ethereum.utils import to_string
from ethereum.config import Env
from ethereum._solidity import get_solidity
import rlp
from rlp.utils import decode_hex, encode_hex, ascii_chr
serpent = None
u = ethereum.utils
t = ethereum.transactions
b = ethereum.blocks
pb = ethereum.processblock
vm = ethereum.vm
accounts = []
keys = []
for i in range(10):
keys.append(u.sha3(to_string(i)))
accounts.append(u.privtoaddr(keys[-1]))
k0, k1, k2, k3, k4, k5, k6, k7, k8, k9 = keys[:10]
a0, a1, a2, a3, a4, a5, a6, a7, a8, a9 = accounts[:10]
languages = {}
_solidity = get_solidity()
if _solidity:
languages['solidity'] = _solidity
seed = 3 ** 160
def dict_without(d, *args):
o = {}
for k, v in list(d.items()):
if k not in args:
o[k] = v
return o
def dict_with(d, **kwargs):
o = {}
for k, v in list(d.items()):
o[k] = v
for k, v in list(kwargs.items()):
o[k] = v
return o
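# e.g. dict_without({'a': 1, 'b': 2}, 'b') == {'a': 1}
# and  dict_with({'a': 1}, b=2) == {'a': 1, 'b': 2}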
# Pseudo-RNG (deterministic for now for testing purposes)
def rand():
global seed
seed = pow(seed, 2, 2 ** 512)
return seed % 2 ** 256
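# Squaring modulo 2**512 gives the same sequence on every run (handy for
# reproducible tests); only the low 256 bits are returned. This is not a
# cryptographically strong generator.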
class TransactionFailed(Exception):
pass
class ContractCreationFailed(Exception):
pass
class ABIContract():
def __init__(self, _state, _abi, address, listen=True, log_listener=None):
self.address = address
self._translator = abi.ContractTranslator(_abi)
self.abi = _abi
if listen:
if not log_listener:
listener = lambda log: self._translator.listen(log, noprint=False)
else:
def listener(log):
r = self._translator.listen(log, noprint=True)
if r:
log_listener(r)
_state.block.log_listeners.append(listener)
def kall_factory(f):
def kall(*args, **kwargs):
o = _state._send(kwargs.get('sender', k0),
self.address,
kwargs.get('value', 0),
self._translator.encode(f, args),
**dict_without(kwargs, 'sender', 'value', 'output'))
# Compute output data
if kwargs.get('output', '') == 'raw':
outdata = o['output']
elif not o['output']:
outdata = None
else:
outdata = self._translator.decode(f, o['output'])
outdata = outdata[0] if len(outdata) == 1 else outdata
# Format output
if kwargs.get('profiling', ''):
return dict_with(o, output=outdata)
else:
return outdata
return kall
for f in self._translator.function_data:
vars(self)[f] = kall_factory(f)
class state():
def __init__(self, num_accounts=len(keys)):
global serpent
if not serpent:
serpent = __import__('serpent')
self.temp_data_dir = tempfile.mkdtemp()
self.db = db.EphemDB()
self.env = Env(self.db)
o = {}
for i in range(num_accounts):
o[accounts[i]] = {"wei": 10 ** 24}
for i in range(1, 5):
o[u.int_to_addr(i)] = {"wei": 1}
self.block = b.genesis(self.env, start_alloc=o)
self.blocks = [self.block]
self.block.timestamp = 1410973349
self.block.coinbase = a0
self.block.gas_limit = 10 ** 9
def __del__(self):
shutil.rmtree(self.temp_data_dir)
def contract(self, code, sender=k0, endowment=0, language='serpent', gas=None):
if language not in languages:
languages[language] = __import__(language)
language = languages[language]
evm = language.compile(code)
o = self.evm(evm, sender, endowment)
assert len(self.block.get_code(o)), "Contract code empty"
return o
def abi_contract(self, code, sender=k0, endowment=0, language='serpent', contract_name='',
gas=None, log_listener=None, listen=True):
if contract_name:
assert language == 'solidity'
cn_args = dict(contract_name=contract_name)
else:
cn_args = {}
if language not in languages:
languages[language] = __import__(language)
language = languages[language]
evm = language.compile(code, **cn_args)
address = self.evm(evm, sender, endowment, gas)
assert len(self.block.get_code(address)), "Contract code empty"
_abi = language.mk_full_signature(code, **cn_args)
return ABIContract(self, _abi, address, listen=listen, log_listener=log_listener)
def evm(self, evm, sender=k0, endowment=0, gas=None):
sendnonce = self.block.get_nonce(u.privtoaddr(sender))
tx = t.contract(sendnonce, gas_price, gas_limit, endowment, evm)
tx.sign(sender)
if gas is not None:
tx.startgas = gas
# print('starting', tx.startgas, gas_limit)
(s, a) = pb.apply_transaction(self.block, tx)
if not s:
raise ContractCreationFailed()
return a
def call(self, *args, **kwargs):
raise Exception("Call deprecated. Please use the abi_contract "
"mechanism or send(sender, to, value, "
"data) directly, using the abi module to generate "
"data if needed")
def _send(self, sender, to, value, evmdata='', output=None,
funid=None, abi=None, profiling=0):
if funid is not None or abi is not None:
raise Exception("Send with funid+abi is deprecated. Please use"
" the abi_contract mechanism")
tm, g = time.time(), self.block.gas_used
sendnonce = self.block.get_nonce(u.privtoaddr(sender))
tx = t.Transaction(sendnonce, gas_price, gas_limit, to, value, evmdata)
self.last_tx = tx
tx.sign(sender)
recorder = LogRecorder() if profiling > 1 else None
(s, o) = pb.apply_transaction(self.block, tx)
if not s:
raise TransactionFailed()
out = {"output": o}
if profiling > 0:
zero_bytes = tx.data.count(ascii_chr(0))
non_zero_bytes = len(tx.data) - zero_bytes
intrinsic_gas_used = opcodes.GTXDATAZERO * zero_bytes + \
opcodes.GTXDATANONZERO * non_zero_bytes
ntm, ng = time.time(), self.block.gas_used
out["time"] = ntm - tm
out["gas"] = ng - g - intrinsic_gas_used
if profiling > 1:
trace = recorder.pop_records()
ops = [x['op'] for x in trace if x['event'] == 'vm']
opdict = {}
for op in ops:
opdict[op] = opdict.get(op, 0) + 1
out["ops"] = opdict
return out
def profile(self, *args, **kwargs):
kwargs['profiling'] = True
return self._send(*args, **kwargs)
def send(self, *args, **kwargs):
return self._send(*args, **kwargs)["output"]
def mkspv(self, sender, to, value, data=[], funid=None, abi=None):
sendnonce = self.block.get_nonce(u.privtoaddr(sender))
if funid is not None:
evmdata = serpent.encode_abi(funid, *abi)
else:
evmdata = serpent.encode_datalist(*data)
tx = t.Transaction(sendnonce, gas_price, gas_limit, to, value, evmdata)
self.last_tx = tx
tx.sign(sender)
return spv.mk_transaction_spv_proof(self.block, tx)
def verifyspv(self, sender, to, value, data=[],
funid=None, abi=None, proof=[]):
sendnonce = self.block.get_nonce(u.privtoaddr(sender))
if funid is not None:
evmdata = serpent.encode_abi(funid, *abi)
else:
evmdata = serpent.encode_datalist(*data)
tx = t.Transaction(sendnonce, gas_price, gas_limit, to, value, evmdata)
self.last_tx = tx
tx.sign(sender)
return spv.verify_transaction_spv_proof(self.block, tx, proof)
def trace(self, sender, to, value, data=[]):
# collect log events (independent of loglevel filters)
recorder = LogRecorder()
self.send(sender, to, value, data)
return recorder.pop_records()
def mine(self, n=1, coinbase=a0):
for i in range(n):
self.block.finalize()
self.block.commit_state()
self.db.put(self.block.hash, rlp.encode(self.block))
timestamp = self.block.timestamp + 6 + rand() % 12
x = b.Block.init_from_parent(self.block, coinbase, timestamp=timestamp)
# copy listeners
x.log_listeners = self.block.log_listeners
self.block = x
self.blocks.append(self.block)
def snapshot(self):
return rlp.encode(self.block)
def revert(self, data):
self.block = rlp.decode(data, b.Block, env=self.env)
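# Sketch of checkpoint/rollback usage (names as defined in this module):
#     s = state()
#     snap = s.snapshot()
#     ... sends / mining that should be undone ...
#     s.revert(snap)
# revert() only restores the head block; self.blocks keeps its history.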
# logging
def set_logging_level(lvl=0):
trace_lvl_map = [
':info',
'eth.vm.log:trace',
':info,eth.vm.log:trace,eth.vm.exit:trace',
':info,eth.vm.log:trace,eth.vm.op:trace,eth.vm.stack:trace',
':info,eth.vm.log:trace,eth.vm.op:trace,eth.vm.stack:trace,' +
'eth.vm.storage:trace,eth.vm.memory:trace'
]
configure_logging(config_string=trace_lvl_map[lvl])
if lvl == 0:
set_level(None, 'info')
print('Set logging level: %d' % lvl)
def set_log_trace(logger_names=[]):
"""
sets all named loggers to level 'trace'
attention: vm.op.* are only active if vm.op is active
"""
for name in logger_names:
assert name in slogging.get_logger_names()
slogging.set_level(name, 'trace')
def enable_logging():
set_logging_level(1)
def disable_logging():
set_logging_level(0)
gas_limit = 1000000
gas_price = 1
| |
from pypy.rlib.parsing.ebnfparse import parse_ebnf, make_parse_function
from pypy.rlib.parsing.tree import RPythonVisitor
import py
from ripe import compiler, ripedir
from ripe.objects import W_Integer
grammar = py.path.local(ripedir).join("grammar.txt").read("rt")
regexs, rules, ToAST = parse_ebnf(grammar)
_parse = make_parse_function(regexs, rules, eof=True)
BASES = {"BINARY" : 2, "OCTAL" : 8, "DECIMAL" : 10, "HEX" : 16}
class Node(object):
"""
An AST node.
"""
def __eq__(self, other):
return (
self.__class__ == other.__class__ and
self.__dict__ == other.__dict__
)
def __ne__(self, other):
return not self == other
def __repr__(self):
contents = ("%s=%r" % (k, v) for k, v in self.__dict__.iteritems())
return "<%s %s>" % (self.__class__.__name__, ", ".join(contents))
class Compound(Node):
def __init__(self, statements=None):
if statements is None:
statements = []
self.statements = statements
def compile(self, context):
for statement in self.statements:
statement.compile(context)
class Expression(Node):
def __init__(self, expr):
self.expr = expr
def compile(self, context):
self.expr.compile(context)
context.emit(compiler.DISCARD_TOP)
class Assign(Node):
def __init__(self, name, expr):
self.name = name
self.expr = expr
def compile(self, context):
self.expr.compile(context)
context.emit(compiler.ASSIGN, context.register_variable(self.name))
class Variable(Node):
def __init__(self, name):
self.name = name
def compile(self, context):
context.emit(
compiler.LOAD_VARIABLE, context.register_variable(self.name),
)
class BinOp(Node):
def __init__(self, left, op, right):
self.left = left
self.op = op
self.right = right
def compile(self, context):
self.left.compile(context)
self.right.compile(context)
context.emit(compiler.BINOP[self.op])
class Int(Node):
def __init__(self, value):
self.value = value
def compile(self, context):
w_self = W_Integer(self.value)
context.emit(compiler.LOAD_CONSTANT, context.register_constant(w_self))
def neg(self):
return self.__class__(-self.value)
class SingleQString(Node):
def __init__(self, value):
self.value = value
class DoubleQString(Node):
def __init__(self, value):
self.value = value
class If(Node):
def __init__(self, condition, body):
self.condition = condition
self.body = body
def compile(self, context):
self.condition.compile(context)
context.emit(compiler.JUMP_IF_FALSE, 0)
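# 0 is a placeholder operand; after the body is compiled, the byte at
# jmp_pos is back-patched with the offset just past the body (the same
# scheme is used by Unless, While and Until below)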
jmp_pos = len(context.data) - 1
self.body.compile(context)
context.data[jmp_pos] = chr(len(context.data))
class Unless(Node):
def __init__(self, condition, body):
self.condition = condition
self.body = body
def compile(self, context):
self.condition.compile(context)
context.emit(compiler.JUMP_IF_TRUE, 0)
jmp_pos = len(context.data) - 1
self.body.compile(context)
context.data[jmp_pos] = chr(len(context.data))
class While(Node):
def __init__(self, condition, body):
self.condition = condition
self.body = body
def compile(self, context):
start_pos = len(context.data)
self.condition.compile(context)
context.emit(compiler.JUMP_IF_FALSE, 0)
jmp_pos = len(context.data) - 1
self.body.compile(context)
context.emit(compiler.JUMP_BACKWARD, start_pos)
context.data[jmp_pos] = chr(len(context.data))
class Until(Node):
def __init__(self, condition, body):
self.condition = condition
self.body = body
def compile(self, context):
start_pos = len(context.data)
self.condition.compile(context)
context.emit(compiler.JUMP_IF_TRUE, 0)
jmp_pos = len(context.data) - 1
self.body.compile(context)
context.emit(compiler.JUMP_BACKWARD, start_pos)
context.data[jmp_pos] = chr(len(context.data))
class Puts(Node):
# XXX
def __init__(self, expr):
self.expr = expr
def compile(self, context):
self.expr.compile(context)
context.emit(compiler.PUTS, 0)
class Method(Node):
def __init__(self, name, params, body):
self.name = name
self.params = params
self.body = body
class Transformer(RPythonVisitor):
def dispatch(self, node):
return getattr(self, "visit_%s" % node.symbol)(node)
def visit_program(self, node):
if not node.children:
return Compound()
statements, = node.children
return self.dispatch(statements)
def visit_statements(self, node):
return Compound([
self.dispatch(statement) for statement in node.children
])
def visit_expression_statement(self, node):
expression, = node.children
return Expression(self.dispatch(expression))
def visit_assignment_statement(self, node):
variable, obj = node.children[0].children[0].children
variable = "".join(v.additional_info for v in variable.children)
obj, = obj.children
return Assign(variable, self.dispatch(obj))
def visit_numeric_literal(self, node):
number = self.dispatch(node.children[-1])
if len(node.children) == 2:
sign, _ = node.children
if sign.additional_info.count("-") % 2:
return number.neg()
return number
def visit_integer_literal(self, node):
integer, = node.children
base = integer.symbol.split("_", 1)[0]
value = integer.additional_info
value = value[2:] if value.startswith("0") and value != "0" else value
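# Base prefixes such as "0b"/"0o"/"0x" are stripped before int() conversion;
# e.g. (hypothetical literals, the exact syntax depends on grammar.txt)
# "0b101" -> Int(5), "0xff" -> Int(255).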
return Int(int(value, BASES[base]))
def visit_string_literal(self, node):
string, = node.children
value = string.additional_info
end = len(value) - 1
assert end > 0
value = value[1:end]
if string.symbol == "SINGLE_QUOTED_STRING":
return SingleQString(value)
elif string.symbol == "DOUBLE_QUOTED_STRING":
return DoubleQString(value)
raise NotImplementedError
def visit_variable(self, node):
name = "".join(v.additional_info for v in node.children)
return Variable(name)
def visit_equality_expression(self, node):
left, op, right = node.children
return BinOp(
self.dispatch(left), op.additional_info, self.dispatch(right)
)
def visit_if_expression(self, node):
condition, then = node.children
body, = then.children
return If(self.dispatch(condition), self.dispatch(body))
def visit_unless_expression(self, node):
condition, then = node.children
body, = then.children
return Unless(self.dispatch(condition), self.dispatch(body))
def visit_while_expression(self, node):
condition, do = node.children
body, = do.children
return While(self.dispatch(condition), self.dispatch(body))
def visit_until_expression(self, node):
condition, do = node.children
body, = do.children
return Until(self.dispatch(condition), self.dispatch(body))
def visit_puts_statement(self, node):
# XXX
return Puts(self.dispatch(node.children[0]))
def visit_method_definition(self, node):
name, params_node, body = node.children
name, body = name.additional_info, self.dispatch(body)
if params_node.children:
params_list, = params_node.children
params = [param.additional_info for param in params_list.children]
else:
params = []
return Method(name, params, body)
transformer = Transformer()
def parse(source, transformer=transformer):
"""
Parse the source code and produce an AST.
"""
ast = ToAST().transform(_parse(source))
return transformer.visit_program(ast)
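# A minimal usage sketch (the source snippet is hypothetical; the surface
# syntax is defined by grammar.txt):
#
#   ast = parse("x = 42")
#   # -> a Compound whose statements assign Int(42) to the variable "x"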
| |
from collections import Counter
import gym
import numpy as np
import os
import random
import time
import unittest
import ray
from ray.rllib.agents.pg import PGTrainer
from ray.rllib.agents.a3c import A2CTrainer
from ray.rllib.env.vector_env import VectorEnv
from ray.rllib.evaluation.rollout_worker import RolloutWorker
from ray.rllib.evaluation.metrics import collect_metrics
from ray.rllib.evaluation.postprocessing import compute_advantages
from ray.rllib.examples.policy.random_policy import RandomPolicy
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID, SampleBatch
from ray.rllib.utils.annotations import override
from ray.rllib.utils.test_utils import check, framework_iterator
from ray.tune.registry import register_env
class MockPolicy(RandomPolicy):
@override(RandomPolicy)
def compute_actions(self,
obs_batch,
state_batches=None,
prev_action_batch=None,
prev_reward_batch=None,
episodes=None,
explore=None,
timestep=None,
**kwargs):
return np.array([random.choice([0, 1])] * len(obs_batch)), [], {}
@override(Policy)
def postprocess_trajectory(self,
batch,
other_agent_batches=None,
episode=None):
assert episode is not None
super().postprocess_trajectory(batch, other_agent_batches, episode)
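# compute_advantages(rollout, last_r=100.0, gamma=0.9, ...): plain
# discounted returns, since both GAE and the critic are disabled here.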
return compute_advantages(
batch, 100.0, 0.9, use_gae=False, use_critic=False)
class BadPolicy(RandomPolicy):
@override(RandomPolicy)
def compute_actions(self,
obs_batch,
state_batches=None,
prev_action_batch=None,
prev_reward_batch=None,
episodes=None,
explore=None,
timestep=None,
**kwargs):
raise Exception("intentional error")
class FailOnStepEnv(gym.Env):
def __init__(self):
self.observation_space = gym.spaces.Discrete(1)
self.action_space = gym.spaces.Discrete(2)
def reset(self):
raise ValueError("kaboom")
def step(self, action):
raise ValueError("kaboom")
class MockEnv(gym.Env):
def __init__(self, episode_length, config=None):
self.episode_length = episode_length
self.config = config
self.i = 0
self.observation_space = gym.spaces.Discrete(1)
self.action_space = gym.spaces.Discrete(2)
def reset(self):
self.i = 0
return self.i
def step(self, action):
self.i += 1
return 0, 1, self.i >= self.episode_length, {}
class MockEnv2(gym.Env):
def __init__(self, episode_length):
self.episode_length = episode_length
self.i = 0
self.observation_space = gym.spaces.Discrete(100)
self.action_space = gym.spaces.Discrete(2)
def reset(self):
self.i = 0
return self.i
def step(self, action):
self.i += 1
return self.i, 100, self.i >= self.episode_length, {}
class MockVectorEnv(VectorEnv):
def __init__(self, episode_length, num_envs):
super().__init__(
observation_space=gym.spaces.Discrete(1),
action_space=gym.spaces.Discrete(2),
num_envs=num_envs)
self.envs = [MockEnv(episode_length) for _ in range(num_envs)]
@override(VectorEnv)
def vector_reset(self):
return [e.reset() for e in self.envs]
@override(VectorEnv)
def reset_at(self, index):
return self.envs[index].reset()
@override(VectorEnv)
def vector_step(self, actions):
obs_batch, rew_batch, done_batch, info_batch = [], [], [], []
for i in range(len(self.envs)):
obs, rew, done, info = self.envs[i].step(actions[i])
obs_batch.append(obs)
rew_batch.append(rew)
done_batch.append(done)
info_batch.append(info)
return obs_batch, rew_batch, done_batch, info_batch
@override(VectorEnv)
def get_unwrapped(self):
return self.envs
class TestRolloutWorker(unittest.TestCase):
@classmethod
def setUpClass(cls):
ray.init(num_cpus=5)
@classmethod
def tearDownClass(cls):
ray.shutdown()
def test_basic(self):
ev = RolloutWorker(
env_creator=lambda _: gym.make("CartPole-v0"), policy=MockPolicy)
batch = ev.sample()
for key in [
"obs", "actions", "rewards", "dones", "advantages",
"prev_rewards", "prev_actions"
]:
self.assertIn(key, batch)
self.assertGreater(np.abs(np.mean(batch[key])), 0)
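# Expected prev_* columns: each value shifted one step forward, with the
# shift reset at episode boundaries (where "dones" is True).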
def to_prev(vec):
out = np.zeros_like(vec)
for i, v in enumerate(vec):
if i + 1 < len(out) and not batch["dones"][i]:
out[i + 1] = v
return out.tolist()
self.assertEqual(batch["prev_rewards"].tolist(),
to_prev(batch["rewards"]))
self.assertEqual(batch["prev_actions"].tolist(),
to_prev(batch["actions"]))
self.assertGreater(batch["advantages"][0], 1)
ev.stop()
def test_batch_ids(self):
ev = RolloutWorker(
env_creator=lambda _: gym.make("CartPole-v0"),
policy=MockPolicy,
rollout_fragment_length=1)
batch1 = ev.sample()
batch2 = ev.sample()
self.assertEqual(len(set(batch1["unroll_id"])), 1)
self.assertEqual(len(set(batch2["unroll_id"])), 1)
self.assertEqual(
len(set(SampleBatch.concat(batch1, batch2)["unroll_id"])), 2)
ev.stop()
def test_global_vars_update(self):
# ray.init() may already have been called in setUpClass; allow reinit.
ray.init(num_cpus=5, ignore_reinit_error=True)
for fw in framework_iterator(frameworks=()):
agent = A2CTrainer(
env="CartPole-v0",
config={
"num_workers": 1,
"lr_schedule": [[0, 0.1], [100000, 0.000001]],
"framework": fw,
})
result = agent.train()
for i in range(10):
result = agent.train()
print("num_steps_sampled={}".format(
result["info"]["num_steps_sampled"]))
print("num_steps_trained={}".format(
result["info"]["num_steps_trained"]))
print("num_steps_sampled={}".format(
result["info"]["num_steps_sampled"]))
print("num_steps_trained={}".format(
result["info"]["num_steps_trained"]))
if i == 0:
self.assertGreater(
result["info"]["learner"]["default_policy"]["cur_lr"],
0.01)
if result["info"]["learner"]["default_policy"]["cur_lr"] < \
0.07:
break
self.assertLess(
result["info"]["learner"]["default_policy"]["cur_lr"], 0.07)
agent.stop()
def test_no_step_on_init(self):
register_env("fail", lambda _: FailOnStepEnv())
for fw in framework_iterator(frameworks=()):
pg = PGTrainer(
env="fail", config={
"num_workers": 1,
"framework": fw,
})
self.assertRaises(Exception, lambda: pg.train())
pg.stop()
def test_callbacks(self):
for fw in framework_iterator(frameworks=("torch", "tf")):
counts = Counter()
pg = PGTrainer(
env="CartPole-v0", config={
"num_workers": 0,
"rollout_fragment_length": 50,
"train_batch_size": 50,
"callbacks": {
"on_episode_start":
lambda x: counts.update({"start": 1}),
"on_episode_step":
lambda x: counts.update({"step": 1}),
"on_episode_end": lambda x: counts.update({"end": 1}),
"on_sample_end":
lambda x: counts.update({"sample": 1}),
},
"framework": fw,
})
pg.train()
pg.train()
self.assertGreater(counts["sample"], 0)
self.assertGreater(counts["start"], 0)
self.assertGreater(counts["end"], 0)
self.assertGreater(counts["step"], 0)
pg.stop()
def test_query_evaluators(self):
register_env("test", lambda _: gym.make("CartPole-v0"))
for fw in framework_iterator(frameworks=("torch", "tf")):
pg = PGTrainer(
env="test",
config={
"num_workers": 2,
"rollout_fragment_length": 5,
"num_envs_per_worker": 2,
"framework": fw,
})
results = pg.workers.foreach_worker(
lambda ev: ev.rollout_fragment_length)
results2 = pg.workers.foreach_worker_with_index(
lambda ev, i: (i, ev.rollout_fragment_length))
results3 = pg.workers.foreach_worker(
lambda ev: ev.foreach_env(lambda env: 1))
self.assertEqual(results, [10, 10, 10])
self.assertEqual(results2, [(0, 10), (1, 10), (2, 10)])
self.assertEqual(results3, [[1, 1], [1, 1], [1, 1]])
pg.stop()
def test_action_clipping(self):
from ray.rllib.examples.env.random_env import RandomEnv
action_space = gym.spaces.Box(-2.0, 1.0, (3, ))
# Clipping: True (clip between Policy's action_space.low/high).
ev = RolloutWorker(
env_creator=lambda _: RandomEnv(config=dict(
action_space=action_space,
max_episode_len=10,
p_done=0.0,
check_action_bounds=True,
)),
policy=RandomPolicy,
policy_config=dict(
action_space=action_space,
ignore_action_bounds=True,
),
clip_actions=True,
batch_mode="complete_episodes")
sample = ev.sample()
# Check whether the action bounds have been breached (expected).
# We still got here because we clipped according to the Env's action
# space.
self.assertGreater(np.max(sample["actions"]), action_space.high[0])
self.assertLess(np.min(sample["actions"]), action_space.low[0])
ev.stop()
# Clipping: False and RandomPolicy produces invalid actions.
# Expect Env to complain.
ev2 = RolloutWorker(
env_creator=lambda _: RandomEnv(config=dict(
action_space=action_space,
max_episode_len=10,
p_done=0.0,
check_action_bounds=True,
)),
policy=RandomPolicy,
policy_config=dict(
action_space=action_space,
ignore_action_bounds=True,
),
clip_actions=False, # <- should lead to Env complaining
batch_mode="complete_episodes")
self.assertRaisesRegex(ValueError, r"Illegal action", ev2.sample)
ev2.stop()
# Clipping: False and RandomPolicy produces valid (bounded) actions.
# Expect "actions" in SampleBatch to be unclipped.
ev3 = RolloutWorker(
env_creator=lambda _: RandomEnv(config=dict(
action_space=action_space,
max_episode_len=10,
p_done=0.0,
check_action_bounds=True,
)),
policy=RandomPolicy,
policy_config=dict(action_space=action_space),
# Should not be a problem as RandomPolicy abides to bounds.
clip_actions=False,
batch_mode="complete_episodes")
sample = ev3.sample()
self.assertGreater(np.min(sample["actions"]), action_space.low[0])
self.assertLess(np.max(sample["actions"]), action_space.high[0])
ev3.stop()
def test_reward_clipping(self):
# Clipping: True (clip between -1.0 and 1.0).
ev = RolloutWorker(
env_creator=lambda _: MockEnv2(episode_length=10),
policy=MockPolicy,
clip_rewards=True,
batch_mode="complete_episodes")
self.assertEqual(max(ev.sample()["rewards"]), 1)
result = collect_metrics(ev, [])
self.assertEqual(result["episode_reward_mean"], 1000)
ev.stop()
from ray.rllib.examples.env.random_env import RandomEnv
# Clipping to a fixed range (-2.0, 2.0).
ev2 = RolloutWorker(
env_creator=lambda _: RandomEnv(
dict(
reward_space=gym.spaces.Box(low=-10, high=10, shape=()),
p_done=0.0,
max_episode_len=10,
)),
policy=MockPolicy,
clip_rewards=2.0,
batch_mode="complete_episodes")
sample = ev2.sample()
self.assertEqual(max(sample["rewards"]), 2.0)
self.assertEqual(min(sample["rewards"]), -2.0)
self.assertLess(np.mean(sample["rewards"]), 0.5)
self.assertGreater(np.mean(sample["rewards"]), -0.5)
ev2.stop()
# Clipping: Off.
ev2 = RolloutWorker(
env_creator=lambda _: MockEnv2(episode_length=10),
policy=MockPolicy,
clip_rewards=False,
batch_mode="complete_episodes")
self.assertEqual(max(ev2.sample()["rewards"]), 100)
result2 = collect_metrics(ev2, [])
self.assertEqual(result2["episode_reward_mean"], 1000)
ev2.stop()
def test_hard_horizon(self):
ev = RolloutWorker(
env_creator=lambda _: MockEnv2(episode_length=10),
policy=MockPolicy,
batch_mode="complete_episodes",
rollout_fragment_length=10,
episode_horizon=4,
soft_horizon=False)
samples = ev.sample()
# Three logical episodes and correct episode resets (always after 4
# steps).
self.assertEqual(len(set(samples["eps_id"])), 3)
for i in range(4):
self.assertEqual(np.argmax(samples["obs"][i]), i)
self.assertEqual(np.argmax(samples["obs"][4]), 0)
# 3 done values.
self.assertEqual(sum(samples["dones"]), 3)
ev.stop()
# The Trainer's horizon (6) is smaller than the gym env's own
# max_episode_steps (200 for CartPole-v0), so the horizon drives the resets.
ev = RolloutWorker(
env_creator=lambda _: gym.make("CartPole-v0"),
policy=MockPolicy,
batch_mode="complete_episodes",
rollout_fragment_length=10,
episode_horizon=6,
soft_horizon=False)
samples = ev.sample()
# 12 steps due to `complete_episodes` batch_mode.
self.assertEqual(len(samples["eps_id"]), 12)
# Two logical episodes and correct episode resets (always after 6(!)
# steps).
self.assertEqual(len(set(samples["eps_id"])), 2)
# 2 done values after 6 and 12 steps.
check(samples["dones"], [
False, False, False, False, False, True, False, False, False,
False, False, True
])
ev.stop()
def test_soft_horizon(self):
ev = RolloutWorker(
env_creator=lambda _: MockEnv(episode_length=10),
policy=MockPolicy,
batch_mode="complete_episodes",
rollout_fragment_length=10,
episode_horizon=4,
soft_horizon=True)
samples = ev.sample()
# three logical episodes
self.assertEqual(len(set(samples["eps_id"])), 3)
# only 1 hard done value
self.assertEqual(sum(samples["dones"]), 1)
ev.stop()
def test_metrics(self):
ev = RolloutWorker(
env_creator=lambda _: MockEnv(episode_length=10),
policy=MockPolicy,
batch_mode="complete_episodes")
remote_ev = RolloutWorker.as_remote().remote(
env_creator=lambda _: MockEnv(episode_length=10),
policy=MockPolicy,
batch_mode="complete_episodes")
ev.sample()
ray.get(remote_ev.sample.remote())
result = collect_metrics(ev, [remote_ev])
self.assertEqual(result["episodes_this_iter"], 20)
self.assertEqual(result["episode_reward_mean"], 10)
ev.stop()
def test_async(self):
ev = RolloutWorker(
env_creator=lambda _: gym.make("CartPole-v0"),
sample_async=True,
policy=MockPolicy)
batch = ev.sample()
for key in ["obs", "actions", "rewards", "dones", "advantages"]:
self.assertIn(key, batch)
self.assertGreater(batch["advantages"][0], 1)
ev.stop()
def test_auto_vectorization(self):
ev = RolloutWorker(
env_creator=lambda cfg: MockEnv(episode_length=20, config=cfg),
policy=MockPolicy,
batch_mode="truncate_episodes",
rollout_fragment_length=2,
num_envs=8)
for _ in range(8):
batch = ev.sample()
self.assertEqual(batch.count, 16)
result = collect_metrics(ev, [])
self.assertEqual(result["episodes_this_iter"], 0)
for _ in range(8):
batch = ev.sample()
self.assertEqual(batch.count, 16)
result = collect_metrics(ev, [])
self.assertEqual(result["episodes_this_iter"], 8)
indices = []
for env in ev.async_env.vector_env.envs:
self.assertEqual(env.unwrapped.config.worker_index, 0)
indices.append(env.unwrapped.config.vector_index)
self.assertEqual(indices, [0, 1, 2, 3, 4, 5, 6, 7])
ev.stop()
def test_batches_larger_when_vectorized(self):
ev = RolloutWorker(
env_creator=lambda _: MockEnv(episode_length=8),
policy=MockPolicy,
batch_mode="truncate_episodes",
rollout_fragment_length=4,
num_envs=4)
batch = ev.sample()
self.assertEqual(batch.count, 16)
result = collect_metrics(ev, [])
self.assertEqual(result["episodes_this_iter"], 0)
batch = ev.sample()
result = collect_metrics(ev, [])
self.assertEqual(result["episodes_this_iter"], 4)
ev.stop()
def test_vector_env_support(self):
ev = RolloutWorker(
env_creator=lambda _: MockVectorEnv(episode_length=20, num_envs=8),
policy=MockPolicy,
batch_mode="truncate_episodes",
rollout_fragment_length=10)
for _ in range(8):
batch = ev.sample()
self.assertEqual(batch.count, 10)
result = collect_metrics(ev, [])
self.assertEqual(result["episodes_this_iter"], 0)
for _ in range(8):
batch = ev.sample()
self.assertEqual(batch.count, 10)
result = collect_metrics(ev, [])
self.assertEqual(result["episodes_this_iter"], 8)
ev.stop()
def test_truncate_episodes(self):
ev = RolloutWorker(
env_creator=lambda _: MockEnv(10),
policy=MockPolicy,
rollout_fragment_length=15,
batch_mode="truncate_episodes")
batch = ev.sample()
self.assertEqual(batch.count, 15)
ev.stop()
def test_complete_episodes(self):
ev = RolloutWorker(
env_creator=lambda _: MockEnv(10),
policy=MockPolicy,
rollout_fragment_length=5,
batch_mode="complete_episodes")
batch = ev.sample()
self.assertEqual(batch.count, 10)
ev.stop()
def test_complete_episodes_packing(self):
ev = RolloutWorker(
env_creator=lambda _: MockEnv(10),
policy=MockPolicy,
rollout_fragment_length=15,
batch_mode="complete_episodes")
batch = ev.sample()
self.assertEqual(batch.count, 20)
self.assertEqual(
batch["t"].tolist(),
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
ev.stop()
def test_filter_sync(self):
ev = RolloutWorker(
env_creator=lambda _: gym.make("CartPole-v0"),
policy=MockPolicy,
sample_async=True,
observation_filter="ConcurrentMeanStdFilter")
time.sleep(2)
ev.sample()
filters = ev.get_filters(flush_after=True)
obs_f = filters[DEFAULT_POLICY_ID]
self.assertNotEqual(obs_f.rs.n, 0)
self.assertNotEqual(obs_f.buffer.n, 0)
ev.stop()
def test_get_filters(self):
ev = RolloutWorker(
env_creator=lambda _: gym.make("CartPole-v0"),
policy=MockPolicy,
sample_async=True,
observation_filter="ConcurrentMeanStdFilter")
self.sample_and_flush(ev)
filters = ev.get_filters(flush_after=False)
time.sleep(2)
filters2 = ev.get_filters(flush_after=False)
obs_f = filters[DEFAULT_POLICY_ID]
obs_f2 = filters2[DEFAULT_POLICY_ID]
self.assertGreaterEqual(obs_f2.rs.n, obs_f.rs.n)
self.assertGreaterEqual(obs_f2.buffer.n, obs_f.buffer.n)
ev.stop()
def test_sync_filter(self):
ev = RolloutWorker(
env_creator=lambda _: gym.make("CartPole-v0"),
policy=MockPolicy,
sample_async=True,
observation_filter="ConcurrentMeanStdFilter")
obs_f = self.sample_and_flush(ev)
# Current State
filters = ev.get_filters(flush_after=False)
obs_f = filters[DEFAULT_POLICY_ID]
self.assertLessEqual(obs_f.buffer.n, 20)
new_obsf = obs_f.copy()
new_obsf.rs._n = 100
ev.sync_filters({DEFAULT_POLICY_ID: new_obsf})
filters = ev.get_filters(flush_after=False)
obs_f = filters[DEFAULT_POLICY_ID]
self.assertGreaterEqual(obs_f.rs.n, 100)
self.assertLessEqual(obs_f.buffer.n, 20)
ev.stop()
def test_extra_python_envs(self):
extra_envs = {"env_key_1": "env_value_1", "env_key_2": "env_value_2"}
self.assertFalse("env_key_1" in os.environ)
self.assertFalse("env_key_2" in os.environ)
ev = RolloutWorker(
env_creator=lambda _: MockEnv(10),
policy=MockPolicy,
extra_python_environs=extra_envs)
self.assertTrue("env_key_1" in os.environ)
self.assertTrue("env_key_2" in os.environ)
ev.stop()
# reset to original
del os.environ["env_key_1"]
del os.environ["env_key_2"]
def test_no_env_seed(self):
ev = RolloutWorker(
env_creator=lambda _: MockVectorEnv(episode_length=20, num_envs=8),
policy=MockPolicy,
seed=1)
assert not hasattr(ev.env, "seed")
ev.stop()
def sample_and_flush(self, ev):
time.sleep(2)
ev.sample()
filters = ev.get_filters(flush_after=True)
obs_f = filters[DEFAULT_POLICY_ID]
self.assertNotEqual(obs_f.rs.n, 0)
self.assertNotEqual(obs_f.buffer.n, 0)
return obs_f
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base utilities for loading datasets (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
from os import path
import random
import time
import numpy as np
from six.moves import urllib
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
Dataset = collections.namedtuple('Dataset', ['data', 'target'])
Datasets = collections.namedtuple('Datasets', ['train', 'validation', 'test'])
@deprecated(None, 'Use tf.data instead.')
def load_csv_with_header(filename,
target_dtype,
features_dtype,
target_column=-1):
"""Load dataset from CSV file with a header row."""
with gfile.Open(filename) as csv_file:
data_file = csv.reader(csv_file)
header = next(data_file)
n_samples = int(header[0])
n_features = int(header[1])
data = np.zeros((n_samples, n_features), dtype=features_dtype)
target = np.zeros((n_samples,), dtype=target_dtype)
for i, row in enumerate(data_file):
target[i] = np.asarray(row.pop(target_column), dtype=target_dtype)
data[i] = np.asarray(row, dtype=features_dtype)
return Dataset(data=data, target=target)
@deprecated(None, 'Use tf.data instead.')
def load_csv_without_header(filename,
target_dtype,
features_dtype,
target_column=-1):
"""Load dataset from CSV file without a header row."""
with gfile.Open(filename) as csv_file:
data_file = csv.reader(csv_file)
data, target = [], []
for row in data_file:
target.append(row.pop(target_column))
data.append(np.asarray(row, dtype=features_dtype))
target = np.array(target, dtype=target_dtype)
data = np.array(data)
return Dataset(data=data, target=target)
@deprecated(None, 'Use tf.data instead.')
def shrink_csv(filename, ratio):
"""Create a smaller dataset of only 1/ratio of original data."""
filename_small = filename.replace('.', '_small.')
with gfile.Open(filename_small, 'w') as csv_file_small:
writer = csv.writer(csv_file_small)
with gfile.Open(filename) as csv_file:
reader = csv.reader(csv_file)
i = 0
for row in reader:
if i % ratio == 0:
writer.writerow(row)
i += 1
@deprecated(None, 'Use scikits.learn.datasets.')
def load_iris(data_path=None):
"""Load Iris dataset.
Args:
data_path: string, path to iris dataset (optional)
Returns:
Dataset object containing data in-memory.
"""
if data_path is None:
module_path = path.dirname(__file__)
data_path = path.join(module_path, 'data', 'iris.csv')
return load_csv_with_header(
data_path, target_dtype=np.int, features_dtype=np.float)
@deprecated(None, 'Use scikits.learn.datasets.')
def load_boston(data_path=None):
"""Load Boston housing dataset.
Args:
data_path: string, path to boston dataset (optional)
Returns:
Dataset object containing data in-memory.
"""
if data_path is None:
module_path = path.dirname(__file__)
data_path = path.join(module_path, 'data', 'boston_house_prices.csv')
return load_csv_with_header(
data_path, target_dtype=np.float, features_dtype=np.float)
@deprecated(None, 'Use the retry module or similar alternatives.')
def retry(initial_delay,
max_delay,
factor=2.0,
jitter=0.25,
is_retriable=None):
"""Simple decorator for wrapping retriable functions.
Args:
initial_delay: the initial delay.
max_delay: the maximum delay allowed (actual max is
max_delay * (1 + jitter)).
factor: each subsequent retry, the delay is multiplied by this value
(must be >= 1).
jitter: to avoid lockstep, the returned delay is multiplied by a random
number between (1-jitter) and (1+jitter). To add a 20% jitter, set
jitter = 0.2. Must be < 1.
is_retriable: (optional) a function that takes an Exception as an argument
and returns true if retry should be applied.
"""
if factor < 1:
raise ValueError('factor must be >= 1; was %f' % (factor,))
if jitter >= 1:
raise ValueError('jitter must be < 1; was %f' % (jitter,))
# Generator to compute the individual delays
def delays():
delay = initial_delay
while delay <= max_delay:
yield delay * random.uniform(1 - jitter, 1 + jitter)
delay *= factor
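# With the arguments used below (initial_delay=1.0, factor=2.0,
# max_delay=16.0, jitter=0.25) this yields roughly 1, 2, 4, 8, 16 seconds,
# each scaled by a uniform random factor in [0.75, 1.25].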
def wrap(fn):
"""Wrapper function factory invoked by decorator magic."""
def wrapped_fn(*args, **kwargs):
"""The actual wrapper function that applies the retry logic."""
for delay in delays():
try:
return fn(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
if is_retriable is None:
continue
if is_retriable(e):
time.sleep(delay)
else:
raise
return fn(*args, **kwargs)
return wrapped_fn
return wrap
_RETRIABLE_ERRNOS = {
110, # Connection timed out [socket.py]
}
def _is_retriable(e):
return isinstance(e, IOError) and e.errno in _RETRIABLE_ERRNOS
@deprecated(None, 'Please use urllib or similar directly.')
@retry(initial_delay=1.0, max_delay=16.0, is_retriable=_is_retriable)
def urlretrieve_with_retry(url, filename=None):
return urllib.request.urlretrieve(url, filename)
@deprecated(None, 'Please write your own downloading logic.')
def maybe_download(filename, work_directory, source_url):
"""Download the data from source url, unless it's already here.
Args:
filename: string, name of the file in the directory.
work_directory: string, path to working directory.
source_url: url to download from if file doesn't exist.
Returns:
Path to resulting file.
"""
if not gfile.Exists(work_directory):
gfile.MakeDirs(work_directory)
filepath = os.path.join(work_directory, filename)
if not gfile.Exists(filepath):
temp_file_name, _ = urlretrieve_with_retry(source_url)
gfile.Copy(temp_file_name, filepath)
with gfile.GFile(filepath) as f:
size = f.size()
print('Successfully downloaded', filename, size, 'bytes.')
return filepath
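# A minimal usage sketch (hypothetical filename, directory and URL):
#
#   path = maybe_download('iris.csv', '/tmp/data',
#                         'https://example.com/iris.csv')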
| |
import numpy
import chainer
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import function_node
import chainer.functions
from chainer.functions.math import floor as _floor
from chainer import utils
from chainer.utils import type_check
from chainer import variable
def _convert_value_to_string(value):
if isinstance(value, variable.Variable):
value = value.data
if numpy.isscalar(value):
if value < 0:
return '({})'.format(value)
else:
return str(value)
elif isinstance(value, (numpy.ndarray, cuda.ndarray)):
return 'constant array'
else:
raise ValueError(
'Value must be a scalar, `numpy.ndarray`, `cupy.ndarray` '
'or a `Variable`.\nActual: {}'.format(type(value)))
def _check_constant_type(value):
if numpy.isscalar(value):
return
elif isinstance(value, (numpy.ndarray, cuda.ndarray)):
return
else:
raise TypeError(
'Value must be a scalar, `numpy.ndarray`, `cupy.ndarray` '
'or a `Variable`.\nActual: {}'.format(type(value)))
def _preprocess_const(x, value):
xp = cuda.get_array_module(x)
if not numpy.isscalar(value) and cuda.get_array_module(value) != xp:
# TODO(unno): We can transfer arrays automatically
raise TypeError('Cannot mix cupy.ndarray and numpy.ndarray')
b = xp.broadcast(x, value)
if b.shape != x.shape:
raise ValueError('Failed to broadcast arrays')
return utils.force_type(x.dtype, value)
def _preprocess_rhs(x, value):
if isinstance(value, chainer.Variable):
return value
_check_constant_type(value)
return utils.force_type(x.dtype, value)
class Neg(function_node.FunctionNode):
@property
def label(self):
return '__neg__'
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
def forward(self, x):
self.retain_inputs(())
return utils.force_array(-x[0]),
def backward(self, indexes, gy):
return -gy[0],
def neg(self): # -x
"""Element-wise negation.
Returns:
~chainer.Variable: Output variable.
"""
return Neg().apply((self,))[0]
class Absolute(function_node.FunctionNode):
@property
def label(self):
return '|_|'
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
type_check.expect(in_types[0].dtype.kind == 'f')
def forward(self, x):
self.retain_inputs((0,))
return utils.force_array(abs(x[0])),
def backward(self, indexes, grad_outputs):
x = self.get_retained_inputs()[0]
return AbsoluteGrad(x.data).apply(grad_outputs)
class AbsoluteGrad(function_node.FunctionNode):
def __init__(self, x):
super(AbsoluteGrad, self).__init__()
self.x = x
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_cpu(self, inputs):
return utils.force_array(numpy.sign(self.x) * inputs[0]),
def forward_gpu(self, inputs):
gx0 = cuda.elementwise(
'T x0, T gy', 'T gx0',
'gx0 = ((x0 > 0) - (x0 < 0)) * gy',
'abs_bwd')(self.x, inputs[0])
return gx0,
def backward(self, indexes, grad_outputs):
return AbsoluteGrad(self.x).apply(grad_outputs)
def absolute(self):
"""Element-wise absolute.
Returns:
~chainer.Variable: Output variable.
"""
return Absolute().apply((self,))[0]
class Add(function_node.FunctionNode):
@property
def label(self):
return '_ + _'
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
type_check.expect(
in_types[0].dtype == in_types[1].dtype,
)
type_check.expect_broadcast_shapes(
in_types[0].shape, in_types[1].shape)
def forward(self, x):
# may broadcast
y = utils.force_array(x[0] + x[1])
return y,
def backward(self, indexes, gy):
return tuple(chainer.functions.sum_to(gy[0], self.inputs[i].shape)
for i in indexes)
class AddConstant(function_node.FunctionNode):
def __init__(self, value):
self.value = value
@property
def label(self):
return '_ + %s' % _convert_value_to_string(self.value)
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
def forward(self, x):
value = _preprocess_const(x[0], self.value)
return utils.force_array(x[0] + value),
def backward(self, indexes, gy):
x_node, = self.inputs
return gy
class MultiAdd(function_node.FunctionNode):
def check_type_forward(self, in_types):
for in_type in in_types:
type_check.expect(in_types[0].dtype == in_type.dtype)
def forward(self, xs):
self.len = len(xs)
if len(xs) == 1:
return xs
if (intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(xs)
and all(x.shape == xs[0].shape for x in xs[1:])):
y = intel64.ideep.multi_add(xs)
else:
# The output should be a new array. Add the first two arrays to get the
# result y, then accumulate the remaining arrays onto y.
y = xs[0] + xs[1]
for x in xs[2:]:
if x.shape == y.shape:
y += x
else:
y = x + y
return utils.force_array(y),
def backward(self, indexes, gy):
return tuple(chainer.functions.sum_to(gy[0], x_node.shape)
for x_node in self.inputs)
def add(*xs): # lhs + rhs or add more than 2 variables
"""Element-wise addition.
Returns:
~chainer.Variable: Output variable.
"""
if len(xs) == 2:
lhs, rhs = xs
if numpy.isscalar(rhs):
return AddConstant(rhs).apply((lhs,))[0]
rhs = _preprocess_rhs(lhs, rhs)
return Add().apply((lhs, rhs))[0]
else:
return MultiAdd().apply(xs)[0]
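# A minimal usage sketch (standard Chainer Variable arithmetic):
#
#   a = chainer.Variable(numpy.array([1., 2.], dtype=numpy.float32))
#   y = a + 3.0       # scalar rhs -> AddConstant
#   z = add(a, a, a)  # more than two operands -> MultiAdd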
class Sub(function_node.FunctionNode):
@property
def label(self):
return '_ - _'
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
type_check.expect(in_types[0].dtype == in_types[1].dtype)
type_check.expect_broadcast_shapes(
in_types[0].shape, in_types[1].shape)
def forward(self, x):
# may broadcast
return utils.force_array(x[0] - x[1]),
def backward(self, indexes, gy):
x1, x2 = self.inputs
g, = gy
return (
chainer.functions.sum_to(g, x1.shape) if 0 in indexes else None,
-chainer.functions.sum_to(g, x2.shape) if 1 in indexes else None,
)
def sub(self, rhs): # lhs - rhs
"""Element-wise subtraction.
Returns:
~chainer.Variable: Output variable.
"""
if numpy.isscalar(rhs):
return AddConstant(-rhs).apply((self,))[0]
rhs = _preprocess_rhs(self, rhs)
return Sub().apply((self, rhs))[0]
class SubFromConstant(function_node.FunctionNode):
def __init__(self, value):
self.value = value
@property
def label(self):
return '%s - _' % _convert_value_to_string(self.value)
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
def forward(self, x):
value = _preprocess_const(x[0], self.value)
return utils.force_array(value - x[0]),
def backward(self, indexes, gy):
g, = gy
return -g,
def rsub(self, rhs): # rhs - lhs
"""Element-wise subtraction.
Returns:
~chainer.Variable: Output variable.
"""
if numpy.isscalar(rhs):
return SubFromConstant(rhs).apply((self,))[0]
rhs = _preprocess_rhs(self, rhs)
return Sub().apply((rhs, self))[0]
class Mul(function_node.FunctionNode):
@property
def label(self):
return '_ * _'
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
type_check.expect(
in_types[0].dtype.kind == 'f',
in_types[0].dtype == in_types[1].dtype,
)
type_check.expect_broadcast_shapes(
in_types[0].shape, in_types[1].shape)
def forward(self, x):
self.retain_inputs((0, 1))
# may broadcast
return utils.force_array(x[0] * x[1]),
def backward(self, indexes, gy):
xs = self.get_retained_inputs()
return tuple(
chainer.functions.sum_to(gy[0] * xs[1 - i], xs[i].shape)
for i in indexes
)
class MulConstant(function_node.FunctionNode):
def __init__(self, value):
self.value = value
@property
def label(self):
return '_ * %s' % _convert_value_to_string(self.value)
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
def forward(self, x):
value = _preprocess_const(x[0], self.value)
return utils.force_array(value * x[0]),
def backward(self, indexes, gy):
g, = gy
return self.value * g,
def mul(self, rhs): # lhs * rhs
"""Element-wise multiplication.
Returns:
~chainer.Variable: Output variable.
"""
if numpy.isscalar(rhs):
return MulConstant(rhs).apply((self,))[0]
rhs = _preprocess_rhs(self, rhs)
return Mul().apply((self, rhs))[0]
class Div(function_node.FunctionNode):
@property
def label(self):
return '_ / _'
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
type_check.expect(
in_types[0].dtype.kind == 'f',
in_types[0].dtype == in_types[1].dtype,
)
type_check.expect_broadcast_shapes(
in_types[0].shape, in_types[1].shape)
def forward(self, x):
self.retain_inputs((0, 1))
# may broadcast
return utils.force_array(x[0] / x[1]),
def backward(self, indexes, grad_outputs):
x = self.get_retained_inputs()
return DivGrad().apply((x[0], x[1], grad_outputs[0]))
class DivGrad(function_node.FunctionNode):
def forward_cpu(self, inputs):
self.retain_inputs((0, 1, 2))
x0, x1, gy = inputs
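# d(x0/x1)/dx0 = 1/x1 and d(x0/x1)/dx1 = -x0/x1**2; sum_to undoes any
# broadcasting done in the forward pass.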
gx0 = utils.force_array(gy / x1)
gx1 = utils.force_array(-gx0 * x0 / x1)
return utils.sum_to(gx0, x0.shape), utils.sum_to(gx1, x1.shape)
def forward_gpu(self, inputs):
self.retain_inputs((0, 1, 2))
x0, x1, gy = inputs
gx0, gx1 = cuda.elementwise(
'T x0, T x1, T gy',
'T gx0, T gx1',
'''
gx0 = gy / x1;
gx1 = -gx0 * x0 / x1;
''', 'div_bwd')(x0, x1, gy)
return utils.sum_to(gx0, x0.shape), utils.sum_to(gx1, x1.shape)
def backward(self, indexes, grad_outputs):
x0, x1, gy = self.get_retained_inputs()
ggx0, ggx1 = grad_outputs
ret = []
x1_square = x1 * x1
if 0 in indexes:
if ggx1 is None:
ret.append(None)
else:
gx0 = -ggx1 * gy / x1_square
ret.append(chainer.functions.sum_to(gx0, x0.shape))
if 1 in indexes:
gx1 = None if ggx0 is None else -ggx0 * gy / x1_square
gx1_1 = (None if ggx1 is None else
ggx1 * 2 * gy * x0 / (x1_square * x1))
if gx1 is None:
gx1 = gx1_1
elif gx1_1 is not None:
gx1 += gx1_1
ret.append(None if gx1 is None else
chainer.functions.sum_to(gx1, x1.shape))
if 2 in indexes:
ggy = None if ggx0 is None else ggx0 / x1
ggy_1 = None if ggx1 is None else ggx1 * x0 / x1_square
if ggy is None:
ggy = -ggy_1
elif ggy_1 is not None:
ggy -= ggy_1
ret.append(ggy)
return ret
def div(self, rhs): # lhs / rhs
"""Element-wise division
Returns:
~chainer.Variable: Output variable.
"""
if numpy.isscalar(rhs):
return MulConstant(1. / rhs).apply((self,))[0]
rhs = _preprocess_rhs(self, rhs)
return Div().apply((self, rhs))[0]
class DivFromConstant(function_node.FunctionNode):
def __init__(self, value):
self.value = value
@property
def label(self):
return '%s / _' % _convert_value_to_string(self.value)
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
type_check.expect(in_types[0].dtype.kind == 'f')
def forward(self, x):
self.retain_inputs((0,))
value = _preprocess_const(x[0], self.value)
return utils.force_array(value / x[0]),
def backward(self, indexes, grad_outputs):
x = self.get_retained_inputs()
return DivFromConstantGrad(self.value).apply((x[0], grad_outputs[0]))
class DivFromConstantGrad(function_node.FunctionNode):
def __init__(self, value):
super(DivFromConstantGrad, self).__init__()
self.value = value
def forward_cpu(self, inputs):
self.retain_inputs((0, 1))
x, gy = inputs
value = _preprocess_const(x, self.value)
return utils.force_array(-value * gy / (x ** 2)),
def forward_gpu(self, inputs):
self.retain_inputs((0, 1))
x, gy = inputs
# TODO(beam2d): Make it not use the input
value = _preprocess_const(x, self.value)
return cuda.elementwise('T x, T gy, T value', 'T gx',
'gx = -value * gy / (x * x)',
'div_from_const_bwd')(x, gy, value),
def backward(self, indexes, grad_outputs):
x, gy = self.get_retained_inputs()
value = _preprocess_const(x.data, self.value)
ret = []
if 0 in indexes:
ret.append(grad_outputs[0] * 2 * value * gy / (x ** 3))
if 1 in indexes:
ret.append(grad_outputs[0] * -value / (x ** 2))
return ret
def rdiv(self, rhs): # rhs / lhs
"""Element-wise division.
Returns:
~chainer.Variable: Output variable.
"""
if numpy.isscalar(rhs):
return DivFromConstant(rhs).apply((self,))[0]
rhs = _preprocess_rhs(self, rhs)
return Div().apply((rhs, self))[0]
def floordiv(self, rhs): # lhs // rhs
"""Element-wise floor division.
Returns:
~chainer.Variable: Output variable.
"""
return _floor.floor(div(self, rhs))
def rfloordiv(self, rhs): # rhs // lhs
"""Element-wise floor division.
Returns:
~chainer.Variable: Output variable.
"""
return _floor.floor(rdiv(self, rhs))
class PowVarVar(function_node.FunctionNode):
@property
def label(self):
return '_ ** _'
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
type_check.expect(
in_types[0].dtype.kind == 'f',
in_types[0].dtype == in_types[1].dtype,
)
type_check.expect_broadcast_shapes(
in_types[0].shape, in_types[1].shape)
def forward(self, x):
self.retain_inputs((0, 1))
# may broadcast
self.y = x[0] ** x[1]
return utils.force_array(self.y),
def backward(self, indexes, gy):
inputs = self.get_retained_inputs()
return PowVarVarGrad(self.y).apply((inputs[0], inputs[1], gy[0]))
class PowVarVarGrad(function_node.FunctionNode):
def __init__(self, y):
self.y = y
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
type_check.expect(
in_types[0].dtype.kind == 'f',
in_types[0].dtype == in_types[1].dtype,
in_types[0].dtype == in_types[2].dtype,
)
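# For y = x0 ** x1: dy/dx0 = x1 * x0**(x1 - 1) and dy/dx1 = log(x0) * y.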
def forward_cpu(self, inputs):
self.retain_inputs((0, 1, 2))
x0, x1, gy = inputs
one = x1.dtype.type(1)
gx0 = utils.sum_to(
utils.force_array(x1 * (x0 ** (x1 - one)) * gy), x0.shape)
gx1 = utils.sum_to(
utils.force_array(numpy.log(x0) * self.y * gy), x1.shape)
return gx0, gx1
def forward_gpu(self, inputs):
self.retain_inputs((0, 1, 2))
x0, x1, gy = inputs
gx0, gx1 = cuda.elementwise(
'T x0, T x1, T gy, T y', 'T gx0, T gx1',
'''
gx0 = x1 * pow(x0, x1 - 1) * gy;
gx1 = log(x0) * y * gy;
''', 'pow_var_var_bwd')(x0, x1, gy, self.y)
gx0 = utils.sum_to(gx0, x0.shape)
gx1 = utils.sum_to(gx1, x1.shape)
return gx0, gx1
def backward(self, indexes, ggx):
x0, x1, gy = self.get_retained_inputs()
ggx0, ggx1 = ggx
log_x0 = chainer.functions.log(x0)
pow_x0_x1 = x0 ** x1
pow_x0_x1_1 = x0 ** (x1 - 1)
pow_x0_x1_2 = x0 ** (x1 - 2)
ret = []
if 0 in indexes:
gx0_0 = (0 if ggx0 is None else
ggx0 * x1 * (x1 - 1) * pow_x0_x1_2)
gx0_1 = (0 if ggx1 is None else
ggx1 * pow_x0_x1_1 * (log_x0 * x1 + 1))
gx0 = (gx0_0 + gx0_1) * gy
ret.append(chainer.functions.sum_to(gx0, x0.shape))
if 1 in indexes:
gx1_0 = (0 if ggx0 is None else
ggx0 * pow_x0_x1_1 * (log_x0 * x1 + 1))
gx1_1 = (0 if ggx1 is None else
ggx1 * log_x0 * log_x0 * pow_x0_x1)
gx1 = (gx1_0 + gx1_1) * gy
ret.append(chainer.functions.sum_to(gx1, x1.shape))
if 2 in indexes:
ggy_0 = 0 if ggx0 is None else ggx0 * x1 * pow_x0_x1_1
ggy_1 = 0 if ggx1 is None else ggx1 * log_x0 * pow_x0_x1
ggy = ggy_0 + ggy_1
ret.append(ggy)
return ret
class PowVarConst(function_node.FunctionNode):
def __init__(self, value):
self.value = value
@property
def label(self):
return '_ ** %s' % _convert_value_to_string(self.value)
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
type_check.expect(in_types[0].dtype.kind == 'f')
def forward(self, x):
self.retain_inputs((0,))
y = x[0] ** _preprocess_const(x[0], self.value)
return utils.force_array(y, x[0].dtype),
def backward(self, indexes, gy):
inputs = self.get_retained_inputs()
return PowVarConstGrad(self.value).apply((inputs[0], gy[0]))
class PowVarConstGrad(function_node.FunctionNode):
def __init__(self, value):
self.value = value
self.val = self.val_1 = None
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
type_check.expect(
in_types[0].dtype.kind == 'f',
in_types[0].dtype == in_types[1].dtype,
in_types[0].shape == in_types[1].shape
)
def forward_cpu(self, inputs):
self.retain_inputs((0, 1))
x, gy = inputs
self.val_1 = _preprocess_const(x, self.value - 1)
gx = utils.force_type(x.dtype, self.value) * (x ** self.val_1) * gy
gx = utils.force_array(gx)
return gx,
def forward_gpu(self, inputs):
self.retain_inputs((0, 1))
x, gy = inputs
self.val = _preprocess_const(x, self.value)
gx = cuda.elementwise(
'T x, T gy, T value', 'T gx',
'gx = value * pow(x, value - 1) * gy',
'pow_var_const_bwd')(x, gy, self.val)
return gx,
def backward(self, indexes, ggx):
x, gy = self.get_retained_inputs()
if self.val is None:
self.val = _preprocess_const(x.data, self.value)
if self.val_1 is None:
self.val_1 = _preprocess_const(x.data, self.value - 1)
val_2 = _preprocess_const(x.data, self.value - 2)
ret = []
if 0 in indexes:
ret.append(ggx[0] * self.val * gy * self.val_1 * x ** val_2)
if 1 in indexes:
ret.append(ggx[0] * self.val * x ** self.val_1)
return ret
def pow(self, rhs): # lhs ** rhs
"""Element-wise power function.
Returns:
~chainer.Variable: Output variable.
"""
if numpy.isscalar(rhs):
return PowVarConst(rhs).apply((self,))[0]
rhs = _preprocess_rhs(self, rhs)
return PowVarVar().apply((self, rhs))[0]
class PowConstVar(function_node.FunctionNode):
def __init__(self, value):
self.value = value
@property
def label(self):
return '%s ** _' % _convert_value_to_string(self.value)
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
type_check.expect(in_types[0].dtype.kind == 'f')
def forward(self, x):
self.retain_outputs((0,))
value = _preprocess_const(x[0], self.value)
y = value ** x[0]
return utils.force_array(y),
def backward(self, indexes, gy):
outputs = self.get_retained_outputs()
return PowConstVarGrad(self.value).apply((outputs[0], gy[0]))
class PowConstVarGrad(function_node.FunctionNode):
def __init__(self, value):
self.value = value
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
type_check.expect(
in_types[0].dtype.kind == 'f',
in_types[0].dtype == in_types[1].dtype,
in_types[0].shape == in_types[1].shape
)
def forward_cpu(self, inputs):
self.retain_inputs((0, 1))
y, gy = inputs
self.value = _preprocess_const(y, self.value)
gx = utils.force_array(
numpy.log(self.value, dtype=y.dtype) * y * gy)
return gx,
def forward_gpu(self, inputs):
self.retain_inputs((0, 1))
y, gy = inputs
self.value = _preprocess_const(y, self.value)
gx = cuda.elementwise(
'T y, T gy, T value', 'T gx',
'gx = log(value) * y * gy',
'pow_const_var_bwd')(y, gy, self.value)
return gx,
def backward(self, indexes, ggx):
y, gy = self.get_retained_inputs()
xp = cuda.get_array_module(y)
gygy = xp.log(self.value) * ggx[0]
ret = []
if 0 in indexes:
ret.append(gygy * gy)
if 1 in indexes:
ret.append(gygy * y)
return ret
def rpow(self, rhs): # rhs ** lhs
"""Element-wise power function.
Returns:
~chainer.Variable: Output variable.
"""
if numpy.isscalar(rhs):
return PowConstVar(rhs).apply((self,))[0]
rhs = _preprocess_rhs(self, rhs)
return PowVarVar().apply((rhs, self))[0]
def matmul(self, rhs): # lhs @ rhs
"""Matrix multiplication.
Returns:
~chainer.Variable: Output variable.
"""
rhs = _preprocess_rhs(self, rhs)
return chainer.functions.matmul(self, rhs)
def rmatmul(self, rhs): # rhs @ lhs
"""Matrix multiplication.
Returns:
~chainer.Variable: Output variable.
"""
rhs = _preprocess_rhs(self, rhs)
return chainer.functions.matmul(rhs, self)
def install_variable_arithmetics():
variable.Variable.__neg__ = neg
variable.Variable.__abs__ = absolute
variable.Variable.__add__ = add
variable.Variable.__radd__ = add
variable.Variable.__sub__ = sub
variable.Variable.__rsub__ = rsub
variable.Variable.__mul__ = mul
variable.Variable.__rmul__ = mul
variable.Variable.__div__ = div
variable.Variable.__truediv__ = div
variable.Variable.__rdiv__ = rdiv
variable.Variable.__rtruediv__ = rdiv
variable.Variable.__floordiv__ = floordiv
variable.Variable.__rfloordiv__ = rfloordiv
variable.Variable.__pow__ = pow
variable.Variable.__rpow__ = rpow
variable.Variable.__matmul__ = matmul
variable.Variable.__rmatmul__ = rmatmul
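# Chainer is expected to call install_variable_arithmetics() once at import
# time, after which Python operators such as `a + b` and `a ** 2` dispatch
# to the FunctionNode implementations above.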
| |
import pytest
from django.conf import settings
from django.core.management import call_command
import amo
import amo.tests
from addons.management.commands import approve_addons
from devhub.models import AddonLog
from editors.models import ReviewerScore
# Where to monkeypatch "lib.crypto.tasks.sign_addons" so it's correctly mocked.
SIGN_ADDONS = 'addons.management.commands.sign_addons.sign_addons'
# Test the "sign_addons" command.
def test_no_overridden_settings(monkeypatch):
assert not settings.SIGNING_SERVER
assert not settings.PRELIMINARY_SIGNING_SERVER
def no_endpoint(ids, **kwargs):
assert not settings.SIGNING_SERVER
assert not settings.PRELIMINARY_SIGNING_SERVER
monkeypatch.setattr(SIGN_ADDONS, no_endpoint)
call_command('sign_addons', 123)
def test_override_SIGNING_SERVER_setting(monkeypatch):
"""You can override the SIGNING_SERVER settings."""
assert not settings.SIGNING_SERVER
def signing_server(ids, **kwargs):
assert settings.SIGNING_SERVER == 'http://example.com'
monkeypatch.setattr(SIGN_ADDONS, signing_server)
call_command('sign_addons', 123, signing_server='http://example.com')
def test_override_PRELIMINARY_SIGNING_SERVER_setting(monkeypatch):
"""You can override the PRELIMINARY_SIGNING_SERVER settings."""
assert not settings.PRELIMINARY_SIGNING_SERVER
def preliminary_signing_server(ids, **kwargs):
assert settings.PRELIMINARY_SIGNING_SERVER == 'http://example.com'
monkeypatch.setattr(SIGN_ADDONS, preliminary_signing_server)
call_command('sign_addons', 123,
preliminary_signing_server='http://example.com')
def test_force_signing(monkeypatch):
"""You can force signing an addon even if it's already signed."""
def not_forced(ids, force):
assert not force
monkeypatch.setattr(SIGN_ADDONS, not_forced)
call_command('sign_addons', 123)
def is_forced(ids, force):
assert force
monkeypatch.setattr(SIGN_ADDONS, is_forced)
call_command('sign_addons', 123, force=True)
# Test the "approve_addons" command.
@pytest.mark.django_db
def test_approve_addons_get_files_incomplete():
"""An incomplete add-on can't be approved."""
addon = amo.tests.addon_factory(status=amo.STATUS_NULL)
assert approve_addons.get_files([addon.guid]) == []
@pytest.mark.django_db
def test_approve_addons_get_files_bad_guid():
"""An add-on with another guid doesn't get approved."""
addon1 = amo.tests.addon_factory(status=amo.STATUS_UNREVIEWED, guid='foo')
addon1_file = addon1.latest_version.files.get()
addon1_file.update(status=amo.STATUS_UNREVIEWED)
# Create another add-on that we won't get the files for.
addon2 = amo.tests.addon_factory(status=amo.STATUS_UNREVIEWED, guid='bar')
addon2_file = addon2.latest_version.files.get()
addon2_file.update(status=amo.STATUS_UNREVIEWED)
# There's only the addon1's file returned, no other.
assert approve_addons.get_files(['foo']) == [addon1_file]
def id_function(fixture_value):
"""Convert a param from the use_case fixture to a nicer name.
By default, the name (used in the tests generated from the parametrized
fixture) is the fixture name plus a number, e.g. test_foo[use_case0].
Providing explicit 'ids' (either as strings or as a function) uses those
names instead. Here the name encodes the addon status, the file status and
the review type, e.g. test_foo[public-unreviewed-full].
"""
addon_status, file_status, review_type = fixture_value
return '{0}-{1}-{2}'.format(amo.STATUS_CHOICES_API[addon_status],
amo.STATUS_CHOICES_API[file_status],
review_type)
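# E.g. (assuming the usual STATUS_CHOICES_API labels):
#   id_function((amo.STATUS_PUBLIC, amo.STATUS_UNREVIEWED, 'full'))
#   -> 'public-unreviewed-full'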
@pytest.fixture(
params=[(amo.STATUS_UNREVIEWED, amo.STATUS_UNREVIEWED, 'prelim'),
(amo.STATUS_LITE, amo.STATUS_UNREVIEWED, 'prelim'),
(amo.STATUS_NOMINATED, amo.STATUS_UNREVIEWED, 'full'),
(amo.STATUS_PUBLIC, amo.STATUS_UNREVIEWED, 'full'),
(amo.STATUS_LITE_AND_NOMINATED, amo.STATUS_LITE, 'full')],
# ids are used to build better names for the tests using this fixture.
ids=id_function)
def use_case(request, db):
"""This fixture will return quadruples for different use cases.
Addon | File1 and 2 | Review type
==============================================================
waiting for prelim | unreviewed | prelim reviewed
prelim reviewed | unreviewed | prelim reviewed
waiting for full | unreviewed | fully reviewed
fully reviewed | unreviewed | fully reviewed
prelim waiting for full | prelim reviewed | fully reviewed
"""
addon_status, file_status, review_type = request.param
addon = amo.tests.addon_factory(status=addon_status, guid='foo')
version = addon.latest_version
file1 = version.files.get()
file1.update(status=file_status)
# A second file for good measure.
file2 = amo.tests.file_factory(version=version, status=file_status)
# If the addon is public, and we change its only file to something other
# than public, it'll change to unreviewed.
addon.update(status=addon_status)
assert addon.reload().status == addon_status
assert file1.reload().status == file_status
assert file2.reload().status == file_status
return (addon, file1, file2, review_type)
@pytest.fixture
def mozilla_user(db):
"""Create and return the "mozilla" user used to auto approve addons."""
return amo.tests.user_factory(id=settings.TASK_USER_ID)
def test_approve_addons_get_files(use_case):
"""Files that need to get approved are returned in the list.
Use cases are quadruples taken from the "use_case" fixture above.
"""
addon, file1, file2, review_type = use_case
assert approve_addons.get_files([addon.guid]) == [file1, file2]
@pytest.mark.django_db
def test_approve_addons_approve_files_no_review_type():
"""Files which don't need approval don't change status."""
# Create the "mozilla" user, needed for the log.
amo.tests.user_factory(id=settings.TASK_USER_ID)
addon = amo.tests.addon_factory(status=amo.STATUS_PUBLIC)
file_ = addon.versions.get().files.get()
file_.update(status=amo.STATUS_PUBLIC)
approve_addons.approve_files([(file_, None)])
# Nothing changed.
assert addon.reload().status == amo.STATUS_PUBLIC
assert file_.reload().status == amo.STATUS_PUBLIC
def test_approve_addons_approve_files(use_case, mozilla_user):
"""Files are approved using the correct review type.
Use cases are quadruples taken from the "use_case" fixture above.
"""
addon, file1, file2, review_type = use_case
approve_addons.approve_files([(file1, review_type),
(file2, review_type)])
assert file1.reload().status == (
amo.STATUS_LITE if review_type == 'prelim' else amo.STATUS_PUBLIC)
assert file2.reload().status == (
amo.STATUS_LITE if review_type == 'prelim' else amo.STATUS_PUBLIC)
logs = AddonLog.objects.filter(addon=addon)
assert len(logs) == 2 # One per file.
file1_log, file2_log = logs
# An AddonLog has been created for each approval.
assert file1_log.activity_log.details['comments'] == u'bulk approval'
assert file1_log.activity_log.user == mozilla_user
assert file2_log.activity_log.details['comments'] == u'bulk approval'
assert file2_log.activity_log.user == mozilla_user
# No ReviewerScore was granted, it's an automatic approval.
assert not ReviewerScore.objects.all()
@pytest.mark.django_db
def test_approve_addons_get_review_type_already_approved():
"""The review type for a file that doesn't need approval is None."""
addon = amo.tests.addon_factory(status=amo.STATUS_PUBLIC)
file_ = addon.versions.get().files.get()
file_.update(status=amo.STATUS_PUBLIC)
assert approve_addons.get_review_type(file_) is None
def test_approve_addons_get_review_type(use_case):
"""Review type depends on the file and addon status.
Use cases are quadruples taken from the "use_case" fixture above.
"""
addon, file1, _, review_type = use_case
assert approve_addons.get_review_type(file1) == review_type
| |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the pruning code.
WARNING:
This test uses 4GB of disk space.
This test takes 30 mins or more (up to 2 hours)
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import os
MIN_BLOCKS_TO_KEEP = 288
# Rescans start at the earliest block up to 2 hours before a key timestamp, so
# the manual prune RPC avoids pruning blocks in the same window to be
# compatible with pruning based on key creation time.
TIMESTAMP_WINDOW = 2 * 60 * 60
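# Total size (in MiB) of the files in `blockdir`.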
def calc_usage(blockdir):
return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.)
class PruneTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 6
# Create nodes 0 and 1 to mine.
# Create node 2 to test pruning.
self.full_node_default_args = ["-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5", "-limitdescendantcount=100", "-limitdescendantsize=5000", "-limitancestorcount=100", "-limitancestorsize=5000" ]
# Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
# Create nodes 5 to test wallet in prune mode, but do not connect
self.extra_args = [self.full_node_default_args,
self.full_node_default_args,
["-maxreceivebuffer=20000", "-prune=550"],
["-maxreceivebuffer=20000", "-blockmaxsize=999000"],
["-maxreceivebuffer=20000", "-blockmaxsize=999000"],
["-prune=550"]]
def setup_network(self):
self.setup_nodes()
self.prunedir = os.path.join(self.nodes[2].datadir, 'regtest', 'blocks', '')
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 2)
connect_nodes(self.nodes[2], 0)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[0], 4)
sync_blocks(self.nodes[0:5])
def setup_nodes(self):
self.add_nodes(self.num_nodes, self.extra_args, timewait=900)
self.start_nodes()
def create_big_chain(self):
# Start by creating some coinbases we can spend later
self.nodes[1].generate(200)
sync_blocks(self.nodes[0:2])
self.nodes[0].generate(150)
# Then mine enough full blocks to create more than 550MiB of data
for i in range(645):
mine_large_block(self.nodes[0], self.utxo_cache_0)
sync_blocks(self.nodes[0:5])
def test_height_min(self):
if not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")):
raise AssertionError("blk00000.dat is missing, pruning too early")
self.log.info("Success")
self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir))
self.log.info("Mining 25 more blocks should cause the first block file to be pruned")
# Pruning doesn't run until we're allocating another chunk; 20 full blocks past the height cutoff will ensure this
for i in range(25):
mine_large_block(self.nodes[0], self.utxo_cache_0)
# Wait for blk00000.dat to be pruned
wait_until(lambda: not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), timeout=30)
self.log.info("Success")
usage = calc_usage(self.prunedir)
self.log.info("Usage should be below target: %d" % usage)
if (usage > 550):
raise AssertionError("Pruning target not being met")
def create_chain_with_staleblocks(self):
# Create stale blocks in manageable sized chunks
self.log.info("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")
for j in range(12):
# Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
# Node 2 stays connected, so it hears about the stale blocks and then reorgs when node 0 reconnects
# Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
self.stop_node(0)
self.start_node(0, extra_args=self.full_node_default_args)
# Mine 24 blocks in node 1
for i in range(24):
if j == 0:
mine_large_block(self.nodes[1], self.utxo_cache_1)
else:
# Add node1's wallet transactions back to the mempool, to
# keep the mined blocks from being too small.
self.nodes[1].resendwallettransactions()
self.nodes[1].generate(1) #tx's already in mempool from previous disconnects
# Reorg back with 25 block chain from node 0
for i in range(25):
mine_large_block(self.nodes[0], self.utxo_cache_0)
# Create connections in this order so both nodes can see the reorg at the same time
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
sync_blocks(self.nodes[0:3])
self.log.info("Usage can be over target because of high stale rate: %d" % calc_usage(self.prunedir))
def reorg_test(self):
# Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
# This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
# Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
# Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
self.stop_node(1)
self.start_node(1, extra_args=["-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"])
height = self.nodes[1].getblockcount()
self.log.info("Current block height: %d" % height)
invalidheight = height-287
badhash = self.nodes[1].getblockhash(invalidheight)
self.log.info("Invalidating block %s at height %d" % (badhash,invalidheight))
self.nodes[1].invalidateblock(badhash)
# We've now switched to our previously mined 24-block fork on node 1, but that's not what we want
# So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
curhash = self.nodes[1].getblockhash(invalidheight - 1)
while curhash != mainchainhash:
self.nodes[1].invalidateblock(curhash)
curhash = self.nodes[1].getblockhash(invalidheight - 1)
assert(self.nodes[1].getblockcount() == invalidheight - 1)
self.log.info("New best height: %d" % self.nodes[1].getblockcount())
# Reboot node1 to clear those giant tx's from mempool
self.stop_node(1)
self.start_node(1, extra_args=["-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"])
self.log.info("Generating new longer chain of 300 more blocks")
self.nodes[1].generate(300)
self.log.info("Reconnect nodes")
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[2], 1)
sync_blocks(self.nodes[0:3], timeout=120)
self.log.info("Verify height on node 2: %d" % self.nodes[2].getblockcount())
self.log.info("Usage possibly still high bc of stale blocks in block files: %d" % calc_usage(self.prunedir))
self.log.info("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
# Get node0's wallet transactions back in its mempool, to keep the
# mined blocks from being too small.
self.nodes[0].resendwallettransactions()
for i in range(22):
# This can be slow, so do this in multiple RPC calls to avoid
# RPC timeouts.
self.nodes[0].generate(10) #node 0 has many large tx's in its mempool from the disconnects
sync_blocks(self.nodes[0:3], timeout=300)
usage = calc_usage(self.prunedir)
self.log.info("Usage should be below target: %d" % usage)
if (usage > 550):
raise AssertionError("Pruning target not being met")
return invalidheight,badhash
def reorg_back(self):
# Verify that a block on the old main chain fork has been pruned away
assert_raises_rpc_error(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
self.log.info("Will need to redownload block %d" % self.forkheight)
# Verify that we have enough history to reorg back to the fork point
# Although this is more than 288 blocks back, this chain was written more recently,
# and the only blocks in the block files after it are its other 299 small and 220
# large blocks, so it is expected to still be retained
self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))
first_reorg_height = self.nodes[2].getblockcount()
curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
self.nodes[2].invalidateblock(curchainhash)
goalbestheight = self.mainchainheight
goalbesthash = self.mainchainhash2
# As of 0.10 the current block download logic is not able to reorg to the original chain created in
# create_chain_with_staleblocks because it doesn't know of any peer that's on that chain from which to
# redownload its missing blocks.
# Invalidate the reorg_test chain in node 0 as well; it can successfully switch to the original chain
# because it has all the block data.
# However it must mine enough blocks to have a more work chain than the reorg_test chain in order
# to trigger node 2's block download logic.
# At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg
if self.nodes[2].getblockcount() < self.mainchainheight:
blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: %d" % blocks_to_mine)
self.nodes[0].invalidateblock(curchainhash)
assert(self.nodes[0].getblockcount() == self.mainchainheight)
assert(self.nodes[0].getbestblockhash() == self.mainchainhash2)
goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
goalbestheight = first_reorg_height + 1
self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
# Wait for Node 2 to reorg to proper height
wait_until(lambda: self.nodes[2].getblockcount() >= goalbestheight, timeout=900)
assert(self.nodes[2].getbestblockhash() == goalbesthash)
# Verify we can now have the data for a block previously pruned
assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight)
def manual_test(self, node_number, use_timestamp):
# At this point, the node has 995 blocks and has not yet run in prune mode
self.start_node(node_number)
node = self.nodes[node_number]
assert_equal(node.getblockcount(), 995)
assert_raises_rpc_error(-1, "not in prune mode", node.pruneblockchain, 500)
# now re-start in manual pruning mode
self.stop_node(node_number)
self.start_node(node_number, extra_args=["-prune=1"])
node = self.nodes[node_number]
assert_equal(node.getblockcount(), 995)
def height(index):
if use_timestamp:
return node.getblockheader(node.getblockhash(index))["time"] + TIMESTAMP_WINDOW
else:
return index
def prune(index, expected_ret=None):
ret = node.pruneblockchain(height(index))
# Check the return value. When use_timestamp is True, just check
# that the return value is less than or equal to the expected
# value, because when more than one block is generated per second,
# a timestamp will not be granular enough to uniquely identify an
# individual block.
if expected_ret is None:
expected_ret = index
if use_timestamp:
assert_greater_than(ret, 0)
assert_greater_than(expected_ret + 1, ret)
else:
assert_equal(ret, expected_ret)
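# Illustrative note: with use_timestamp=True, two blocks mined within the
# same second share a timestamp, so prune(500) may legitimately report any
# height up to 500; hence the ret <= expected_ret check above.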
def has_block(index):
return os.path.isfile(os.path.join(self.nodes[node_number].datadir, "regtest", "blocks", "blk{:05}.dat".format(index)))
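# e.g. has_block(0) checks for <datadir>/regtest/blocks/blk00000.dat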
# should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000)
assert_raises_rpc_error(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500))
# mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight)
node.generate(6)
assert_equal(node.getblockchaininfo()["blocks"], 1001)
# negative heights should raise an exception
assert_raises_rpc_error(-8, "Negative", node.pruneblockchain, -10)
# height=100 too low to prune first block file so this is a no-op
prune(100)
if not has_block(0):
raise AssertionError("blk00000.dat is missing when should still be there")
# Does nothing
node.pruneblockchain(height(0))
if not has_block(0):
raise AssertionError("blk00000.dat is missing when should still be there")
# height=500 should prune first file
prune(500)
if has_block(0):
raise AssertionError("blk00000.dat is still there, should be pruned by now")
if not has_block(1):
raise AssertionError("blk00001.dat is missing when should still be there")
# height=650 should prune second file
prune(650)
if has_block(1):
raise AssertionError("blk00001.dat is still there, should be pruned by now")
# height=1000 should not prune anything more, because tip-288 is in blk00002.dat.
prune(1000, 1001 - MIN_BLOCKS_TO_KEEP)
if not has_block(2):
raise AssertionError("blk00002.dat is still there, should be pruned by now")
# advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat)
node.generate(288)
prune(1000)
if has_block(2):
raise AssertionError("blk00002.dat is still there, should be pruned by now")
if has_block(3):
raise AssertionError("blk00003.dat is still there, should be pruned by now")
# stop node, start back up with auto-prune at 550MB, make sure still runs
self.stop_node(node_number)
self.start_node(node_number, extra_args=["-prune=550"])
self.log.info("Success")
def wallet_test(self):
# check that the pruning node's wallet is still in good shape
self.log.info("Stop and start pruning node to trigger wallet rescan")
self.stop_node(2)
self.start_node(2, extra_args=["-prune=550"])
self.log.info("Success")
# check that wallet loads successfully when restarting a pruned node after IBD.
# this was reported to fail in #7494.
self.log.info("Syncing node 5 to test wallet")
connect_nodes(self.nodes[0], 5)
nds = [self.nodes[0], self.nodes[5]]
sync_blocks(nds, wait=5, timeout=300)
self.stop_node(5) #stop and start to trigger rescan
self.start_node(5, extra_args=["-prune=550"])
self.log.info("Success")
def run_test(self):
self.log.info("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)")
self.log.info("Mining a big blockchain of 995 blocks")
# Determine default relay fee
self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
# Cache for utxos, as the listunspent may take a long time later in the test
self.utxo_cache_0 = []
self.utxo_cache_1 = []
self.create_big_chain()
# Chain diagram key:
# * blocks on main chain
# +,&,$,@ blocks on other forks
# X invalidated block
# N1 Node 1
#
# Start by mining a simple chain that all nodes have
# N0=N1=N2 **...*(995)
# stop manual-pruning node with 995 blocks
self.stop_node(3)
self.stop_node(4)
self.log.info("Check that we haven't started pruning yet because we're below PruneAfterHeight")
self.test_height_min()
# Extend this chain past the PruneAfterHeight
# N0=N1=N2 **...*(1020)
self.log.info("Check that we'll exceed disk space target if we have a very high stale block rate")
self.create_chain_with_staleblocks()
# Disconnect N0
# And mine a 24 block chain on N1 and a separate 25 block chain on N0
# N1=N2 **...*+...+(1044)
# N0 **...**...**(1045)
#
# reconnect nodes causing reorg on N1 and N2
# N1=N2 **...*(1020) *...**(1045)
# \
# +...+(1044)
#
# repeat this process until you have 12 stale forks hanging off the
# main chain on N1 and N2
# N0 *************************...***************************(1320)
#
# N1=N2 **...*(1020) *...**(1045) *.. ..**(1295) *...**(1320)
# \ \ \
# +...+(1044) &.. $...$(1319)
# Save some current chain state for later use
self.mainchainheight = self.nodes[2].getblockcount() #1320
self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)
self.log.info("Check that we can survive a 288 block reorg still")
(self.forkheight,self.forkhash) = self.reorg_test() #(1033, )
# Now create a 288 block reorg by mining a longer chain on N1
# First disconnect N1
# Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
# N1 **...*(1020) **...**(1032)X..
# \
# ++...+(1031)X..
#
# Now mine 300 more blocks on N1
# N1 **...*(1020) **...**(1032) @@...@(1332)
# \ \
# \ X...
# \ \
# ++...+(1031)X.. ..
#
# Reconnect nodes and mine 220 more blocks on N1
# N1 **...*(1020) **...**(1032) @@...@@@(1552)
# \ \
# \ X...
# \ \
# ++...+(1031)X.. ..
#
# N2 **...*(1020) **...**(1032) @@...@@@(1552)
# \ \
# \ *...**(1320)
# \ \
# ++...++(1044) ..
#
# N0 ********************(1032) @@...@@@(1552)
# \
# *...**(1320)
self.log.info("Test that we can rerequest a block we previously pruned if needed for a reorg")
self.reorg_back()
# Verify that N2 still has block 1033 on current chain (@), but not on main chain (*)
# Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to
# original main chain (*), but will require redownload of some blocks
# In order to have a peer we think we can download from, we must also perform this invalidation
# on N0 and mine a new longest chain to trigger the redownload.
# Final result:
# N0 ********************(1032) **...****(1553)
# \
# X@...@@@(1552)
#
# N2 **...*(1020) **...**(1032) **...****(1553)
# \ \
# \ X@...@@@(1552)
# \
# +..
#
# N1 doesn't change because 1033 on main chain (*) is invalid
self.log.info("Test manual pruning with block indices")
self.manual_test(3, use_timestamp=False)
self.log.info("Test manual pruning with timestamps")
self.manual_test(4, use_timestamp=True)
self.log.info("Test wallet re-scan")
self.wallet_test()
self.log.info("Done")
if __name__ == '__main__':
PruneTest().main()
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.auth.transport.requests import AuthorizedSession # type: ignore
import json # type: ignore
import grpc # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.api_core import rest_helpers
from google.api_core import rest_streaming
from google.api_core import path_template
from google.api_core import gapic_v1
from requests import __version__ as requests_version
import dataclasses
import re
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
import warnings
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.compute_v1.types import compute
from .base import (
RegionInstanceGroupsTransport,
DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO,
)
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
grpc_version=None,
rest_version=requests_version,
)
class RegionInstanceGroupsRestInterceptor:
"""Interceptor for RegionInstanceGroups.
Interceptors are used to manipulate requests, request metadata, and responses
in arbitrary ways.
Example use cases include:
* Logging
* Verifying requests according to service or custom semantics
* Stripping extraneous information from responses
These use cases and more can be enabled by injecting an
instance of a custom subclass when constructing the RegionInstanceGroupsRestTransport.
.. code-block:: python
class MyCustomRegionInstanceGroupsInterceptor(RegionInstanceGroupsRestInterceptor):
def pre_get(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_get(self, response):
logging.log(f"Received response: {response}")
return response
def pre_list(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_list(self, response):
logging.log(f"Received response: {response}")
return response
def pre_list_instances(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_list_instances(self, response):
logging.log(f"Received response: {response}")
return response
def pre_set_named_ports(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_set_named_ports(self, response):
logging.log(f"Received response: {response}")
return response
transport = RegionInstanceGroupsRestTransport(interceptor=MyCustomRegionInstanceGroupsInterceptor())
client = RegionInstanceGroupsClient(transport=transport)
"""
def pre_get(
self,
request: compute.GetRegionInstanceGroupRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.GetRegionInstanceGroupRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for get
Override in a subclass to manipulate the request or metadata
before they are sent to the RegionInstanceGroups server.
"""
return request, metadata
def post_get(self, response: compute.InstanceGroup) -> compute.InstanceGroup:
"""Post-rpc interceptor for get
Override in a subclass to manipulate the response
after it is returned by the RegionInstanceGroups server but before
it is returned to user code.
"""
return response
def pre_list(
self,
request: compute.ListRegionInstanceGroupsRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.ListRegionInstanceGroupsRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for list
Override in a subclass to manipulate the request or metadata
before they are sent to the RegionInstanceGroups server.
"""
return request, metadata
def post_list(
self, response: compute.RegionInstanceGroupList
) -> compute.RegionInstanceGroupList:
"""Post-rpc interceptor for list
Override in a subclass to manipulate the response
after it is returned by the RegionInstanceGroups server but before
it is returned to user code.
"""
return response
def pre_list_instances(
self,
request: compute.ListInstancesRegionInstanceGroupsRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[
compute.ListInstancesRegionInstanceGroupsRequest, Sequence[Tuple[str, str]]
]:
"""Pre-rpc interceptor for list_instances
Override in a subclass to manipulate the request or metadata
before they are sent to the RegionInstanceGroups server.
"""
return request, metadata
def post_list_instances(
self, response: compute.RegionInstanceGroupsListInstances
) -> compute.RegionInstanceGroupsListInstances:
"""Post-rpc interceptor for list_instances
Override in a subclass to manipulate the response
after it is returned by the RegionInstanceGroups server but before
it is returned to user code.
"""
return response
def pre_set_named_ports(
self,
request: compute.SetNamedPortsRegionInstanceGroupRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[
compute.SetNamedPortsRegionInstanceGroupRequest, Sequence[Tuple[str, str]]
]:
"""Pre-rpc interceptor for set_named_ports
Override in a subclass to manipulate the request or metadata
before they are sent to the RegionInstanceGroups server.
"""
return request, metadata
def post_set_named_ports(self, response: compute.Operation) -> compute.Operation:
"""Post-rpc interceptor for set_named_ports
Override in a subclass to manipulate the response
after it is returned by the RegionInstanceGroups server but before
it is returned to user code.
"""
return response
@dataclasses.dataclass
class RegionInstanceGroupsRestStub:
_session: AuthorizedSession
_host: str
_interceptor: RegionInstanceGroupsRestInterceptor
class RegionInstanceGroupsRestTransport(RegionInstanceGroupsTransport):
"""REST backend transport for RegionInstanceGroups.
The RegionInstanceGroups API.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends JSON representations of protocol buffers over HTTP/1.1
"""
_STUBS: Dict[str, RegionInstanceGroupsRestStub] = {}
def __init__(
self,
*,
host: str = "compute.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
url_scheme: str = "https",
interceptor: Optional[RegionInstanceGroupsRestInterceptor] = None,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
certificate to configure mutual TLS HTTP channel. It is ignored
if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you are developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether a self-signed JWT should
be used for service account credentials.
url_scheme: the protocol scheme for the API endpoint. Normally
"https", but for testing or local servers,
"http" can be specified.
"""
# Run the base constructor
# TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
# TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
# credentials object
maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
if maybe_url_match is None:
raise ValueError(
f"Unexpected hostname structure: {host}"
) # pragma: NO COVER
url_match_items = maybe_url_match.groupdict()
host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
super().__init__(
host=host,
credentials=credentials,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
self._session = AuthorizedSession(
self._credentials, default_host=self.DEFAULT_HOST
)
if client_cert_source_for_mtls:
self._session.configure_mtls_channel(client_cert_source_for_mtls)
self._interceptor = interceptor or RegionInstanceGroupsRestInterceptor()
self._prep_wrapped_messages(client_info)
class _Get(RegionInstanceGroupsRestStub):
def __hash__(self):
return hash("Get")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
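# Illustrative note: if, say, "project" were listed above with default "",
# a request missing it would have {"project": ""} merged into its query
# params so every required field is always sent; here the map is empty, so
# this is a no-op.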
def __call__(
self,
request: compute.GetRegionInstanceGroupRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.InstanceGroup:
r"""Call the get method over HTTP.
Args:
request (~.compute.GetRegionInstanceGroupRequest):
The request object. A request message for
RegionInstanceGroups.Get. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.InstanceGroup:
Represents an Instance Group
resource. Instance Groups can be used to
configure a target for load balancing.
Instance groups can either be managed or
unmanaged. To create managed instance
groups, use the instanceGroupManager or
regionInstanceGroupManager resource
instead. Use zonal unmanaged instance
groups if you need to apply load
balancing to groups of heterogeneous
instances or if you need to manage the
instances yourself. You cannot create
regional unmanaged instance groups. For
more information, read Instance groups.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/regions/{region}/instanceGroups/{instance_group}",
},
]
request, metadata = self._interceptor.pre_get(request, metadata)
request_kwargs = compute.GetRegionInstanceGroupRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.GetRegionInstanceGroupRequest.to_json(
compute.GetRegionInstanceGroupRequest(
transcoded_request["query_params"]
),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.InstanceGroup.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_get(resp)
return resp
class _List(RegionInstanceGroupsRestStub):
def __hash__(self):
return hash("List")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.ListRegionInstanceGroupsRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.RegionInstanceGroupList:
r"""Call the list method over HTTP.
Args:
request (~.compute.ListRegionInstanceGroupsRequest):
The request object. A request message for
RegionInstanceGroups.List. See the
method description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.RegionInstanceGroupList:
Contains a list of InstanceGroup
resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/regions/{region}/instanceGroups",
},
]
request, metadata = self._interceptor.pre_list(request, metadata)
request_kwargs = compute.ListRegionInstanceGroupsRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.ListRegionInstanceGroupsRequest.to_json(
compute.ListRegionInstanceGroupsRequest(
transcoded_request["query_params"]
),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.RegionInstanceGroupList.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_list(resp)
return resp
class _ListInstances(RegionInstanceGroupsRestStub):
def __hash__(self):
return hash("ListInstances")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.ListInstancesRegionInstanceGroupsRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.RegionInstanceGroupsListInstances:
r"""Call the list instances method over HTTP.
Args:
request (~.compute.ListInstancesRegionInstanceGroupsRequest):
The request object. A request message for
RegionInstanceGroups.ListInstances. See
the method description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.RegionInstanceGroupsListInstances:
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/compute/v1/projects/{project}/regions/{region}/instanceGroups/{instance_group}/listInstances",
"body": "region_instance_groups_list_instances_request_resource",
},
]
request, metadata = self._interceptor.pre_list_instances(request, metadata)
request_kwargs = compute.ListInstancesRegionInstanceGroupsRequest.to_dict(
request
)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
# Jsonify the request body
body = compute.RegionInstanceGroupsListInstancesRequest.to_json(
compute.RegionInstanceGroupsListInstancesRequest(
transcoded_request["body"]
),
including_default_value_fields=False,
use_integers_for_enums=False,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.ListInstancesRegionInstanceGroupsRequest.to_json(
compute.ListInstancesRegionInstanceGroupsRequest(
transcoded_request["query_params"]
),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.RegionInstanceGroupsListInstances.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_list_instances(resp)
return resp
class _SetNamedPorts(RegionInstanceGroupsRestStub):
def __hash__(self):
return hash("SetNamedPorts")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.SetNamedPortsRegionInstanceGroupRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the set named ports method over HTTP.
Args:
request (~.compute.SetNamedPortsRegionInstanceGroupRequest):
The request object. A request message for
RegionInstanceGroups.SetNamedPorts. See
the method description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/compute/v1/projects/{project}/regions/{region}/instanceGroups/{instance_group}/setNamedPorts",
"body": "region_instance_groups_set_named_ports_request_resource",
},
]
request, metadata = self._interceptor.pre_set_named_ports(request, metadata)
request_kwargs = compute.SetNamedPortsRegionInstanceGroupRequest.to_dict(
request
)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
# Jsonify the request body
body = compute.RegionInstanceGroupsSetNamedPortsRequest.to_json(
compute.RegionInstanceGroupsSetNamedPortsRequest(
transcoded_request["body"]
),
including_default_value_fields=False,
use_integers_for_enums=False,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.SetNamedPortsRegionInstanceGroupRequest.to_json(
compute.SetNamedPortsRegionInstanceGroupRequest(
transcoded_request["query_params"]
),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_set_named_ports(resp)
return resp
@property
def get(
self,
) -> Callable[[compute.GetRegionInstanceGroupRequest], compute.InstanceGroup]:
stub = self._STUBS.get("get")
if not stub:
stub = self._STUBS["get"] = self._Get(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def list(
self,
) -> Callable[
[compute.ListRegionInstanceGroupsRequest], compute.RegionInstanceGroupList
]:
stub = self._STUBS.get("list")
if not stub:
stub = self._STUBS["list"] = self._List(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def list_instances(
self,
) -> Callable[
[compute.ListInstancesRegionInstanceGroupsRequest],
compute.RegionInstanceGroupsListInstances,
]:
stub = self._STUBS.get("list_instances")
if not stub:
stub = self._STUBS["list_instances"] = self._ListInstances(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def set_named_ports(
self,
) -> Callable[[compute.SetNamedPortsRegionInstanceGroupRequest], compute.Operation]:
stub = self._STUBS.get("set_named_ports")
if not stub:
stub = self._STUBS["set_named_ports"] = self._SetNamedPorts(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
def close(self):
self._session.close()
__all__ = ("RegionInstanceGroupsRestTransport",)
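# Hedged usage sketch (hypothetical project/region values; assumes default
# application credentials are available in the environment):
#
#   transport = RegionInstanceGroupsRestTransport()
#   request = compute.ListRegionInstanceGroupsRequest(
#       project="my-project", region="us-central1")
#   page = transport.list(request)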
| |
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import date
from nose.tools import assert_equal
from allura.lib import helpers as h
from allura.tests import decorators as td
from allura import model as M
from alluratest.controller import TestRestApiBase
from forgeblog import model as BM
class TestBlogApi(TestRestApiBase):
def setUp(self):
super(TestBlogApi, self).setUp()
self.setup_with_tools()
@td.with_tool('test', 'Blog', 'blog')
def setup_with_tools(self):
h.set_context('test', 'blog', neighborhood='Projects')
def test_create_post(self):
data = {
'title': 'test',
'text': 'test text',
'state': 'published',
'labels': 'label1, label2'
}
r = self.api_post('/rest/p/test/blog/', **data)
assert_equal(
r.location, 'http://localhost/rest/p/test/blog/%s/%s/test/' %
(date.today().strftime("%Y"), date.today().strftime("%m")))
assert_equal(r.status_int, 201)
url = '/rest' + BM.BlogPost.query.find().first().url()
r = self.api_get('/rest/p/test/blog/')
assert_equal(r.json['posts'][0]['title'], 'test')
assert_equal(r.json['posts'][0]['url'], h.absurl(url))
r = self.api_get(url)
assert_equal(r.json['title'], data['title'])
assert_equal(r.json['text'], data['text'])
assert_equal(r.json['author'], 'test-admin')
assert_equal(r.json['state'], data['state'])
assert_equal(r.json['labels'], data['labels'].split(','))
def test_update_post(self):
data = {
'title': 'test',
'text': 'test text',
'state': 'published',
'labels': 'label1, label2'
}
r = self.api_post('/rest/p/test/blog/', **data)
assert_equal(r.status_int, 201)
url = '/rest' + BM.BlogPost.query.find().first().url()
data = {
'text': 'test text2',
'state': 'draft',
'labels': 'label3'
}
self.api_post(url, **data)
r = self.api_get(url)
assert_equal(r.json['title'], 'test')
assert_equal(r.json['text'], data['text'])
assert_equal(r.json['state'], data['state'])
assert_equal(r.json['labels'], data['labels'].split(','))
def test_delete_post(self):
data = {
'title': 'test',
'state': 'published',
'labels': 'label1, label2'
}
r = self.api_post('/rest/p/test/blog/', **data)
assert_equal(r.status_int, 201)
url = '/rest' + BM.BlogPost.query.find().first().url()
self.api_post(url, delete='')
r = self.api_get(url)
assert_equal(r.status_int, 404)
def test_post_does_not_exist(self):
r = self.api_get('/rest/p/test/blog/2013/07/fake/')
assert_equal(r.status_int, 404)
def test_read_permissions(self):
self.api_post('/rest/p/test/blog/', title='test',
text='test text', state='published')
self.app.get('/rest/p/test/blog/',
extra_environ={'username': '*anonymous'}, status=200)
p = M.Project.query.get(shortname='test')
acl = p.app_instance('blog').config.acl
anon = M.ProjectRole.by_name('*anonymous')._id
anon_read = M.ACE.allow(anon, 'read')
acl.remove(anon_read)
self.app.get('/rest/p/test/blog/',
extra_environ={'username': '*anonymous'},
status=401)
def test_new_post_permissions(self):
self.app.post('/rest/p/test/blog/',
params=dict(title='test', text='test text',
state='published'),
extra_environ={'username': '*anonymous'},
status=401)
p = M.Project.query.get(shortname='test')
acl = p.app_instance('blog').config.acl
anon = M.ProjectRole.by_name('*anonymous')._id
anon_write = M.ACE.allow(anon, 'write')
acl.append(anon_write)
self.app.post('/rest/p/test/blog/',
params=dict(title='test', text='test text',
state='published'),
extra_environ={'username': '*anonymous'},
status=201)
def test_update_post_permissions(self):
self.api_post('/rest/p/test/blog/', title='test',
text='test text', state='published')
url = '/rest' + BM.BlogPost.query.find().first().url()
self.app.post(url.encode('utf-8'),
params=dict(title='test2', text='test text2',
state='published'),
extra_environ={'username': '*anonymous'},
status=401)
p = M.Project.query.get(shortname='test')
acl = p.app_instance('blog').config.acl
anon = M.ProjectRole.by_name('*anonymous')._id
anon_write = M.ACE.allow(anon, 'write')
acl.append(anon_write)
self.app.post(url.encode('utf-8'),
params=dict(title='test2', text='test text2',
state='published'),
extra_environ={'username': '*anonymous'},
status=200)
r = self.api_get(url)
assert_equal(r.json['title'], 'test2')
assert_equal(r.json['text'], 'test text2')
assert_equal(r.json['state'], 'published')
def test_permission_draft_post(self):
self.api_post('/rest/p/test/blog/', title='test',
text='test text', state='draft')
r = self.app.get('/rest/p/test/blog/',
extra_environ={'username': '*anonymous'})
assert_equal(r.json['posts'], [])
url = '/rest' + BM.BlogPost.query.find().first().url()
self.app.post(url.encode('utf-8'),
params=dict(title='test2', text='test text2',
state='published'),
extra_environ={'username': '*anonymous'},
status=401)
p = M.Project.query.get(shortname='test')
acl = p.app_instance('blog').config.acl
anon = M.ProjectRole.by_name('*anonymous')._id
anon_write = M.ACE.allow(anon, 'write')
acl.append(anon_write)
r = self.app.get('/rest/p/test/blog/',
extra_environ={'username': '*anonymous'})
assert_equal(r.json['posts'][0]['title'], 'test')
def test_draft_post(self):
self.api_post('/rest/p/test/blog/', title='test',
text='test text', state='draft')
r = self.app.get('/rest/p/test/blog/',
extra_environ={'username': '*anonymous'})
assert_equal(r.json['posts'], [])
url = '/rest' + BM.BlogPost.query.find().first().url()
self.api_post(url, state='published')
r = self.app.get('/rest/p/test/blog/',
extra_environ={'username': '*anonymous'})
assert_equal(r.json['posts'][0]['title'], 'test')
def test_pagination(self):
self.api_post('/rest/p/test/blog/', title='test1',
text='test text1', state='published')
self.api_post('/rest/p/test/blog/', title='test2',
text='test text2', state='published')
self.api_post('/rest/p/test/blog/', title='test3',
text='test text3', state='published')
r = self.api_get('/rest/p/test/blog/', limit='1', page='0')
assert_equal(r.json['posts'][0]['title'], 'test3')
assert_equal(len(r.json['posts']), 1)
assert_equal(r.json['count'], 3)
assert_equal(r.json['limit'], 1)
assert_equal(r.json['page'], 0)
r = self.api_get('/rest/p/test/blog/', limit='2', page='0')
assert_equal(r.json['posts'][0]['title'], 'test3')
assert_equal(r.json['posts'][1]['title'], 'test2')
assert_equal(len(r.json['posts']), 2)
assert_equal(r.json['count'], 3)
assert_equal(r.json['limit'], 2)
assert_equal(r.json['page'], 0)
r = self.api_get('/rest/p/test/blog/', limit='1', page='2')
assert_equal(r.json['posts'][0]['title'], 'test1')
assert_equal(r.json['count'], 3)
assert_equal(r.json['limit'], 1)
assert_equal(r.json['page'], 2)
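# Illustrative note: posts are returned newest-first, so with three posts,
# limit=1 and page=2 select the oldest one ('test1').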
def test_has_access_no_params(self):
self.api_get('/rest/p/test/blog/has_access', status=404)
self.api_get('/rest/p/test/blog/has_access?user=root', status=404)
self.api_get('/rest/p/test/blog/has_access?perm=read', status=404)
def test_has_access_unknown_params(self):
"""Unknown user and/or permission always False for has_access API"""
r = self.api_get(
'/rest/p/test/blog/has_access?user=babadook&perm=read',
user='root')
assert_equal(r.status_int, 200)
assert_equal(r.json['result'], False)
r = self.api_get(
'/rest/p/test/blog/has_access?user=test-user&perm=jump',
user='root')
assert_equal(r.status_int, 200)
assert_equal(r.json['result'], False)
def test_has_access_not_admin(self):
"""
User which has no 'admin' permission on neighborhood can't use
has_access API
"""
self.api_get(
'/rest/p/test/blog/has_access?user=test-admin&perm=admin',
user='test-user',
status=403)
def test_has_access(self):
r = self.api_get(
'/rest/p/test/blog/has_access?user=test-admin&perm=post&access_token=ABCDEF',
user='root')
assert_equal(r.status_int, 200)
assert_equal(r.json['result'], True)
r = self.api_get(
'/rest/p/test/blog/has_access?user=*anonymous&perm=admin',
user='root')
assert_equal(r.status_int, 200)
assert_equal(r.json['result'], False)
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A PHP devappserver2 runtime."""
import base64
import cStringIO
import httplib
import logging
import os
import subprocess
import sys
import time
import urllib
import google
from google.appengine.api import appinfo
from google.appengine.tools.devappserver2 import environ_utils
from google.appengine.tools.devappserver2 import http_runtime_constants
from google.appengine.tools.devappserver2 import php
from google.appengine.tools.devappserver2 import request_rewriter
from google.appengine.tools.devappserver2 import runtime_config_pb2
from google.appengine.tools.devappserver2 import safe_subprocess
from google.appengine.tools.devappserver2 import wsgi_server
SDK_PATH = os.path.abspath(
os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), 'php/sdk'))
if not os.path.exists(SDK_PATH):
SDK_PATH = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]),
'php/sdk'))
SETUP_PHP_PATH = os.path.join(os.path.dirname(php.__file__), 'setup.php')
class PHPRuntime(object):
"""A WSGI application that runs PHP scripts using the PHP CGI binary."""
def __init__(self, config):
logging.debug('Initializing runtime with %s', config)
self.config = config
if appinfo.MODULE_SEPARATOR not in config.version_id:
module_id = appinfo.DEFAULT_MODULE
version_id = config.version_id
else:
module_id, version_id = config.version_id.split(appinfo.MODULE_SEPARATOR)
self.environ_template = {
'APPLICATION_ID': str(config.app_id),
'CURRENT_MODULE_ID': module_id,
'CURRENT_VERSION_ID': version_id,
'DATACENTER': str(config.datacenter),
'INSTANCE_ID': str(config.instance_id),
'APPENGINE_RUNTIME': 'php',
'AUTH_DOMAIN': str(config.auth_domain),
'HTTPS': 'off',
# By default php-cgi does not allow .php files to be run directly so
# REDIRECT_STATUS must be set. See:
# http://php.net/manual/en/security.cgi-bin.force-redirect.php
'REDIRECT_STATUS': '1',
'REMOTE_API_HOST': str(config.api_host),
'REMOTE_API_PORT': str(config.api_port),
'SERVER_SOFTWARE': http_runtime_constants.SERVER_SOFTWARE,
'TZ': 'UTC',
}
self.environ_template.update((env.key, env.value) for env in config.environ)
def make_php_cgi_environ(self, environ):
"""Returns a dict of environ for php-cgi based off the wsgi environ."""
user_environ = self.environ_template.copy()
environ_utils.propagate_environs(environ, user_environ)
user_environ['REQUEST_METHOD'] = environ.get('REQUEST_METHOD', 'GET')
user_environ['PATH_INFO'] = environ['PATH_INFO']
user_environ['QUERY_STRING'] = environ['QUERY_STRING']
# Construct the partial URL that PHP expects for REQUEST_URI
# (http://php.net/manual/en/reserved.variables.server.php) using part of
# the process described in PEP-333
# (http://www.python.org/dev/peps/pep-0333/#url-reconstruction).
user_environ['REQUEST_URI'] = urllib.quote(user_environ['PATH_INFO'])
if user_environ['QUERY_STRING']:
user_environ['REQUEST_URI'] += '?' + user_environ['QUERY_STRING']
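# Example (illustrative values): PATH_INFO='/foo bar.php' with
# QUERY_STRING='a=1' yields REQUEST_URI='/foo%20bar.php?a=1'.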
# Modify the SCRIPT_FILENAME to specify the setup script that readies the
# PHP environment. Put the user script in REAL_SCRIPT_FILENAME.
user_environ['REAL_SCRIPT_FILENAME'] = os.path.normpath(
os.path.join(self.config.application_root,
environ[http_runtime_constants.SCRIPT_HEADER].lstrip('/')))
user_environ['SCRIPT_FILENAME'] = SETUP_PHP_PATH
user_environ['REMOTE_REQUEST_ID'] = environ[
http_runtime_constants.REQUEST_ID_ENVIRON]
# Pass the APPLICATION_ROOT so we can use it in the setup script. We will
# remove it from the environment before we execute the user script.
user_environ['APPLICATION_ROOT'] = self.config.application_root
if 'CONTENT_TYPE' in environ:
user_environ['CONTENT_TYPE'] = environ['CONTENT_TYPE']
user_environ['HTTP_CONTENT_TYPE'] = environ['CONTENT_TYPE']
if 'CONTENT_LENGTH' in environ:
user_environ['CONTENT_LENGTH'] = environ['CONTENT_LENGTH']
user_environ['HTTP_CONTENT_LENGTH'] = environ['CONTENT_LENGTH']
# On Windows, in order to run a side-by-side assembly the specified env
# must include a valid SystemRoot.
if 'SYSTEMROOT' in os.environ:
user_environ['SYSTEMROOT'] = os.environ['SYSTEMROOT']
# On Windows, TMP & TEMP environmental variables are used by GetTempPath
# http://msdn.microsoft.com/library/windows/desktop/aa364992(v=vs.85).aspx
if 'TMP' in os.environ:
user_environ['TMP'] = os.environ['TMP']
if 'TEMP' in os.environ:
user_environ['TEMP'] = os.environ['TEMP']
if self.config.php_config.enable_debugger:
user_environ['XDEBUG_CONFIG'] = environ.get('XDEBUG_CONFIG', '')
return user_environ
def make_php_cgi_args(self):
"""Returns an array of args for php-cgi based on self.config."""
# See http://www.php.net/manual/en/ini.core.php#ini.include-path.
include_paths = ['.', self.config.application_root, SDK_PATH]
if sys.platform == 'win32':
# See https://bugs.php.net/bug.php?id=46034 for quoting requirements.
include_path = 'include_path="%s"' % ';'.join(include_paths)
else:
include_path = 'include_path=%s' % ':'.join(include_paths)
args = [self.config.php_config.php_executable_path, '-d', include_path]
# Load php.ini from application's root.
args.extend(['-c', self.config.application_root])
if self.config.php_config.enable_debugger:
args.extend(['-d', 'xdebug.default_enable="1"'])
args.extend(['-d', 'xdebug.overload_var_dump="1"'])
args.extend(['-d', 'xdebug.remote_enable="1"'])
if self.config.php_config.xdebug_extension_path:
args.extend(['-d', 'zend_extension="%s"' %
self.config.php_config.xdebug_extension_path])
if self.config.php_config.gae_extension_path:
args.extend(['-d', 'extension="%s"' % os.path.basename(
self.config.php_config.gae_extension_path)])
args.extend(['-d', 'extension_dir="%s"' % os.path.dirname(
self.config.php_config.gae_extension_path)])
return args
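# Illustrative result (assumed paths): on a POSIX host this yields roughly
#   ['/usr/bin/php-cgi', '-d', 'include_path=.:/app:/sdk', '-c', '/app']
# plus additional '-d' xdebug/extension settings when the config enables them.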
def __call__(self, environ, start_response):
"""Handles an HTTP request for the runtime using a PHP executable.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
user_environ = self.make_php_cgi_environ(environ)
if 'CONTENT_LENGTH' in environ:
content = environ['wsgi.input'].read(int(environ['CONTENT_LENGTH']))
else:
content = ''
args = self.make_php_cgi_args()
# Handle interactive requests.
request_type = environ.pop(http_runtime_constants.REQUEST_TYPE_HEADER, None)
if request_type == 'interactive':
args.extend(['-d', 'html_errors="0"'])
user_environ[http_runtime_constants.REQUEST_TYPE_HEADER] = request_type
try:
p = safe_subprocess.start_process(args,
input_string=content,
env=user_environ,
cwd=self.config.application_root,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
except Exception as e:
logging.exception('Failure to start PHP with: %s', args)
start_response('500 Internal Server Error',
[(http_runtime_constants.ERROR_CODE_HEADER, '1')])
return ['Failure to start the PHP subprocess with %r:\n%s' % (args, e)]
if p.returncode:
if request_type == 'interactive':
start_response('200 OK', [('Content-Type', 'text/plain')])
message = httplib.HTTPMessage(cStringIO.StringIO(stdout))
return [message.fp.read()]
else:
logging.error('php failure (%r) with:\nstdout:\n%sstderr:\n%s',
p.returncode, stdout, stderr)
start_response('500 Internal Server Error',
[(http_runtime_constants.ERROR_CODE_HEADER, '1')])
message = httplib.HTTPMessage(cStringIO.StringIO(stdout))
return [message.fp.read()]
message = httplib.HTTPMessage(cStringIO.StringIO(stdout))
if 'Status' in message:
status = message['Status']
del message['Status']
else:
status = '200 OK'
# Avoid merging repeated headers into a single header so that multiple
# Set-Cookie headers are preserved.
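# For example (illustrative): if PHP emits two Set-Cookie lines, both appear
# below as separate (name, value) tuples rather than one comma-joined value.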
headers = []
for name in message:
for value in message.getheaders(name):
headers.append((name, value))
start_response(status, headers)
return [message.fp.read()]
def main():
config = runtime_config_pb2.Config()
config.ParseFromString(base64.b64decode(sys.stdin.read()))
server = wsgi_server.WsgiServer(
('localhost', 0),
request_rewriter.runtime_rewriter_middleware(PHPRuntime(config)))
server.start()
print server.port
sys.stdout.close()
sys.stdout = sys.stderr
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
finally:
server.quit()
if __name__ == '__main__':
main()
| |
from unittest import TestCase, main
import eventlet
from eventlet import Queue
from eventlet import pools
from eventlet.support import six
class IntPool(pools.Pool):
def create(self):
self.current_integer = getattr(self, 'current_integer', 0) + 1
return self.current_integer
class TestIntPool(TestCase):
def setUp(self):
self.pool = IntPool(min_size=0, max_size=4)
def test_integers(self):
# Do not actually use this pattern in your code. The pool will be
# exhausted, and unrestorable.
# If you do a get, you should ALWAYS do a put, probably like this:
# try:
#     thing = self.pool.get()
#     # do stuff
# finally:
#     self.pool.put(thing)
# or, equivalently, using the pool's context-manager API:
# with self.pool.some_api_name() as thing:
#     # do stuff
self.assertEqual(self.pool.get(), 1)
self.assertEqual(self.pool.get(), 2)
self.assertEqual(self.pool.get(), 3)
self.assertEqual(self.pool.get(), 4)
def test_free(self):
self.assertEqual(self.pool.free(), 4)
gotten = self.pool.get()
self.assertEqual(self.pool.free(), 3)
self.pool.put(gotten)
self.assertEqual(self.pool.free(), 4)
def test_exhaustion(self):
waiter = Queue(0)
def consumer():
gotten = None
try:
gotten = self.pool.get()
finally:
waiter.put(gotten)
eventlet.spawn(consumer)
one, two, three, four = (
self.pool.get(), self.pool.get(), self.pool.get(), self.pool.get())
self.assertEqual(self.pool.free(), 0)
# Let consumer run; nothing will be in the pool, so it will wait
eventlet.sleep(0)
# Wake consumer
self.pool.put(one)
# wait for the consumer
self.assertEqual(waiter.get(), one)
def test_blocks_on_pool(self):
waiter = Queue(0)
def greedy():
self.pool.get()
self.pool.get()
self.pool.get()
self.pool.get()
# No one should be waiting yet.
self.assertEqual(self.pool.waiting(), 0)
# The call to the next get will unschedule this routine.
self.pool.get()
# So this put should never be called.
waiter.put('Failed!')
killable = eventlet.spawn(greedy)
# no one should be waiting yet.
self.assertEqual(self.pool.waiting(), 0)
# Wait for greedy
eventlet.sleep(0)
# Greedy should be blocking on the last get
self.assertEqual(self.pool.waiting(), 1)
# The put in greedy is never reached, so the 'Failed!' marker was never sent.
self.assertTrue(waiter.full())
eventlet.kill(killable)
def test_ordering(self):
# normal case is that items come back out in the
# same order they are put
one, two = self.pool.get(), self.pool.get()
self.pool.put(one)
self.pool.put(two)
self.assertEqual(self.pool.get(), one)
self.assertEqual(self.pool.get(), two)
def test_putting_to_queue(self):
timer = eventlet.Timeout(0.1)
try:
size = 2
self.pool = IntPool(min_size=0, max_size=size)
queue = Queue()
results = []
def just_put(pool_item, index):
self.pool.put(pool_item)
queue.put(index)
for index in six.moves.range(size + 1):
pool_item = self.pool.get()
eventlet.spawn(just_put, pool_item, index)
for _ in six.moves.range(size + 1):
x = queue.get()
results.append(x)
self.assertEqual(sorted(results), list(six.moves.range(size + 1)))
finally:
timer.cancel()
def test_resize(self):
pool = IntPool(max_size=2)
a = pool.get()
b = pool.get()
self.assertEqual(pool.free(), 0)
# verify that the pool discards excess items put into it
pool.resize(1)
pool.put(a)
pool.put(b)
self.assertEqual(pool.free(), 1)
# resize larger and assert that there are more free items
pool.resize(2)
self.assertEqual(pool.free(), 2)
def test_create_contention(self):
creates = [0]
def sleep_create():
creates[0] += 1
eventlet.sleep()
return "slept"
p = pools.Pool(max_size=4, create=sleep_create)
def do_get():
x = p.get()
self.assertEqual(x, "slept")
p.put(x)
gp = eventlet.GreenPool()
for i in six.moves.range(100):
gp.spawn_n(do_get)
gp.waitall()
self.assertEqual(creates[0], 4)
class TestAbstract(TestCase):
mode = 'static'
def test_abstract(self):
# Going for 100% coverage here
# A Pool cannot be used without overriding create()
pool = pools.Pool()
self.assertRaises(NotImplementedError, pool.get)
class TestIntPool2(TestCase):
mode = 'static'
def setUp(self):
self.pool = IntPool(min_size=3, max_size=3)
def test_something(self):
self.assertEqual(len(self.pool.free_items), 3)
# Cover the clause in get where we get from the free list instead of creating
# an item on get
gotten = self.pool.get()
self.assertEqual(gotten, 1)
class TestOrderAsStack(TestCase):
mode = 'static'
def setUp(self):
self.pool = IntPool(max_size=3, order_as_stack=True)
def test_ordering(self):
# items come out in the reverse order they are put
one, two = self.pool.get(), self.pool.get()
self.pool.put(one)
self.pool.put(two)
self.assertEqual(self.pool.get(), two)
self.assertEqual(self.pool.get(), one)
class RaisePool(pools.Pool):
def create(self):
raise RuntimeError()
class TestCreateRaises(TestCase):
mode = 'static'
def setUp(self):
self.pool = RaisePool(max_size=3)
def test_it(self):
self.assertEqual(self.pool.free(), 3)
self.assertRaises(RuntimeError, self.pool.get)
self.assertEqual(self.pool.free(), 3)
ALWAYS = RuntimeError('I always fail')
SOMETIMES = RuntimeError('I fail half the time')
class TestTookTooLong(Exception):
pass
if __name__ == '__main__':
main()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GTKPhotoInfo.py
#
# Copyright 2010-2015 Jose Riguera Lopez <jriguera@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A GTK+ implementation of the user interface: the PhotoInfo window.
"""
__program__ = "photoplace"
__author__ = "Jose Riguera Lopez <jriguera@gmail.com>"
__version__ = "0.6.1"
__date__ = "Dec 2014"
__license__ = "Apache 2.0"
__copyright__ ="(c) Jose Riguera"
import os.path
import cStringIO
import warnings
import types
import re
from PIL import Image
warnings.filterwarnings('ignore', module='gtk')
try:
import pygtk
pygtk.require("2.0")
import gtk
import gobject
except Exception as e:
warnings.resetwarnings()
print("Warning: %s" % str(e))
print("You don't have the PyGTK 2.0 module installed")
raise
warnings.resetwarnings()
from PhotoPlace.definitions import *
from GTKUIdefinitions import *
from PhotoPlace.Facade import parse_str_datetime as parse_str_datetime
# ##############
# JPEG to pixbuf
# ##############
def getPixbuf(geophoto, size=PIXBUFSIZE_GEOPHOTOINFO, interpolation=Image.BILINEAR):
im = Image.open(geophoto.path)
(im_width, im_height) = im.size
# Size transformations
(width, height) = size
mirror = im.resize((width, height), interpolation)
if 'Exif.Image.Orientation' in geophoto.exif.exif_keys:
orientation = geophoto.exif['Exif.Image.Orientation'].value
if orientation == 1:
pass
elif orientation == 2:
# Vertical Mirror
mirror = mirror.transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 3:
# Rotation 180
mirror = mirror.transpose(Image.ROTATE_180)
elif orientation == 4:
# Horizontal Mirror
mirror = mirror.transpose(Image.FLIP_TOP_BOTTOM)
elif orientation == 5:
# Horizontal Mirror + Rotation 270
mirror = mirror.transpose(Image.FLIP_TOP_BOTTOM). \
transpose(Image.ROTATE_270)
elif orientation == 6:
# Rotation 270
mirror = mirror.transpose(Image.ROTATE_270)
elif orientation == 7:
# Vertical Mirror + Rotation 270
mirror = mirror.transpose(Image.FLIP_LEFT_RIGHT). \
transpose(Image.ROTATE_270)
elif orientation == 8:
# Rotation 90
mirror = mirror.transpose(Image.ROTATE_90)
#filein = StringIO.StringIO()
filein = cStringIO.StringIO()
mirror.save(filein, 'ppm')
contents = filein.getvalue()
filein.close()
loader = gtk.gdk.PixbufLoader("pnm")
loader.write(contents, len(contents))
loader.close()
pixbuf = loader.get_pixbuf()
return pixbuf
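# A minimal usage sketch (widget name hypothetical): given a geophoto with a
# readable `path` and pyexiv2-style `exif` metadata, getPixbuf returns a
# gtk.gdk.Pixbuf already resized and rotated per the EXIF orientation tag:
#
#   pixbuf = getPixbuf(geophoto, size=(320, 240))
#   some_gtk_image.set_from_pixbuf(pixbuf)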
# #################################
# Photo Extended information window
# #################################
class PhotoInfoGUI(gobject.GObject):
"""
Photo Extended information window
"""
_instance = None
__gsignals__ = {
'save' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,
(gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)),
'_save': (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,
(gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)),
'reload' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,
(gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)),
'_reload' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,
(gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)),
}
# Singleton
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(PhotoInfoGUI, cls).__new__(cls)
return cls._instance
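    # Construction is therefore idempotent (a sketch of the intended
    # contract, not from the original): a second call returns the cached
    # instance, and __init__ only rebuilds the UI when a resourcedir is given.
    #
    #   gui = PhotoInfoGUI(resourcedir, parentwindow)
    #   assert PhotoInfoGUI() is gui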
def __init__(self, resourcedir=None, parentwindow=None):
if resourcedir:
gobject.GObject.__init__(self)
guifile = os.path.join(resourcedir, GTKUI_RESOURCE_PhotoInfoGUIXML)
self.builder = gtk.Builder()
self.builder.set_translation_domain(GTKUI_GETTEXT_DOMAIN)
self.builder.add_from_file(guifile)
self.window = self.builder.get_object("window")
self.window.set_transient_for(parentwindow)
self.window.set_destroy_with_parent(True)
self.image = self.builder.get_object("image")
self.label = self.builder.get_object("label-geophotopath")
self.treeview = self.builder.get_object("treeview")
self.treeview.set_tooltip_markup(_("Double click on a cell to edit the value. \n\n"
"If you want to geotag a photo manualy, you have to switch to <b>[write]</b> mode first. "
"You must use the same format as you see for name, date and numbers. \n\n"
"<b>Latitude</b>, <b>Longitude</b>, <b>Elevation</b>: (float) GPS coordinates of the photo.\n\n"
"<b>Status</b>: (integer) value greather than 0 means active.\n\n"
"<b>Time Offset</b>: (integer) seconds to add to the current date/time to get the original date/time.\n\n"
"<b>Point Time</b>: (if appears, date/time, not editable) time of GPX point used to geotag the photo.\n\n"
"<b>Variables</b>: list of attributes settled up by Addons to control their behaviour.")
)
treeviewcolumn_key = self.builder.get_object("treeviewcolumn-key")
treeviewcolumn_key.set_title(_("Property"))
treeviewcolumn_key.set_sizing(gtk.TREE_VIEW_COLUMN_FIXED)
treeviewcolumn_key.set_resizable(True)
treeviewcolumn_key.set_fixed_width(180)
treeviewcolumn_value = self.builder.get_object("treeviewcolumn-value")
treeviewcolumn_value.set_title(_("Value"))
# deletable, editable, key, value, fgcolor, instance
self.treestore = gtk.TreeStore(bool, bool, str, str, str, gobject.TYPE_PYOBJECT, str)
self.treeview.set_model(self.treestore)
self.treeview.set_rules_hint(True)
self.ready = False
def __getitem__(self, key):
return self.builder.get_object(key)
def __setitem__(self, key, value):
raise ValueError("Cannot set key!")
def init(self, userfacade, treemodel):
self.userfacade = userfacade
self.stzdiff = userfacade.state.stzdiff
self.current_geophoto = None
self.current_pixbuf = None
self.main_treemodel_iterator = None
self.main_treemodel = treemodel
self.cannextprev = True
self.first_time = True
self.signals = {
"on_window_delete_event": self.close,
"on_treeview_row_activated": self._clicked_attr,
"on_cellrenderertext-value_edited": self._edit_cell,
"on_button-add_clicked": self._add_attr,
"on_button-del_clicked": self._del_attr,
"on_button-next_clicked": self.next,
"on_button-close_clicked": self.close,
"on_button-prev_clicked": self.prev,
}
self.builder.connect_signals(self.signals)
self._signals = {
'save' : [],
'reload' : [],
}
self.ready = True
def connect(self, name, *args):
if self.ready:
            retval = gobject.GObject.connect(self, name, *args)
            if not name.startswith('_'):
                self._signals[name].append(retval)
return retval
def disconnect(self, identifier=None):
if self.ready:
if identifier:
gobject.GObject.disconnect(self, identifier)
for signal in self._signals:
if identifier in self._signals[signal]:
self._signals[signal].remove(identifier)
else:
for signal in self._signals:
for i in self._signals[signal]:
gobject.GObject.disconnect(self, i)
self._signals[signal].remove(i)
self._signals[signal] = list()
def show(self, geophoto=None, iterator=None, cannextprev=None, sizex=1, sizey=1):
if not self.ready:
return False
if geophoto:
if not iterator:
return False
self.current_geophoto = geophoto
self.main_treemodel_iterator = iterator
dgettext = dict()
dgettext['program'] = PhotoPlace_name
dgettext['photo'] = self.current_geophoto['name']
self.window.set_title('%(program)s: Exif info of <%(photo)s>' % dgettext)
self.current_pixbuf = getPixbuf(self.current_geophoto)
self.label.set_text(self.current_geophoto.path)
img_width, img_height = self._image(self.current_pixbuf)
self._show(self.current_geophoto)
if self.first_time:
if img_width > img_height:
self["hpaned"].set_position(img_width + 2)
else:
self["hpaned"].set_position(img_height + 2)
self.window.show_all()
if cannextprev != None:
self.cannextprev = cannextprev
if not self.cannextprev:
self["button-prev"].hide()
self["button-next"].hide()
else:
if not self._next():
self["button-next"].set_sensitive(False)
else:
self["button-next"].set_sensitive(True)
if not self._prev():
self["button-prev"].set_sensitive(False)
else:
self["button-prev"].set_sensitive(True)
self.first_time = False
return True
def _image(self, pixbuf, interpolation=gtk.gdk.INTERP_BILINEAR):
allocation = self['image'].get_allocation()
allo_width = allocation.width
allo_height = allocation.height
img_width = pixbuf.get_width()
img_height = pixbuf.get_height()
if allo_height == 1:
            # first time
allo_height = 400
if img_width > img_height:
allo_height = 360
percent = float(allo_height) / float(img_height)
allo_width = int(img_width * percent)
scaled = pixbuf.scale_simple(allo_width, allo_height, interpolation)
self.image.set_from_pixbuf(scaled)
return (allo_width, allo_height)
def _show(self, geophoto):
model = self.treeview.get_model()
model.clear()
color = TREEVIEWPHOTOINFO_GEOPHOTOINFO_COLOR
model.append(None,[False, True, _("<b>Name</b>"), str(geophoto['name']), color, types.StringType, 'name'])
model.append(None,[False, True, _("<b>Date/Time</b>"), str(geophoto['time']), color, types.StringType, 'time'])
lat = ''
lon = ''
ele = ''
if geophoto.isGeoLocated():
lon = "%.8f" % geophoto['lon']
lat = "%.8f" % geophoto['lat']
ele = "%.3f" % geophoto['ele']
model.append(None,[False, True, _("<b>Longitude</b>"), lon, color, types.FloatType, 'lon'])
model.append(None,[False, True, _("<b>Latitude</b>"), lat, color, types.FloatType, 'lat'])
model.append(None,[False, True, _("<b>Elevation</b>"), ele, color, types.FloatType, 'ele'])
if geophoto.ptime:
ptime = geophoto.ptime.strftime("%Y-%m-%dT%H:%M:%S") + self.stzdiff
model.append(None,[False, False, _("<b>Point Time</b>"), ptime, color, types.StringType, 'ptime'])
model.append(None,[False, True, _("<b>Time Offset</b>"), "%d" % geophoto['toffset'], color, types.IntType, 'toffset'])
model.append(None,[False, True, _("<b>Status</b>"), "%d" % geophoto['status'], color, types.IntType, 'status'])
color = TREEVIEWPHOTOINFO_GEOPHOTOATTR_COLOR
ite = model.append(None, [False, False, _("Variables"), None, color, None, None])
if geophoto.attr:
for k, v in geophoto.attr.iteritems():
model.append(ite, [ True, True, str(k), str(v), color, type(v), ''])
color = TREEVIEWPHOTOINFO_GEOPHOTOEXIF_COLOR
ite = model.append(None, [ False, False, _("Image EXIF Values"), None, color, None, None])
for k in geophoto.exif.exif_keys:
try:
model.append(ite, [ False, False, str(k), str(geophoto[k]), color, None, ''])
except:
pass
self.treeview.expand_all()
def next(self, widget=None, data=None):
self.emit('save', self.main_treemodel_iterator, self.current_geophoto)
self.emit('_save', self.main_treemodel_iterator, self.current_geophoto)
next = self._next()
if next != None:
geophoto_path = self.main_treemodel.get_value(next, TREEVIEWPHOTOS_COL_PATH)
for geophoto in self.userfacade.state.geophotos:
if geophoto.path == geophoto_path:
self.show(geophoto, next)
return
def _next(self):
next = self.main_treemodel.iter_next(self.main_treemodel_iterator)
while next != None and self.main_treemodel.iter_parent(next) == self.main_treemodel_iterator:
next = self.main_treemodel.iter_next(next)
return next
def prev(self, widget=None, data=None):
self.emit('save', self.main_treemodel_iterator, self.current_geophoto)
self.emit('_save', self.main_treemodel_iterator, self.current_geophoto)
prev = self._prev()
if prev != None:
geophoto_path = self.main_treemodel.get_value(prev, TREEVIEWPHOTOS_COL_PATH)
for geophoto in self.userfacade.state.geophotos:
if geophoto.path == geophoto_path:
self.show(geophoto, prev)
return
def _prev(self):
prev = None
path = self.main_treemodel.get_path(self.main_treemodel_iterator)
position = path[0]
if position != 0:
prev_path = position - 1
prev = self.main_treemodel.get_iter(str(prev_path))
return prev
def _clicked_attr(self, treeview, path, column, data=None):
model = treeview.get_model()
ite = model.get_iter(path)
if model.get_value(ite, TREEVIEWPHOTOINFO_COL_EDIT):
key = model.get_value(ite, TREEVIEWPHOTOINFO_COL_VKEY)
value = model.get_value(ite, TREEVIEWPHOTOINFO_COL_VALUE)
self["entry-key"].set_text(key)
def _add_attr(self, widget, value=''):
key = self["entry-key"].get_text().strip()
if key:
self.current_geophoto.attr[key] = value
self._show(self.current_geophoto)
def _del_attr(self, widget):
key = self["entry-key"].get_text().strip()
        if key in self.current_geophoto.attr:
del self.current_geophoto.attr[key]
self._show(self.current_geophoto)
self["entry-key"].set_text('')
def _edit_cell(self, cell, path_string, new_text):
model = self.treeview.get_model()
treestore_iter = model.get_iter_from_string(path_string)
obj = model.get_value(treestore_iter, TREEVIEWPHOTOINFO_COL_TYPE)
key = model.get_value(treestore_iter, TREEVIEWPHOTOINFO_COL_KEY)
text = new_text.strip()
try:
if key:
value = None
# is a geophoto main attr
if key == "name":
text = re.sub(r"[^\w\s\.\-]", '', text)
text = re.sub(r"\s+", '_', text)
text, new_ext = os.path.splitext(text)
if not text:
raise ValueError
value = text + PhotoPlace_FILE_DEF_EXTENSION
elif key == "lat":
value = obj(text)
if value < -90.0 or value > 90.0:
raise ValueError
elif key == "lon":
value = obj(text)
if value < -180.0 or value > 180.0:
raise ValueError
elif key == "ele":
value = obj(text)
if value < -100.0 or value > 50000.0:
raise ValueError
elif key == "status":
value = obj(text)
if value < 0:
raise ValueError
elif key == "toffset":
value = obj(text)
elif key == "ptime" or key == "time":
(scope, value) = parse_str_datetime(text)
                    if scope not in [ "day", "hour", "minute", "second" ]:
raise ValueError
if value != None:
self.current_geophoto[key] = value
model.set(treestore_iter, TREEVIEWPHOTOINFO_COL_VALUE, value)
else:
raise ValueError
else:
# user/plugin defined
key = model.get_value(treestore_iter, TREEVIEWPHOTOINFO_COL_VKEY)
self.current_geophoto.attr[key] = obj(text)
model.set(treestore_iter, TREEVIEWPHOTOINFO_COL_VALUE, text)
self.emit('reload', self.main_treemodel_iterator, self.current_geophoto)
self.emit('_reload', self.main_treemodel_iterator, self.current_geophoto)
except:
pass
def close(self, window=None, event=None):
self.emit('save', self.main_treemodel_iterator, self.current_geophoto)
self.emit('_save', self.main_treemodel_iterator, self.current_geophoto)
self.window.hide_all()
self.disconnect()
return True
# EOF
| |
import os
import gettext as gettext_module
from django import http
from django.conf import settings
from django.utils import importlib
from django.utils.translation import check_for_language, activate, to_locale, get_language
from django.utils.text import javascript_quote
from django.utils.encoding import smart_text
from django.utils.formats import get_format_modules, get_format
from django.utils._os import upath
from django.utils.http import is_safe_url
from django.utils import six
def set_language(request):
"""
Redirect to a given url while setting the chosen language in the
session or cookie. The url and the language code need to be
specified in the request parameters.
Since this view changes how the user will see the rest of the site, it must
only be accessed as a POST request. If called as a GET request, it will
redirect to the page in the request (the 'next' parameter) without changing
any state.
"""
next = request.REQUEST.get('next')
if not is_safe_url(url=next, host=request.get_host()):
next = request.META.get('HTTP_REFERER')
if not is_safe_url(url=next, host=request.get_host()):
next = '/'
response = http.HttpResponseRedirect(next)
if request.method == 'POST':
lang_code = request.POST.get('language', None)
if lang_code and check_for_language(lang_code):
if hasattr(request, 'session'):
request.session['django_language'] = lang_code
else:
response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code)
return response
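# For illustration (markup hypothetical, not from this module): a template
# form posting to this view switches the active language and redirects:
#
#   <form action="/i18n/setlang/" method="post">
#     <input name="language" value="fr">
#     <input name="next" value="/about/">
#     <input type="submit" value="Go">
#   </form>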
def get_formats():
"""
    Returns all format strings required for i18n to work
"""
FORMAT_SETTINGS = (
'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT',
'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT',
'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR',
'THOUSAND_SEPARATOR', 'NUMBER_GROUPING',
'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'
)
result = {}
for module in [settings] + get_format_modules(reverse=True):
for attr in FORMAT_SETTINGS:
result[attr] = get_format(attr)
src = []
for k, v in result.items():
if isinstance(v, (six.string_types, int)):
src.append("formats['%s'] = '%s';\n" % (javascript_quote(k), javascript_quote(smart_text(v))))
elif isinstance(v, (tuple, list)):
v = [javascript_quote(smart_text(value)) for value in v]
src.append("formats['%s'] = ['%s'];\n" % (javascript_quote(k), "', '".join(v)))
return ''.join(src)
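# The fragment returned above is later embedded between LibFormatHead and
# LibFormatFoot; its shape (values depend on the active locale and are shown
# only as an illustration) is roughly:
#   formats['DECIMAL_SEPARATOR'] = '.';
#   formats['DATE_INPUT_FORMATS'] = ['%Y-%m-%d', '%m/%d/%Y'];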
NullSource = """
/* gettext identity library */
function gettext(msgid) { return msgid; }
function ngettext(singular, plural, count) { return (count == 1) ? singular : plural; }
function gettext_noop(msgid) { return msgid; }
function pgettext(context, msgid) { return msgid; }
function npgettext(context, singular, plural, count) { return (count == 1) ? singular : plural; }
"""
LibHead = """
/* gettext library */
var catalog = new Array();
"""
LibFoot = """
function gettext(msgid) {
var value = catalog[msgid];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return (typeof(value) == 'string') ? value : value[0];
}
}
function ngettext(singular, plural, count) {
  var value = catalog[singular];
if (typeof(value) == 'undefined') {
return (count == 1) ? singular : plural;
} else {
return value[pluralidx(count)];
}
}
function gettext_noop(msgid) { return msgid; }
function pgettext(context, msgid) {
var value = gettext(context + '\\x04' + msgid);
if (value.indexOf('\\x04') != -1) {
value = msgid;
}
return value;
}
function npgettext(context, singular, plural, count) {
var value = ngettext(context + '\\x04' + singular, context + '\\x04' + plural, count);
if (value.indexOf('\\x04') != -1) {
value = ngettext(singular, plural, count);
}
return value;
}
"""
LibFormatHead = """
/* formatting library */
var formats = new Array();
"""
LibFormatFoot = """
function get_format(format_type) {
var value = formats[format_type];
if (typeof(value) == 'undefined') {
return format_type;
} else {
return value;
}
}
"""
SimplePlural = """
function pluralidx(count) { return (count == 1) ? 0 : 1; }
"""
InterPolate = r"""
function interpolate(fmt, obj, named) {
if (named) {
return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
} else {
return fmt.replace(/%s/g, function(match){return String(obj.shift())});
}
}
"""
PluralIdx = r"""
function pluralidx(n) {
var v=%s;
if (typeof(v) == 'boolean') {
return v ? 1 : 0;
} else {
return v;
}
}
"""
def null_javascript_catalog(request, domain=None, packages=None):
"""
Returns "identity" versions of the JavaScript i18n functions -- i.e.,
versions that don't actually do anything.
"""
src = [NullSource, InterPolate, LibFormatHead, get_formats(), LibFormatFoot]
return http.HttpResponse(''.join(src), 'text/javascript')
def javascript_catalog(request, domain='djangojs', packages=None):
"""
Returns the selected language catalog as a javascript library.
Receives the list of packages to check for translations in the
packages parameter either from an infodict or as a +-delimited
string from the request. Default is 'django.conf'.
Additionally you can override the gettext domain for this view,
but usually you don't want to do that, as JavaScript messages
go to the djangojs domain. But this might be needed if you
deliver your JavaScript source from Django templates.
"""
if request.GET:
if 'language' in request.GET:
if check_for_language(request.GET['language']):
activate(request.GET['language'])
if packages is None:
packages = ['django.conf']
if isinstance(packages, six.string_types):
packages = packages.split('+')
packages = [p for p in packages if p == 'django.conf' or p in settings.INSTALLED_APPS]
default_locale = to_locale(settings.LANGUAGE_CODE)
locale = to_locale(get_language())
t = {}
paths = []
# paths of requested packages
for package in packages:
p = importlib.import_module(package)
path = os.path.join(os.path.dirname(upath(p.__file__)), 'locale')
paths.append(path)
# add the filesystem paths listed in the LOCALE_PATHS setting
paths.extend(list(reversed(settings.LOCALE_PATHS)))
# last load the currently selected language, if it isn't identical to the default.
locale_t = {}
for path in paths:
try:
catalog = gettext_module.translation(domain, path, [locale])
except IOError:
catalog = None
if catalog is not None:
locale_t.update(catalog._catalog)
if locale_t:
t = locale_t
src = [LibHead]
plural = None
if '' in t:
for l in t[''].split('\n'):
if l.startswith('Plural-Forms:'):
plural = l.split(':',1)[1].strip()
if plural is not None:
# this should actually be a compiled function of a typical plural-form:
# Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=',1)[1]
src.append(PluralIdx % plural)
else:
src.append(SimplePlural)
csrc = []
pdict = {}
for k, v in t.items():
if k == '':
continue
if isinstance(k, six.string_types):
csrc.append("catalog['%s'] = '%s';\n" % (javascript_quote(k), javascript_quote(v)))
elif isinstance(k, tuple):
if k[0] not in pdict:
pdict[k[0]] = k[1]
else:
pdict[k[0]] = max(k[1], pdict[k[0]])
csrc.append("catalog['%s'][%d] = '%s';\n" % (javascript_quote(k[0]), k[1], javascript_quote(v)))
else:
raise TypeError(k)
csrc.sort()
for k, v in pdict.items():
src.append("catalog['%s'] = [%s];\n" % (javascript_quote(k), ','.join(["''"]*(v+1))))
src.extend(csrc)
src.append(LibFoot)
src.append(InterPolate)
src.append(LibFormatHead)
src.append(get_formats())
src.append(LibFormatFoot)
src = ''.join(src)
return http.HttpResponse(src, 'text/javascript')
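# Typical wiring for these views (illustrative, following the Django docs of
# this era rather than anything in this file): expose the catalog in urls.py,
#
#   js_info_dict = {'packages': ('your.app.package',)}
#   urlpatterns = patterns('',
#       (r'^jsi18n/$', 'django.views.i18n.javascript_catalog', js_info_dict),
#   )
#
# and load it in templates with <script src="/jsi18n/"></script>.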
| |
#!/usr/bin/env python
#
## Licensed to the .NET Foundation under one or more agreements.
## The .NET Foundation licenses this file to you under the MIT license.
## See the LICENSE file in the project root for more information.
#
##
# Title :lst_creator.py
#
# Script to create a working list file from the test overlay directory. This
# will be used by smarty to run tests.
#
################################################################################
import argparse
import datetime
import os
import re
import sys
from collections import defaultdict
################################################################################
# Argument Parser
################################################################################
DESCRIPTION = """Python script to help create/update the arm64 lstFile
"""
PARSER = argparse.ArgumentParser(description=DESCRIPTION)
PARSER.add_argument("--test", dest="testing", action="store_true", default=False)
PARSER.add_argument("-lst_file", dest="old_list_file", nargs='?', default=None)
PARSER.add_argument("-test_dir", dest="test_dir", nargs='?', default=None)
PARSER.add_argument("-commit_hash", dest="commit_hash", nargs='?', default=None)
PARSER.add_argument("-failures_csv", dest="failures_csv", nargs='?', default=None)
ARGS = PARSER.parse_args(sys.argv[1:])
################################################################################
# Helper Functions
################################################################################
def create_list_file(file_name, metadata):
""" Create a lstFile given a set of metadata input
Args:
file_name (str): Location to write the lstFile
metadata ({ str: { str: str } }): Dictionary mapping test name to
: a dictionary of key/value
: attributes.
"""
current_time = datetime.datetime.now()
current_time_str = current_time.strftime("%d-%b-%Y %H:%M:%S%z")
    metadata = [metadata[item] for item in metadata]
    # Entries from a fresh run (no old lstFile) are bare attribute dicts;
    # normalize them to the (attributes, index) tuples used when merging.
    metadata = [item if isinstance(item, tuple) else (item, -1) for item in metadata]
metadata = sorted(metadata, key=lambda item: item[1])
new_metadata = [item for item in metadata if item[1] == -1]
old_metadata = [item for item in metadata if item[1] != -1]
with open(file_name, "w") as file_handle:
file_handle.write("## This list file has been produced automatically. Any changes\n")
file_handle.write("## are subject to being overwritten when reproducing this file.\n")
file_handle.write("## \n")
file_handle.write("## Last Updated: %s\n" % current_time_str)
file_handle.write("## Commit: %s\n" % ARGS.commit_hash)
file_handle.write("## \n")
order = ["RelativePath", "WorkingDir", "Expected",
"MaxAllowedDurationSeconds", "Categories", "HostStyle"]
def write_metadata(data, count=None):
for item in data:
test_name = item[0]["RelativePath"]
if item[1] != -1:
count = item[1]
item = item[0]
# Get the test name.
title = "[%s_%d]" % (test_name.split("\\")[-1], count)
count += 1
file_handle.write("%s\n" % title)
attribute_str = ""
for key in order:
attribute_str += "%s=%s\n" % (key, item[key])
file_handle.write(attribute_str + "\n")
write_metadata(old_metadata)
    write_metadata(new_metadata, old_metadata[-1][1] + 1 if old_metadata else 0)
def create_metadata(tests):
""" Given a set of tests create the metadata around them
Args:
tests ([str]): List of tests for which to determine metadata
Returns:
test_metadata ({ str: { str: str } }): Dictionary mapping test name to
: a dictionary of key/value
: attributes.
"""
test_metadata = defaultdict(lambda: None)
failures_csv = ARGS.failures_csv
failure_information = defaultdict(lambda: None)
if failures_csv is not None:
lines = []
assert(os.path.isfile(failures_csv))
with open(failures_csv, "r") as file_handle:
lines = file_handle.readlines()
try:
for line in lines:
split = line.split(",")
relative_path = split[0].replace("/", "\\")
category = split[1]
failure_information[relative_path] = category.strip()
except:
raise Exception("Error. CSV format expects: relativepath,category")
for test in tests:
working_directory = os.path.dirname(test).replace("/", "\\")
        # Make sure the tests use the windows \ separator.
relative_path = test.replace("/", "\\")
max_duration = "600"
categories = "EXPECTED_PASS"
expected = "0"
host_style = "0"
metadata = defaultdict(lambda: None)
metadata["RelativePath"] = relative_path
metadata["WorkingDir"] = working_directory
metadata["MaxAllowedDurationSeconds"] = max_duration
metadata["HostStyle"] = host_style
metadata["Expected"] = expected
metadata["Categories"] = categories
if failure_information[relative_path] is not None:
metadata["Categories"] = failure_information[relative_path]
test_metadata[relative_path] = metadata
return test_metadata
def get_all_tests(base_dir):
""" Find all of the tests in the enlistment
Args:
base_dir (str): Directory to start traversing from
Returns:
test_list ([str]): List of the tests. Note this is defined to be every
: cmd file under the base_dir.
Note:
To find the tests correctly you must build the tests correctly and
pass that directory. This method will NOT check to make sure that
this has been done correctly.
This is a recursive method.
"""
def get_all_tests_helper(working_dir):
""" Helper function to recursively get all tests.
"""
assert os.path.isdir(working_dir)
items = os.listdir(working_dir)
items = [os.path.join(working_dir, item) for item in items]
dirs = [item for item in items if os.path.isdir(item)]
tests = [item for item in items if ".cmd" in item]
for item in dirs:
tests += get_all_tests_helper(item)
return tests
# Recursively get all of the tests in the directory.
tests = get_all_tests_helper(base_dir)
# Find the correct base directory for the tests.
common_prefix = os.path.commonprefix(tests)
if common_prefix is not None:
tests = [test.replace(common_prefix, "") for test in tests]
return tests
def log(message):
""" Log a debug message. This is to be used when the --test option is passed
"""
if ARGS.testing is True:
print message
def parse_lst_file(lst_file):
"""Parse a lstFile given.
Args:
lst_file(str): location of the lstFile
Returns:
test_metadata (defaultdict(lambda: None)): Key is test name.
"""
assert os.path.isfile(lst_file)
contents = None
with open(lst_file) as file_handle:
contents = file_handle.read()
split = re.split("\[(.*?)\]", contents)
unique_name = None
test_metadata = defaultdict(lambda: None)
for item in split:
if len(item) == 0 or item[0] == "#":
continue
if unique_name is None:
unique_name = item
else:
index = int(unique_name.split("_")[-1])
metadata = defaultdict(lambda: None)
attributes = item.split(os.linesep)
for attribute in attributes:
# Skip the removed new lines.
if len(attribute) == 0:
continue
pair = attribute.split("=")
key = pair[0].strip()
value = pair[1].strip()
metadata[key] = value
# Relative path is unique, while the test name alone is not.
unique_name = metadata["RelativePath"]
test_metadata[unique_name] = (metadata, index)
unique_name = None
return test_metadata
################################################################################
# Main
################################################################################
def main(args):
""" Main method
Args:
        args ([str]): the arguments passed to the program.
"""
# Assign all of the passed variables.
test_dir = args.test_dir
old_list_file = args.old_list_file
commit_hash = args.commit_hash
if commit_hash is None:
print "Error please provide a commit hash."
sys.exit(1)
if test_dir is None or not os.path.isdir(test_dir):
print "Error the test directory passed is not a valid directory."
sys.exit(1)
tests = get_all_tests(test_dir)
print "Found %d tests in the test directory." % (len(tests))
print
old_test_metadata = None
# If we are updating an old lstFile. Get all of the tests from that
# lstFile and their metadata.
if old_list_file is not None:
old_test_metadata = parse_lst_file(old_list_file)
print "Found %d tests in the old lstFile." % (len(old_test_metadata))
test_metadata = create_metadata(tests)
if old_test_metadata is not None:
# If the new information has been changed, we will need to update
# the lstFile.
new_test_count = 0
update_count = 0
for test_name in test_metadata:
new_metadata = test_metadata[test_name]
old_metadata = old_test_metadata[test_name]
attributes = None
if old_test_metadata[test_name] is None:
new_test_count += 1
new_metadata["Categories"] += ";NEW"
old_test_metadata[test_name] = (new_metadata, -1)
else:
index = old_metadata[1]
old_metadata = old_metadata[0]
attributes = set(old_metadata.keys() + new_metadata.keys())
# Make sure we go through all attributes of both sets.
# If an attribute exists in one set but not the other it will
# be None. If the new metadata has a new attribute, write this
                # into the old metadata. If the old metadata has an attribute
                # that does not exist in the new set, do not remove it.
overwritten = False
for attribute in attributes:
if attribute == "MaxAllowedDurationSeconds":
continue
if attribute == "Categories":
new_split = new_metadata["Categories"].split(";")
old_split = old_metadata["Categories"].split(";")
if "NEW" in old_split:
old_split.remove("NEW")
                        # If an old test is marked as a failing test, make
                        # sure that we carry that information along.
if "EXPECTED_PASS" in new_split and "EXPECTED_FAIL" in old_split:
new_split.remove("EXPECTED_PASS")
                        # If it used to be marked as passing but is now
                        # failing, make sure we remove the old category.
elif "EXPECTED_FAIL" in new_split and "EXPECTED_PASS" in old_split:
old_split.remove("EXPECTED_PASS")
joined_categories = set(old_split + new_split)
overwritten = True
old_metadata[attribute] = ";".join(joined_categories)
old_test_metadata[test_name] = (old_metadata, index)
elif new_metadata[attribute] != old_metadata[attribute]:
                        # If the old information is not the same as the new
                        # information, keep the new information and overwrite
                        # the old metadata.
if new_metadata[attribute] is not None:
overwritten = True
old_metadata[attribute] = new_metadata[attribute]
old_test_metadata[test_name] = (old_metadata, index)
if overwritten:
update_count += 1
print "Added %d tests." % new_test_count
print "Finished join. %d tests updated." % update_count
test_metadata = old_test_metadata
# Overwrite the old file if provided, else use the generic name Tests.lst
lst_file = "Tests.lst" if old_list_file is None else old_list_file
# Write out the new lstFile
create_list_file(lst_file, test_metadata)
################################################################################
################################################################################
if __name__ == "__main__":
main(ARGS)
| |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from bambou import NURESTObject
class NUNSGPatchProfile(NURESTObject):
""" Represents a NSGPatchProfile in the VSD
Notes:
An NSG Patch Profile contains upgrade information that can be given to an NSG Instance. The profile contains details on where the NSG can retrieve the image to upgrade to, and some criteria related to when the upgrade is to happen once the NSG device has received the information for upgrading.
"""
__rest_name__ = "nsgpatchprofile"
__resource_name__ = "nsgpatchprofiles"
## Constants
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
def __init__(self, **kwargs):
""" Initializes a NSGPatchProfile instance
Notes:
                You can specify all parameters while calling this method.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> nsgpatchprofile = NUNSGPatchProfile(id=u'xxxx-xxx-xxx-xxx', name=u'NSGPatchProfile')
>>> nsgpatchprofile = NUNSGPatchProfile(data=my_dict)
"""
super(NUNSGPatchProfile, self).__init__()
# Read/Write Attributes
self._name = None
self._last_updated_by = None
self._last_updated_date = None
self._patch_tag = None
self._patch_url = None
self._description = None
self._enterprise_id = None
self._entity_scope = None
self._creation_date = None
self._owner = None
self._external_id = None
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="patch_tag", remote_name="patchTag", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="patch_url", remote_name="patchURL", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="enterprise_id", remote_name="enterpriseID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
self._compute_args(**kwargs)
# Properties
@property
def name(self):
""" Get name value.
Notes:
A unique name identifying this patch profile.
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
A unique name identifying this patch profile.
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def last_updated_date(self):
""" Get last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
return self._last_updated_date
@last_updated_date.setter
def last_updated_date(self, value):
""" Set last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
self._last_updated_date = value
@property
def patch_tag(self):
""" Get patch_tag value.
Notes:
A unique brief name and version of the patch derived from Patch URL.
This attribute is named `patchTag` in VSD API.
"""
return self._patch_tag
@patch_tag.setter
def patch_tag(self, value):
""" Set patch_tag value.
Notes:
A unique brief name and version of the patch derived from Patch URL.
This attribute is named `patchTag` in VSD API.
"""
self._patch_tag = value
@property
def patch_url(self):
""" Get patch_url value.
Notes:
URL to retrieve the patch bundle for this particular patch.
This attribute is named `patchURL` in VSD API.
"""
return self._patch_url
@patch_url.setter
def patch_url(self, value):
""" Set patch_url value.
Notes:
URL to retrieve the patch bundle for this particular patch.
This attribute is named `patchURL` in VSD API.
"""
self._patch_url = value
@property
def description(self):
""" Get description value.
Notes:
A brief description of this patch profile.
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
A brief description of this patch profile.
"""
self._description = value
@property
def enterprise_id(self):
""" Get enterprise_id value.
Notes:
Enterprise/Organisation that this patch profile belongs to.
This attribute is named `enterpriseID` in VSD API.
"""
return self._enterprise_id
@enterprise_id.setter
def enterprise_id(self, value):
""" Set enterprise_id value.
Notes:
Enterprise/Organisation that this patch profile belongs to.
This attribute is named `enterpriseID` in VSD API.
"""
self._enterprise_id = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def creation_date(self):
""" Get creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
return self._creation_date
@creation_date.setter
def creation_date(self, value):
""" Set creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
self._creation_date = value
@property
def owner(self):
""" Get owner value.
Notes:
Identifies the user that has created this object.
"""
return self._owner
@owner.setter
def owner(self, value):
""" Set owner value.
Notes:
Identifies the user that has created this object.
"""
self._owner = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import sys
import traceback
from nova import log as logging
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
LOG = logging.getLogger(__name__)
class RPCException(Exception):
message = _("An unknown RPC related exception occurred.")
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if not message:
try:
message = self.message % kwargs
except Exception as e:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_('Exception in string format operation'))
for name, value in kwargs.iteritems():
LOG.error("%s: %s" % (name, value))
# at least get the core message out if something happened
message = self.message
super(RPCException, self).__init__(message)
class RemoteError(RPCException):
"""Signifies that a remote class has raised an exception.
Contains a string representation of the type of the original exception,
the value of the original exception, and the traceback. These are
sent to the parent as a joined string so printing the exception
contains all of the relevant info.
"""
message = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")
def __init__(self, exc_type=None, value=None, traceback=None):
self.exc_type = exc_type
self.value = value
self.traceback = traceback
super(RemoteError, self).__init__(exc_type=exc_type,
value=value,
traceback=traceback)
class Timeout(RPCException):
"""Signifies that a timeout has occurred.
This exception is raised if the rpc_response_timeout is reached while
waiting for a response from the remote side.
"""
message = _("Timeout while waiting on RPC response.")
class InvalidRPCConnectionReuse(RPCException):
message = _("Invalid reuse of an RPC connection.")
class UnsupportedRpcVersion(RPCException):
message = _("Specified RPC version, %(version)s, not supported by "
"this endpoint.")
class Connection(object):
"""A connection, returned by rpc.create_connection().
This class represents a connection to the message bus used for rpc.
An instance of this class should never be created by users of the rpc API.
Use rpc.create_connection() instead.
"""
def close(self):
"""Close the connection.
This method must be called when the connection will no longer be used.
It will ensure that any resources associated with the connection, such
        as a network connection, are cleaned up.
"""
raise NotImplementedError()
def create_consumer(self, conf, topic, proxy, fanout=False):
"""Create a consumer on this connection.
A consumer is associated with a message queue on the backend message
bus. The consumer will read messages from the queue, unpack them, and
dispatch them to the proxy object. The contents of the message pulled
off of the queue will determine which method gets called on the proxy
object.
:param conf: An openstack.common.cfg configuration object.
:param topic: This is a name associated with what to consume from.
Multiple instances of a service may consume from the same
topic. For example, all instances of nova-compute consume
from a queue called "compute". In that case, the
messages will get distributed amongst the consumers in a
round-robin fashion if fanout=False. If fanout=True,
every consumer associated with this topic will get a
copy of every message.
:param proxy: The object that will handle all incoming messages.
:param fanout: Whether or not this is a fanout topic. See the
documentation for the topic parameter for some
additional comments on this.
"""
raise NotImplementedError()
def consume_in_thread(self):
"""Spawn a thread to handle incoming messages.
Spawn a thread that will be responsible for handling all incoming
messages for consumers that were set up on this connection.
Message dispatching inside of this is expected to be implemented in a
non-blocking manner. An example implementation would be having this
thread pull messages in for all of the consumers, but utilize a thread
pool for dispatching the messages to the proxy objects.
"""
raise NotImplementedError()
def _safe_log(log_func, msg, msg_data):
"""Sanitizes the msg_data field before logging."""
SANITIZE = {
'set_admin_password': ('new_pass',),
'run_instance': ('admin_password',),
}
has_method = 'method' in msg_data and msg_data['method'] in SANITIZE
has_context_token = '_context_auth_token' in msg_data
has_token = 'auth_token' in msg_data
if not any([has_method, has_context_token, has_token]):
return log_func(msg, msg_data)
msg_data = copy.deepcopy(msg_data)
if has_method:
method = msg_data['method']
if method in SANITIZE:
args_to_sanitize = SANITIZE[method]
for arg in args_to_sanitize:
try:
msg_data['args'][arg] = "<SANITIZED>"
except KeyError:
pass
if has_context_token:
msg_data['_context_auth_token'] = '<SANITIZED>'
if has_token:
msg_data['auth_token'] = '<SANITIZED>'
return log_func(msg, msg_data)
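# For example (payload illustrative): a message such as
#   {'method': 'run_instance', 'args': {'admin_password': 'hunter2'}}
# is deep-copied and logged with the password value replaced by
# "<SANITIZED>", leaving the caller's msg_data untouched.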
def serialize_remote_exception(failure_info):
"""Prepares exception data to be sent over rpc.
Failure_info should be a sys.exc_info() tuple.
"""
tb = traceback.format_exception(*failure_info)
failure = failure_info[1]
LOG.error(_("Returning exception %s to caller"), unicode(failure))
LOG.error(tb)
kwargs = {}
if hasattr(failure, 'kwargs'):
kwargs = failure.kwargs
data = {
'class': str(failure.__class__.__name__),
'module': str(failure.__class__.__module__),
'message': unicode(failure),
'tb': tb,
'args': failure.args,
'kwargs': kwargs
}
json_data = jsonutils.dumps(data)
return json_data
def deserialize_remote_exception(conf, data):
failure = jsonutils.loads(str(data))
trace = failure.get('tb', [])
message = failure.get('message', "") + "\n" + "\n".join(trace)
name = failure.get('class')
module = failure.get('module')
# NOTE(ameade): We DO NOT want to allow just any module to be imported, in
# order to prevent arbitrary code execution.
    if module not in conf.allowed_rpc_exception_modules:
return RemoteError(name, failure.get('message'), trace)
try:
mod = importutils.import_module(module)
klass = getattr(mod, name)
if not issubclass(klass, Exception):
raise TypeError("Can only deserialize Exceptions")
failure = klass(**failure.get('kwargs', {}))
except (AttributeError, TypeError, ImportError):
return RemoteError(name, failure.get('message'), trace)
ex_type = type(failure)
str_override = lambda self: message
new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,),
{'__str__': str_override})
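    # e.g. a remote InstanceNotFound becomes a local subclass named
    # "InstanceNotFound_Remote" whose str() is the remote message plus
    # traceback (exception name illustrative).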
try:
# NOTE(ameade): Dynamically create a new exception type and swap it in
# as the new type for the exception. This only works on user defined
# Exceptions and not core python exceptions. This is important because
# we cannot necessarily change an exception message so we must override
# the __str__ method.
failure.__class__ = new_ex_type
except TypeError as e:
# NOTE(ameade): If a core exception then just add the traceback to the
# first exception argument.
failure.args = (message,) + failure.args[1:]
return failure
| |