repo_name
stringlengths
5
100
path
stringlengths
4
375
copies
stringclasses
991 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
jameshensman/pymc3
pymc3/tests/backend_fixtures.py
13
15728
import unittest import numpy as np import numpy.testing as npt import os import shutil from pymc3.tests import models from pymc3.backends import base class ModelBackendSetupTestCase(unittest.TestCase): """Set up a backend trace. Provides the attributes - test_point - model - strace - draws Children must define - backend - name - shape """ def setUp(self): self.test_point, self.model, _ = models.beta_bernoulli(self.shape) with self.model: self.strace = self.backend(self.name) self.draws, self.chain = 3, 0 self.strace.setup(self.draws, self.chain) def tearDown(self): if self.name is not None: remove_file_or_directory(self.name) class ModelBackendSampledTestCase(unittest.TestCase): """Setup and sample a backend trace. Provides the attributes - test_point - model - mtrace (MultiTrace object) - draws - expected Expected values mapped to chain number and variable name. Children must define - backend - name - shape """ @classmethod def setUpClass(cls): cls.test_point, cls.model, _ = models.beta_bernoulli(cls.shape) with cls.model: strace0 = cls.backend(cls.name) strace1 = cls.backend(cls.name) cls.draws = 5 strace0.setup(cls.draws, chain=0) strace1.setup(cls.draws, chain=1) varnames = list(cls.test_point.keys()) shapes = {varname: value.shape for varname, value in cls.test_point.items()} dtypes = {varname: value.dtype for varname, value in cls.test_point.items()} cls.expected = {0: {}, 1: {}} for varname in varnames: mcmc_shape = (cls.draws,) + shapes[varname] values = np.arange(cls.draws * np.prod(shapes[varname]), dtype=dtypes[varname]) cls.expected[0][varname] = values.reshape(mcmc_shape) cls.expected[1][varname] = values.reshape(mcmc_shape) * 100 for idx in range(cls.draws): point0 = {varname: cls.expected[0][varname][idx, ...] for varname in varnames} point1 = {varname: cls.expected[1][varname][idx, ...] 
for varname in varnames} strace0.record(point=point0) strace1.record(point=point1) strace0.close() strace1.close() cls.mtrace = base.MultiTrace([strace0, strace1]) @classmethod def tearDownClass(cls): if cls.name is not None: remove_file_or_directory(cls.name) def test_varnames_nonempty(self): # Make sure the test_point has variables names because many # tests rely on looping through these and would pass silently # if the loop is never entered. assert list(self.test_point.keys()) class SamplingTestCase(ModelBackendSetupTestCase): """Test backend sampling. Children must define - backend - name - shape """ def test_standard_close(self): for idx in range(self.draws): point = {varname: np.tile(idx, value.shape) for varname, value in self.test_point.items()} self.strace.record(point=point) self.strace.close() for varname in self.test_point.keys(): npt.assert_equal(self.strace.get_values(varname)[0, ...], np.zeros(self.strace.var_shapes[varname])) last_idx = self.draws - 1 npt.assert_equal(self.strace.get_values(varname)[last_idx, ...], np.tile(last_idx, self.strace.var_shapes[varname])) def test_clean_interrupt(self): self.strace.record(point=self.test_point) self.strace.close() for varname in self.test_point.keys(): self.assertEqual(self.strace.get_values(varname).shape[0], 1) class SelectionTestCase(ModelBackendSampledTestCase): """Test backend selection. 
Children must define - backend - name - shape """ def test_get_values_default(self): for varname in self.test_point.keys(): expected = np.concatenate([self.expected[chain][varname] for chain in [0, 1]]) result = self.mtrace.get_values(varname) npt.assert_equal(result, expected) def test_get_values_nocombine_burn_keyword(self): burn = 2 for varname in self.test_point.keys(): expected = [self.expected[0][varname][burn:], self.expected[1][varname][burn:]] result = self.mtrace.get_values(varname, burn=burn, combine=False) npt.assert_equal(result, expected) def test_len(self): self.assertEqual(len(self.mtrace), self.draws) def test_dtypes(self): for varname in self.test_point.keys(): self.assertEqual(self.expected[0][varname].dtype, self.mtrace.get_values(varname, chains=0).dtype) def test_get_values_nocombine_thin_keyword(self): thin = 2 for varname in self.test_point.keys(): expected = [self.expected[0][varname][::thin], self.expected[1][varname][::thin]] result = self.mtrace.get_values(varname, thin=thin, combine=False) npt.assert_equal(result, expected) def test_get_point(self): idx = 2 result = self.mtrace.point(idx) for varname in self.test_point.keys(): expected = self.expected[1][varname][idx] npt.assert_equal(result[varname], expected) def test_get_slice(self): expected = [] for chain in [0, 1]: expected.append({varname: self.expected[chain][varname][2:] for varname in self.mtrace.varnames}) result = self.mtrace[2:] for chain in [0, 1]: for varname in self.test_point.keys(): npt.assert_equal(result.get_values(varname, chains=[chain]), expected[chain][varname]) def test_get_values_one_chain(self): for varname in self.test_point.keys(): expected = self.expected[0][varname] result = self.mtrace.get_values(varname, chains=[0]) npt.assert_equal(result, expected) def test_get_values_nocombine_chains_reversed(self): for varname in self.test_point.keys(): expected = [self.expected[1][varname], self.expected[0][varname]] result = self.mtrace.get_values(varname, 
chains=[1, 0], combine=False) npt.assert_equal(result, expected) def test_nchains(self): self.mtrace.nchains == 2 def test_get_values_one_chain_int_arg(self): for varname in self.test_point.keys(): npt.assert_equal(self.mtrace.get_values(varname, chains=[0]), self.mtrace.get_values(varname, chains=0)) def test_get_values_combine(self): for varname in self.test_point.keys(): expected = np.concatenate([self.expected[chain][varname] for chain in [0, 1]]) result = self.mtrace.get_values(varname, combine=True) npt.assert_equal(result, expected) def test_get_values_combine_burn_arg(self): burn = 2 for varname in self.test_point.keys(): expected = np.concatenate([self.expected[chain][varname][burn:] for chain in [0, 1]]) result = self.mtrace.get_values(varname, combine=True, burn=burn) npt.assert_equal(result, expected) def test_get_values_combine_thin_arg(self): thin = 2 for varname in self.test_point.keys(): expected = np.concatenate([self.expected[chain][varname][::thin] for chain in [0, 1]]) result = self.mtrace.get_values(varname, combine=True, thin=thin) npt.assert_equal(result, expected) def test_getitem_equivalence(self): mtrace = self.mtrace for varname in self.test_point.keys(): npt.assert_equal(mtrace[varname], mtrace.get_values(varname, combine=True)) npt.assert_equal(mtrace[varname, 2:], mtrace.get_values(varname, burn=2, combine=True)) npt.assert_equal(mtrace[varname, 2::2], mtrace.get_values(varname, burn=2, thin=2, combine=True)) def test_selection_method_equivalence(self): varname = self.mtrace.varnames[0] mtrace = self.mtrace npt.assert_equal(mtrace.get_values(varname), mtrace[varname]) npt.assert_equal(mtrace[varname], mtrace.__getattr__(varname)) class DumpLoadTestCase(ModelBackendSampledTestCase): """Test equality of a dumped and loaded trace with original. 
Children must define - backend - load_func Function to load dumped backend - name - shape """ @classmethod def setUpClass(cls): super(DumpLoadTestCase, cls).setUpClass() try: with cls.model: cls.dumped = cls.load_func(cls.name) except: remove_file_or_directory(cls.name) raise @classmethod def tearDownClass(cls): remove_file_or_directory(cls.name) def test_nchains(self): self.assertEqual(self.mtrace.nchains, self.dumped.nchains) def test_varnames(self): trace_names = list(sorted(self.mtrace.varnames)) dumped_names = list(sorted(self.dumped.varnames)) self.assertEqual(trace_names, dumped_names) def test_values(self): trace = self.mtrace dumped = self.dumped for chain in trace.chains: for varname in self.test_point.keys(): data = trace.get_values(varname, chains=[chain]) dumped_data = dumped.get_values(varname, chains=[chain]) npt.assert_equal(data, dumped_data) class BackendEqualityTestCase(ModelBackendSampledTestCase): """Test equality of attirbutes from two backends. Children must define - backend0 - backend1 - name0 - name1 - shape """ @classmethod def setUpClass(cls): cls.backend = cls.backend0 cls.name = cls.name0 super(BackendEqualityTestCase, cls).setUpClass() cls.mtrace0 = cls.mtrace cls.backend = cls.backend1 cls.name = cls.name1 super(BackendEqualityTestCase, cls).setUpClass() cls.mtrace1 = cls.mtrace @classmethod def tearDownClass(cls): for name in [cls.name0, cls.name1]: if name is not None: remove_file_or_directory(name) def test_chain_length(self): assert self.mtrace0.nchains == self.mtrace1.nchains assert len(self.mtrace0) == len(self.mtrace1) def test_dtype(self): for varname in self.test_point.keys(): self.assertEqual(self.mtrace0.get_values(varname, chains=0).dtype, self.mtrace1.get_values(varname, chains=0).dtype) def test_number_of_draws(self): for varname in self.test_point.keys(): values0 = self.mtrace0.get_values(varname, combine=False, squeeze=False) values1 = self.mtrace1.get_values(varname, combine=False, squeeze=False) assert 
values0[0].shape[0] == self.draws assert values1[0].shape[0] == self.draws def test_get_item(self): for varname in self.test_point.keys(): npt.assert_equal(self.mtrace0[varname], self.mtrace1[varname]) def test_get_values(self): for varname in self.test_point.keys(): for cf in [False, True]: npt.assert_equal(self.mtrace0.get_values(varname, combine=cf), self.mtrace1.get_values(varname, combine=cf)) def test_get_values_no_squeeze(self): for varname in self.test_point.keys(): npt.assert_equal(self.mtrace0.get_values(varname, combine=False, squeeze=False), self.mtrace1.get_values(varname, combine=False, squeeze=False)) def test_get_values_combine_and_no_squeeze(self): for varname in self.test_point.keys(): npt.assert_equal(self.mtrace0.get_values(varname, combine=True, squeeze=False), self.mtrace1.get_values(varname, combine=True, squeeze=False)) def test_get_values_with_burn(self): for varname in self.test_point.keys(): for cf in [False, True]: npt.assert_equal(self.mtrace0.get_values(varname, combine=cf, burn=3), self.mtrace1.get_values(varname, combine=cf, burn=3)) ## Burn to one value. 
npt.assert_equal(self.mtrace0.get_values(varname, combine=cf, burn=self.draws - 1), self.mtrace1.get_values(varname, combine=cf, burn=self.draws - 1)) def test_get_values_with_thin(self): for varname in self.test_point.keys(): for cf in [False, True]: npt.assert_equal(self.mtrace0.get_values(varname, combine=cf, thin=2), self.mtrace1.get_values(varname, combine=cf, thin=2)) def test_get_values_with_burn_and_thin(self): for varname in self.test_point.keys(): for cf in [False, True]: npt.assert_equal(self.mtrace0.get_values(varname, combine=cf, burn=2, thin=2), self.mtrace1.get_values(varname, combine=cf, burn=2, thin=2)) def test_get_values_with_chains_arg(self): for varname in self.test_point.keys(): for cf in [False, True]: npt.assert_equal(self.mtrace0.get_values(varname, chains=[0], combine=cf), self.mtrace1.get_values(varname, chains=[0], combine=cf)) def test_get_point(self): npoint, spoint = self.mtrace0[4], self.mtrace1[4] for varname in self.test_point.keys(): npt.assert_equal(npoint[varname], spoint[varname]) def test_point_with_chain_arg(self): npoint = self.mtrace0.point(4, chain=0) spoint = self.mtrace1.point(4, chain=0) for varname in self.test_point.keys(): npt.assert_equal(npoint[varname], spoint[varname]) def remove_file_or_directory(name): try: os.remove(name) except OSError: shutil.rmtree(name, ignore_errors=True)
apache-2.0
hugobowne/scikit-learn
sklearn/feature_selection/tests/test_feature_select.py
43
24671
""" Todo: cross-check the F-value with stats model """ from __future__ import division import itertools import warnings import numpy as np from scipy import stats, sparse from numpy.testing import run_module_suite from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_not_in from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_warns from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_greater_equal from sklearn.utils import safe_mask from sklearn.datasets.samples_generator import (make_classification, make_regression) from sklearn.feature_selection import ( chi2, f_classif, f_oneway, f_regression, mutual_info_classif, mutual_info_regression, SelectPercentile, SelectKBest, SelectFpr, SelectFdr, SelectFwe, GenericUnivariateSelect) ############################################################################## # Test the score functions def test_f_oneway_vs_scipy_stats(): # Test that our f_oneway gives the same result as scipy.stats rng = np.random.RandomState(0) X1 = rng.randn(10, 3) X2 = 1 + rng.randn(10, 3) f, pv = stats.f_oneway(X1, X2) f2, pv2 = f_oneway(X1, X2) assert_true(np.allclose(f, f2)) assert_true(np.allclose(pv, pv2)) def test_f_oneway_ints(): # Smoke test f_oneway on integers: that it does raise casting errors # with recent numpys rng = np.random.RandomState(0) X = rng.randint(10, size=(10, 10)) y = np.arange(10) fint, pint = f_oneway(X, y) # test that is gives the same result as with float f, p = f_oneway(X.astype(np.float), y) assert_array_almost_equal(f, fint, decimal=4) 
assert_array_almost_equal(p, pint, decimal=4) def test_f_classif(): # Test whether the F test yields meaningful results # on a simple simulated classification problem X, y = make_classification(n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0) F, pv = f_classif(X, y) F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y) assert_true((F > 0).all()) assert_true((pv > 0).all()) assert_true((pv < 1).all()) assert_true((pv[:5] < 0.05).all()) assert_true((pv[5:] > 1.e-4).all()) assert_array_almost_equal(F_sparse, F) assert_array_almost_equal(pv_sparse, pv) def test_f_regression(): # Test whether the F test yields meaningful results # on a simple simulated regression problem X, y = make_regression(n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0) F, pv = f_regression(X, y) assert_true((F > 0).all()) assert_true((pv > 0).all()) assert_true((pv < 1).all()) assert_true((pv[:5] < 0.05).all()) assert_true((pv[5:] > 1.e-4).all()) # again without centering, compare with sparse F, pv = f_regression(X, y, center=False) F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False) assert_array_almost_equal(F_sparse, F) assert_array_almost_equal(pv_sparse, pv) def test_f_regression_input_dtype(): # Test whether f_regression returns the same value # for any numeric data_type rng = np.random.RandomState(0) X = rng.rand(10, 20) y = np.arange(10).astype(np.int) F1, pv1 = f_regression(X, y) F2, pv2 = f_regression(X, y.astype(np.float)) assert_array_almost_equal(F1, F2, 5) assert_array_almost_equal(pv1, pv2, 5) def test_f_regression_center(): # Test whether f_regression preserves dof according to 'center' argument # We use two centered variates so we have a simple relationship between # F-score with variates centering and F-score without variates centering. 
# Create toy example X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean n_samples = X.size Y = np.ones(n_samples) Y[::2] *= -1. Y[0] = 0. # have Y mean being null F1, _ = f_regression(X, Y, center=True) F2, _ = f_regression(X, Y, center=False) assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2) assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS def test_f_classif_multi_class(): # Test whether the F test yields meaningful results # on a simple simulated classification problem X, y = make_classification(n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0) F, pv = f_classif(X, y) assert_true((F > 0).all()) assert_true((pv > 0).all()) assert_true((pv < 1).all()) assert_true((pv[:5] < 0.05).all()) assert_true((pv[5:] > 1.e-4).all()) def test_select_percentile_classif(): # Test whether the relative univariate feature selection # gets the correct items in a simple classification problem # with the percentile heuristic X, y = make_classification(n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0) univariate_filter = SelectPercentile(f_classif, percentile=25) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect(f_classif, mode='percentile', param=25).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support, gtruth) def test_select_percentile_classif_sparse(): # Test whether the relative univariate feature selection # gets the correct items in a simple classification problem # with the percentile heuristic X, y = make_classification(n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, 
shuffle=False, random_state=0) X = sparse.csr_matrix(X) univariate_filter = SelectPercentile(f_classif, percentile=25) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect(f_classif, mode='percentile', param=25).fit(X, y).transform(X) assert_array_equal(X_r.toarray(), X_r2.toarray()) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support, gtruth) X_r2inv = univariate_filter.inverse_transform(X_r2) assert_true(sparse.issparse(X_r2inv)) support_mask = safe_mask(X_r2inv, support) assert_equal(X_r2inv.shape, X.shape) assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray()) # Check other columns are empty assert_equal(X_r2inv.getnnz(), X_r.getnnz()) ############################################################################## # Test univariate selection in classification settings def test_select_kbest_classif(): # Test whether the relative univariate feature selection # gets the correct items in a simple classification problem # with the k best heuristic X, y = make_classification(n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0) univariate_filter = SelectKBest(f_classif, k=5) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect( f_classif, mode='k_best', param=5).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support, gtruth) def test_select_kbest_all(): # Test whether k="all" correctly returns all features. X, y = make_classification(n_samples=20, n_features=10, shuffle=False, random_state=0) univariate_filter = SelectKBest(f_classif, k='all') X_r = univariate_filter.fit(X, y).transform(X) assert_array_equal(X, X_r) def test_select_kbest_zero(): # Test whether k=0 correctly returns no features. 
X, y = make_classification(n_samples=20, n_features=10, shuffle=False, random_state=0) univariate_filter = SelectKBest(f_classif, k=0) univariate_filter.fit(X, y) support = univariate_filter.get_support() gtruth = np.zeros(10, dtype=bool) assert_array_equal(support, gtruth) X_selected = assert_warns_message(UserWarning, 'No features were selected', univariate_filter.transform, X) assert_equal(X_selected.shape, (20, 0)) def test_select_heuristics_classif(): # Test whether the relative univariate feature selection # gets the correct items in a simple classification problem # with the fdr, fwe and fpr heuristics X, y = make_classification(n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0) univariate_filter = SelectFwe(f_classif, alpha=0.01) X_r = univariate_filter.fit(X, y).transform(X) gtruth = np.zeros(20) gtruth[:5] = 1 for mode in ['fdr', 'fpr', 'fwe']: X_r2 = GenericUnivariateSelect( f_classif, mode=mode, param=0.01).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() assert_array_almost_equal(support, gtruth) ############################################################################## # Test univariate selection in regression settings def assert_best_scores_kept(score_filter): scores = score_filter.scores_ support = score_filter.get_support() assert_array_equal(np.sort(scores[support]), np.sort(scores)[-support.sum():]) def test_select_percentile_regression(): # Test whether the relative univariate feature selection # gets the correct items in a simple regression problem # with the percentile heuristic X, y = make_regression(n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0) univariate_filter = SelectPercentile(f_regression, percentile=25) X_r = univariate_filter.fit(X, y).transform(X) assert_best_scores_kept(univariate_filter) X_r2 = GenericUnivariateSelect( f_regression, 
mode='percentile', param=25).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support, gtruth) X_2 = X.copy() X_2[:, np.logical_not(support)] = 0 assert_array_equal(X_2, univariate_filter.inverse_transform(X_r)) # Check inverse_transform respects dtype assert_array_equal(X_2.astype(bool), univariate_filter.inverse_transform(X_r.astype(bool))) def test_select_percentile_regression_full(): # Test whether the relative univariate feature selection # selects all features when '100%' is asked. X, y = make_regression(n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0) univariate_filter = SelectPercentile(f_regression, percentile=100) X_r = univariate_filter.fit(X, y).transform(X) assert_best_scores_kept(univariate_filter) X_r2 = GenericUnivariateSelect( f_regression, mode='percentile', param=100).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.ones(20) assert_array_equal(support, gtruth) def test_invalid_percentile(): X, y = make_regression(n_samples=10, n_features=20, n_informative=2, shuffle=False, random_state=0) assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y) assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y) assert_raises(ValueError, GenericUnivariateSelect(mode='percentile', param=-1).fit, X, y) assert_raises(ValueError, GenericUnivariateSelect(mode='percentile', param=101).fit, X, y) def test_select_kbest_regression(): # Test whether the relative univariate feature selection # gets the correct items in a simple regression problem # with the k best heuristic X, y = make_regression(n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0, noise=10) univariate_filter = SelectKBest(f_regression, k=5) X_r = univariate_filter.fit(X, y).transform(X) assert_best_scores_kept(univariate_filter) X_r2 = GenericUnivariateSelect( 
f_regression, mode='k_best', param=5).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support, gtruth) def test_select_heuristics_regression(): # Test whether the relative univariate feature selection # gets the correct items in a simple regression problem # with the fpr, fdr or fwe heuristics X, y = make_regression(n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0, noise=10) univariate_filter = SelectFpr(f_regression, alpha=0.01) X_r = univariate_filter.fit(X, y).transform(X) gtruth = np.zeros(20) gtruth[:5] = 1 for mode in ['fdr', 'fpr', 'fwe']: X_r2 = GenericUnivariateSelect( f_regression, mode=mode, param=0.01).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool)) assert_less(np.sum(support[5:] == 1), 3) def test_select_fdr_regression(): # Test that fdr heuristic actually has low FDR. def single_fdr(alpha, n_informative, random_state): X, y = make_regression(n_samples=150, n_features=20, n_informative=n_informative, shuffle=False, random_state=random_state, noise=10) with warnings.catch_warnings(record=True): # Warnings can be raised when no features are selected # (low alpha or very noisy data) univariate_filter = SelectFdr(f_regression, alpha=alpha) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect( f_regression, mode='fdr', param=alpha).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() num_false_positives = np.sum(support[n_informative:] == 1) num_true_positives = np.sum(support[:n_informative] == 1) if num_false_positives == 0: return 0. 
false_discovery_rate = (num_false_positives / (num_true_positives + num_false_positives)) return false_discovery_rate for alpha in [0.001, 0.01, 0.1]: for n_informative in [1, 5, 10]: # As per Benjamini-Hochberg, the expected false discovery rate # should be lower than alpha: # FDR = E(FP / (TP + FP)) <= alpha false_discovery_rate = np.mean([single_fdr(alpha, n_informative, random_state) for random_state in range(30)]) assert_greater_equal(alpha, false_discovery_rate) # Make sure that the empirical false discovery rate increases # with alpha: if false_discovery_rate != 0: assert_greater(false_discovery_rate, alpha / 10) def test_select_fwe_regression(): # Test whether the relative univariate feature selection # gets the correct items in a simple regression problem # with the fwe heuristic X, y = make_regression(n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0) univariate_filter = SelectFwe(f_regression, alpha=0.01) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect( f_regression, mode='fwe', param=0.01).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool)) assert_less(np.sum(support[5:] == 1), 2) def test_selectkbest_tiebreaking(): # Test whether SelectKBest actually selects k features in case of ties. # Prior to 0.11, SelectKBest would return more features than requested. Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]] y = [1] dummy_score = lambda X, y: (X[0], X[0]) for X in Xs: sel = SelectKBest(dummy_score, k=1) X1 = ignore_warnings(sel.fit_transform)([X], y) assert_equal(X1.shape[1], 1) assert_best_scores_kept(sel) sel = SelectKBest(dummy_score, k=2) X2 = ignore_warnings(sel.fit_transform)([X], y) assert_equal(X2.shape[1], 2) assert_best_scores_kept(sel) def test_selectpercentile_tiebreaking(): # Test if SelectPercentile selects the right n_features in case of ties. 
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]] y = [1] dummy_score = lambda X, y: (X[0], X[0]) for X in Xs: sel = SelectPercentile(dummy_score, percentile=34) X1 = ignore_warnings(sel.fit_transform)([X], y) assert_equal(X1.shape[1], 1) assert_best_scores_kept(sel) sel = SelectPercentile(dummy_score, percentile=67) X2 = ignore_warnings(sel.fit_transform)([X], y) assert_equal(X2.shape[1], 2) assert_best_scores_kept(sel) def test_tied_pvalues(): # Test whether k-best and percentiles work with tied pvalues from chi2. # chi2 will return the same p-values for the following features, but it # will return different scores. X0 = np.array([[10000, 9999, 9998], [1, 1, 1]]) y = [0, 1] for perm in itertools.permutations((0, 1, 2)): X = X0[:, perm] Xt = SelectKBest(chi2, k=2).fit_transform(X, y) assert_equal(Xt.shape, (2, 2)) assert_not_in(9998, Xt) Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y) assert_equal(Xt.shape, (2, 2)) assert_not_in(9998, Xt) def test_tied_scores(): # Test for stable sorting in k-best with tied scores. X_train = np.array([[0, 0, 0], [1, 1, 1]]) y_train = [0, 1] for n_features in [1, 2, 3]: sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train) X_test = sel.transform([[0, 1, 2]]) assert_array_equal(X_test[0], np.arange(3)[-n_features:]) def test_nans(): # Assert that SelectKBest and SelectPercentile can handle NaNs. # First feature has zero variance to confuse f_classif (ANOVA) and # make it return a NaN. 
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]] y = [1, 0, 1] for select in (SelectKBest(f_classif, 2), SelectPercentile(f_classif, percentile=67)): ignore_warnings(select.fit)(X, y) assert_array_equal(select.get_support(indices=True), np.array([1, 2])) def test_score_func_error(): X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]] y = [1, 0, 1] for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe, SelectFdr, SelectFpr, GenericUnivariateSelect]: assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y) def test_invalid_k(): X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]] y = [1, 0, 1] assert_raises(ValueError, SelectKBest(k=-1).fit, X, y) assert_raises(ValueError, SelectKBest(k=4).fit, X, y) assert_raises(ValueError, GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y) assert_raises(ValueError, GenericUnivariateSelect(mode='k_best', param=4).fit, X, y) def test_f_classif_constant_feature(): # Test that f_classif warns if a feature is constant throughout. X, y = make_classification(n_samples=10, n_features=5) X[:, 0] = 2.0 assert_warns(UserWarning, f_classif, X, y) def test_no_feature_selected(): rng = np.random.RandomState(0) # Generate random uncorrelated data: a strict univariate test should # rejects all the features X = rng.rand(40, 10) y = rng.randint(0, 4, size=40) strict_selectors = [ SelectFwe(alpha=0.01).fit(X, y), SelectFdr(alpha=0.01).fit(X, y), SelectFpr(alpha=0.01).fit(X, y), SelectPercentile(percentile=0).fit(X, y), SelectKBest(k=0).fit(X, y), ] for selector in strict_selectors: assert_array_equal(selector.get_support(), np.zeros(10)) X_selected = assert_warns_message( UserWarning, 'No features were selected', selector.transform, X) assert_equal(X_selected.shape, (40, 0)) def test_mutual_info_classif(): X, y = make_classification(n_samples=100, n_features=5, n_informative=1, n_redundant=1, n_repeated=0, n_classes=2, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0) # Test in KBest mode. 
univariate_filter = SelectKBest(mutual_info_classif, k=2) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect( mutual_info_classif, mode='k_best', param=2).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(5) gtruth[:2] = 1 assert_array_equal(support, gtruth) # Test in Percentile mode. univariate_filter = SelectPercentile(mutual_info_classif, percentile=40) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect( mutual_info_classif, mode='percentile', param=40).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(5) gtruth[:2] = 1 assert_array_equal(support, gtruth) def test_mutual_info_regression(): X, y = make_regression(n_samples=100, n_features=10, n_informative=2, shuffle=False, random_state=0, noise=10) # Test in KBest mode. univariate_filter = SelectKBest(mutual_info_regression, k=2) X_r = univariate_filter.fit(X, y).transform(X) assert_best_scores_kept(univariate_filter) X_r2 = GenericUnivariateSelect( mutual_info_regression, mode='k_best', param=2).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(10) gtruth[:2] = 1 assert_array_equal(support, gtruth) # Test in Percentile mode. univariate_filter = SelectPercentile(mutual_info_regression, percentile=20) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect(mutual_info_regression, mode='percentile', param=20).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(10) gtruth[:2] = 1 assert_array_equal(support, gtruth) if __name__ == '__main__': run_module_suite()
bsd-3-clause
pombredanne/plumbum
tests/test_color.py
5
3084
from __future__ import with_statement, print_function import unittest from plumbum.colorlib.styles import ANSIStyle, Color, AttributeNotFound, ColorNotFound from plumbum.colorlib.names import color_html, FindNearest class TestNearestColor(unittest.TestCase): def test_exact(self): self.assertEqual(FindNearest(0,0,0).all_fast(),0) for n,color in enumerate(color_html): # Ignoring duplicates if n not in (16, 21, 46, 51, 196, 201, 226, 231, 244): rgb = (int(color[1:3],16), int(color[3:5],16), int(color[5:7],16)) self.assertEqual(FindNearest(*rgb).all_fast(),n) def test_nearby(self): self.assertEqual(FindNearest(1,2,2).all_fast(),0) self.assertEqual(FindNearest(7,7,9).all_fast(),232) def test_simplecolor(self): self.assertEqual(FindNearest(1,2,4).only_basic(), 0) self.assertEqual(FindNearest(0,255,0).only_basic(), 2) self.assertEqual(FindNearest(100,100,0).only_basic(), 3) self.assertEqual(FindNearest(140,140,140).only_basic(), 7) class TestColorLoad(unittest.TestCase): def test_rgb(self): blue = Color(0,0,255) # Red, Green, Blue self.assertEqual(blue.rgb, (0,0,255)) def test_simple_name(self): green = Color.from_simple('green') self.assertEqual(green.number, 2) def test_different_names(self): self.assertEqual(Color('Dark Blue'), Color('Dark_Blue')) self.assertEqual(Color('Dark_blue'), Color('Dark_Blue')) self.assertEqual(Color('DARKBLUE'), Color('Dark_Blue')) self.assertEqual(Color('DarkBlue'), Color('Dark_Blue')) self.assertEqual(Color('Dark Green'), Color('Dark_Green')) def test_loading_methods(self): self.assertEqual(Color("Yellow"), Color.from_full("Yellow")) self.assertNotEqual(Color.from_full("yellow").representation, Color.from_simple("yellow").representation) class TestANSIColor(unittest.TestCase): def setUp(self): ANSIStyle.use_color = True def test_ansi(self): self.assertEqual(str(ANSIStyle(fgcolor=Color('reset'))), '\033[39m') self.assertEqual(str(ANSIStyle(fgcolor=Color.from_full('green'))), '\033[38;5;2m') 
self.assertEqual(str(ANSIStyle(fgcolor=Color.from_simple('red'))), '\033[31m') class TestStyle(unittest.TestCase): def setUp(self): ANSIStyle.use_color = True def test_InvalidAttributes(self): pass class TestNearestColor(unittest.TestCase): def test_allcolors(self): myrange = (0,1,2,5,17,39,48,73,82,140,193,210,240,244,250,254,255) for r in myrange: for g in myrange: for b in myrange: near = FindNearest(r,g,b) self.assertEqual(near.all_slow(),near.all_fast(), 'Tested: {0}, {1}, {2}'.format(r,g,b)) if __name__ == '__main__': unittest.main()
mit
plxaye/chromium
src/chrome/common/extensions/docs/server2/appengine_url_fetcher.py
2
1720
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import base64

from appengine_wrappers import urlfetch
from future import Future


class _AsyncFetchDelegate(object):
  """Adapts an urlfetch RPC to the delegate interface expected by Future."""

  def __init__(self, rpc):
    self._rpc = rpc

  def Get(self):
    # Blocks until the RPC completes, then returns its result.
    return self._rpc.get_result()


def _MakeHeaders(username, password):
  """Builds the request headers, adding HTTP Basic authentication when
  both a username and a password are provided.
  """
  headers = { 'Cache-Control': 'max-age=0' }
  if username is not None and password is not None:
    # NOTE(review): base64.encodestring appends a trailing newline to the
    # encoded value — Python 2 era API; behavior preserved as-is.
    headers['Authorization'] = 'Basic %s' % base64.encodestring(
        '%s:%s' % (username, password))
  return headers


class AppEngineUrlFetcher(object):
  """A wrapper around the App Engine urlfetch module that allows for easy
  async fetches.
  """
  def __init__(self, base_path=None):
    # Optional URL prefix prepended to every fetched path.
    self._base_path = base_path

  def Fetch(self, url, username=None, password=None):
    """Fetches a file synchronously.
    """
    headers = _MakeHeaders(username, password)
    if self._base_path is not None:
      return urlfetch.fetch('%s/%s' % (self._base_path, url),
                            headers=headers)
    # Bug fix: this branch previously rebuilt the headers inline as
    # {'Cache-Control': 'max-age=0'}, silently dropping the Authorization
    # header, so authenticated fetches without a base_path never sent
    # credentials (FetchAsync passes the built headers on both branches).
    return urlfetch.fetch(url, headers=headers)

  def FetchAsync(self, url, username=None, password=None):
    """Fetches a file asynchronously, and returns a Future with the result.
    """
    rpc = urlfetch.create_rpc()
    headers = _MakeHeaders(username, password)
    if self._base_path is not None:
      urlfetch.make_fetch_call(rpc, '%s/%s' % (self._base_path, url),
                               headers=headers)
    else:
      urlfetch.make_fetch_call(rpc, url, headers=headers)
    return Future(delegate=_AsyncFetchDelegate(rpc))
apache-2.0
doug-wade/AlgorithmsGreatestHits
data_structures/queue.py
2
1514
class Queue:
    """A singly-linked FIFO queue.

    Values are added at the tail with ``enqueue`` and removed from the
    head with ``dequeue``, so elements come out in insertion order.
    """

    def __init__(self):
        """Initializes an empty queue."""
        self._first = None   # head node: next to be dequeued
        self._last = None    # tail node: most recently enqueued
        self._length = 0     # number of elements currently stored

    class Node:
        """A single element of the linked list."""
        def __init__(self, value, next=None):
            self.value = value
            self.next = next

    def isEmpty(self):
        """Returns True if the queue is empty, False otherwise."""
        return self._first is None

    def size(self):
        """Returns the number of elements in the queue currently."""
        return self._length

    def peek(self):
        """Returns the next value that would be returned by dequeue,
        without removing it.  Returns None if the queue is empty.
        """
        # Bug fix: dequeue removes from the head (_first), so peek must
        # read _first — previously it returned _last.value (the tail),
        # and raised AttributeError on an empty queue instead of
        # mirroring dequeue's None.
        if self._first is None:
            return None
        return self._first.value

    def dequeue(self):
        """Removes and returns the least-recently-added value from the
        queue, or None if the queue is empty.
        """
        if self._length == 0:
            return None
        self._length -= 1
        oldfirst = self._first
        self._first = oldfirst.next
        if self.isEmpty():
            # Queue drained completely; drop the stale tail reference.
            self._last = None
        return oldfirst.value

    def enqueue(self, value):
        """Adds a value to the tail of the queue."""
        self._length += 1
        oldlast = self._last
        self._last = self.Node(value, None)
        if self._first is None:
            # Queue was empty: the new node is both head and tail.
            self._first = self._last
        else:
            oldlast.next = self._last
mit
tseaver/gcloud-python
tasks/google/cloud/tasks_v2beta3/proto/queue_pb2.py
1
25736
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/tasks_v2beta3/proto/queue.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.cloud.tasks_v2beta3.proto import target_pb2 as google_dot_cloud_dot_tasks__v2beta3_dot_proto_dot_target__pb2 from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='google/cloud/tasks_v2beta3/proto/queue.proto', package='google.cloud.tasks.v2beta3', syntax='proto3', serialized_pb=_b('\n,google/cloud/tasks_v2beta3/proto/queue.proto\x12\x1agoogle.cloud.tasks.v2beta3\x1a\x1cgoogle/api/annotations.proto\x1a-google/cloud/tasks_v2beta3/proto/target.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\x9f\x03\n\x05Queue\x12\x0c\n\x04name\x18\x01 \x01(\t\x12O\n\x15\x61pp_engine_http_queue\x18\x03 \x01(\x0b\x32..google.cloud.tasks.v2beta3.AppEngineHttpQueueH\x00\x12;\n\x0brate_limits\x18\x04 \x01(\x0b\x32&.google.cloud.tasks.v2beta3.RateLimits\x12=\n\x0cretry_config\x18\x05 \x01(\x0b\x32\'.google.cloud.tasks.v2beta3.RetryConfig\x12\x36\n\x05state\x18\x06 \x01(\x0e\x32\'.google.cloud.tasks.v2beta3.Queue.State\x12.\n\npurge_time\x18\x07 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\"E\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0b\n\x07RUNNING\x10\x01\x12\n\n\x06PAUSED\x10\x02\x12\x0c\n\x08\x44ISABLED\x10\x03\x42\x0c\n\nqueue_type\"j\n\nRateLimits\x12!\n\x19max_dispatches_per_second\x18\x01 \x01(\x01\x12\x16\n\x0emax_burst_size\x18\x02 \x01(\x05\x12!\n\x19max_concurrent_dispatches\x18\x03 \x01(\x05\"\xd1\x01\n\x0bRetryConfig\x12\x14\n\x0cmax_attempts\x18\x01 \x01(\x05\x12\x35\n\x12max_retry_duration\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12.\n\x0bmin_backoff\x18\x03 \x01(\x0b\x32\x19.google.protobuf.Duration\x12.\n\x0bmax_backoff\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x15\n\rmax_doublings\x18\x05 \x01(\x05\x42o\n\x1e\x63om.google.cloud.tasks.v2beta3B\nQueueProtoP\x01Z?google.golang.org/genproto/googleapis/cloud/tasks/v2beta3;tasksb\x06proto3') , dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_cloud_dot_tasks__v2beta3_dot_proto_dot_target__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,]) _QUEUE_STATE = _descriptor.EnumDescriptor( name='State', full_name='google.cloud.tasks.v2beta3.Queue.State', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='STATE_UNSPECIFIED', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='RUNNING', index=1, number=1, options=None, type=None), _descriptor.EnumValueDescriptor( name='PAUSED', index=2, number=2, options=None, type=None), _descriptor.EnumValueDescriptor( name='DISABLED', index=3, number=3, options=None, type=None), ], containing_type=None, options=None, serialized_start=551, serialized_end=620, ) _sym_db.RegisterEnumDescriptor(_QUEUE_STATE) _QUEUE = _descriptor.Descriptor( name='Queue', full_name='google.cloud.tasks.v2beta3.Queue', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='name', 
full_name='google.cloud.tasks.v2beta3.Queue.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='app_engine_http_queue', full_name='google.cloud.tasks.v2beta3.Queue.app_engine_http_queue', index=1, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='rate_limits', full_name='google.cloud.tasks.v2beta3.Queue.rate_limits', index=2, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='retry_config', full_name='google.cloud.tasks.v2beta3.Queue.retry_config', index=3, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='state', full_name='google.cloud.tasks.v2beta3.Queue.state', index=4, number=6, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='purge_time', full_name='google.cloud.tasks.v2beta3.Queue.purge_time', index=5, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], 
enum_types=[ _QUEUE_STATE, ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='queue_type', full_name='google.cloud.tasks.v2beta3.Queue.queue_type', index=0, containing_type=None, fields=[]), ], serialized_start=219, serialized_end=634, ) _RATELIMITS = _descriptor.Descriptor( name='RateLimits', full_name='google.cloud.tasks.v2beta3.RateLimits', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='max_dispatches_per_second', full_name='google.cloud.tasks.v2beta3.RateLimits.max_dispatches_per_second', index=0, number=1, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='max_burst_size', full_name='google.cloud.tasks.v2beta3.RateLimits.max_burst_size', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='max_concurrent_dispatches', full_name='google.cloud.tasks.v2beta3.RateLimits.max_concurrent_dispatches', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=636, serialized_end=742, ) _RETRYCONFIG = _descriptor.Descriptor( name='RetryConfig', full_name='google.cloud.tasks.v2beta3.RetryConfig', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='max_attempts', 
full_name='google.cloud.tasks.v2beta3.RetryConfig.max_attempts', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='max_retry_duration', full_name='google.cloud.tasks.v2beta3.RetryConfig.max_retry_duration', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='min_backoff', full_name='google.cloud.tasks.v2beta3.RetryConfig.min_backoff', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='max_backoff', full_name='google.cloud.tasks.v2beta3.RetryConfig.max_backoff', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='max_doublings', full_name='google.cloud.tasks.v2beta3.RetryConfig.max_doublings', index=4, number=5, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=745, serialized_end=954, ) _QUEUE.fields_by_name['app_engine_http_queue'].message_type = google_dot_cloud_dot_tasks__v2beta3_dot_proto_dot_target__pb2._APPENGINEHTTPQUEUE 
_QUEUE.fields_by_name['rate_limits'].message_type = _RATELIMITS _QUEUE.fields_by_name['retry_config'].message_type = _RETRYCONFIG _QUEUE.fields_by_name['state'].enum_type = _QUEUE_STATE _QUEUE.fields_by_name['purge_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP _QUEUE_STATE.containing_type = _QUEUE _QUEUE.oneofs_by_name['queue_type'].fields.append( _QUEUE.fields_by_name['app_engine_http_queue']) _QUEUE.fields_by_name['app_engine_http_queue'].containing_oneof = _QUEUE.oneofs_by_name['queue_type'] _RETRYCONFIG.fields_by_name['max_retry_duration'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION _RETRYCONFIG.fields_by_name['min_backoff'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION _RETRYCONFIG.fields_by_name['max_backoff'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION DESCRIPTOR.message_types_by_name['Queue'] = _QUEUE DESCRIPTOR.message_types_by_name['RateLimits'] = _RATELIMITS DESCRIPTOR.message_types_by_name['RetryConfig'] = _RETRYCONFIG _sym_db.RegisterFileDescriptor(DESCRIPTOR) Queue = _reflection.GeneratedProtocolMessageType('Queue', (_message.Message,), dict( DESCRIPTOR = _QUEUE, __module__ = 'google.cloud.tasks_v2beta3.proto.queue_pb2' , __doc__ = """A queue is a container of related tasks. Queues are configured to manage how those tasks are dispatched. Configurable properties include rate limits, retry options, queue types, and others. Attributes: name: Caller-specified and required in [CreateQueue][google.cloud.ta sks.v2beta3.CloudTasks.CreateQueue], after which it becomes output only. The queue name. The queue name must have the following format: ``projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID`` - ``PROJECT_ID`` can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), or periods (.). 
For more information, see `Identifying projects <https://cloud.google.com/resource-manager/docs/creating- managing-projects#identifying_projects>`_ - ``LOCATION_ID`` is the canonical ID for the queue's location. The list of available locations can be obtained by calling [ListLocatio ns][google.cloud.location.Locations.ListLocations]. For more information, see https://cloud.google.com/about/locations/. - ``QUEUE_ID`` can contain letters ([A-Za-z]), numbers ([0-9]), or hyphens (-). The maximum length is 100 characters. queue_type: Caller-specified and required in [CreateQueue][google.cloud.ta sks.v2beta3.CloudTasks.CreateQueue][], after which the queue config type becomes output only, though fields within the config are mutable. The queue's type. The type applies to all tasks in the queue. app_engine_http_queue: App Engine HTTP queue. An App Engine queue is a queue that has an [AppEngineHttpQeueue][] type. rate_limits: Rate limits for task dispatches. [rate\_limits][google.cloud.tasks.v2beta3.Queue.rate\_limits] and [retry\_config][google.cloud.tasks.v2beta3.Queue.retry\_co nfig] are related because they both control task attempts however they control how tasks are attempted in different ways: - [rate\_limits][google.cloud.tasks.v2beta3.Queue.rate\_limits] controls the total rate of dispatches from a queue (i.e. all traffic dispatched from the queue, regardless of whether the dispatch is from a first attempt or a retry). - [retry \_config][google.cloud.tasks.v2beta3.Queue.retry\_config] controls what happens to particular a task after its first attempt fails. That is, [retry\_config][google.cloud.tas ks.v2beta3.Queue.retry\_config] controls task retries (the second attempt, third attempt, etc). retry_config: Settings that determine the retry behavior. - For tasks created using Cloud Tasks: the queue-level retry settings apply to all tasks in the queue that were created using Cloud Tasks. Retry settings cannot be set on individual tasks. 
- For tasks created using the App Engine SDK: the queue-level retry settings apply to all tasks in the queue which do not have retry settings explicitly set on the task and were created by the App Engine SDK. See `App Engine documentation <https://cloud.google.com/appengine/docs/standar d/python/taskqueue/push/retrying-tasks>`_. state: Output only. The state of the queue. ``state`` can only be changed by called [PauseQueue][google.cloud.tasks.v2beta3.Clou dTasks.PauseQueue], [ResumeQueue][google.cloud.tasks.v2beta3.C loudTasks.ResumeQueue], or uploading `queue.yaml/xml <https:// cloud.google.com/appengine/docs/python/config/queueref>`_. [U pdateQueue][google.cloud.tasks.v2beta3.CloudTasks.UpdateQueue] cannot be used to change ``state``. purge_time: Output only. The last time this queue was purged. All tasks that were [created][google.cloud.tasks.v2beta3.Task.create\_time] before this time were purged. A queue can be purged using [PurgeQueu e][google.cloud.tasks.v2beta3.CloudTasks.PurgeQueue], the `App Engine Task Queue SDK, or the Cloud Console <https://cloud.goo gle.com/appengine/docs/standard/python/taskqueue/push/deleting -tasks-and-queues#purging_all_tasks_from_a_queue>`_. Purge time will be truncated to the nearest microsecond. Purge time will be unset if the queue has never been purged. """, # @@protoc_insertion_point(class_scope:google.cloud.tasks.v2beta3.Queue) )) _sym_db.RegisterMessage(Queue) RateLimits = _reflection.GeneratedProtocolMessageType('RateLimits', (_message.Message,), dict( DESCRIPTOR = _RATELIMITS, __module__ = 'google.cloud.tasks_v2beta3.proto.queue_pb2' , __doc__ = """Rate limits. This message determines the maximum rate that tasks can be dispatched by a queue, regardless of whether the dispatch is a first task attempt or a retry. Note: The debugging command, [RunTask][google.cloud.tasks.v2beta3.CloudTasks.RunTask], will run a task even if the queue has reached its [RateLimits][google.cloud.tasks.v2beta3.RateLimits]. 
Attributes: max_dispatches_per_second: The maximum rate at which tasks are dispatched from this queue. If unspecified when the queue is created, Cloud Tasks will pick the default. - For [App Engine queues][google.cloud.tasks.v2beta3.AppEngineHttpQueue], the maximum allowed value is 500. This field has the same meaning as `rate in queue.yaml/xml <https://cloud.google.com/a ppengine/docs/standard/python/config/queueref#rate>`_. max_burst_size: Output only. The max burst size. Max burst size limits how fast tasks in queue are processed when many tasks are in the queue and the rate is high. This field allows the queue to have a high rate so processing starts shortly after a task is enqueued, but still limits resource usage when many tasks are enqueued in a short period of time. The `token bucket <https://wikipedia.org/wiki/Token_Bucket>`_ algorithm is used to control the rate of task dispatches. Each queue has a token bucket that holds tokens, up to the maximum specified by ``max_burst_size``. Each time a task is dispatched, a token is removed from the bucket. Tasks will be dispatched until the queue's bucket runs out of tokens. The bucket will be continuously refilled with new tokens based on [max\_dispatche s\_per\_second][google.cloud.tasks.v2beta3.RateLimits.max\_dis patches\_per\_second]. Cloud Tasks will pick the value of ``max_burst_size`` based on the value of [max\_dispatches\_per \_second][google.cloud.tasks.v2beta3.RateLimits.max\_dispatche s\_per\_second]. For App Engine queues that were created or updated using ``queue.yaml/xml``, ``max_burst_size`` is equal to `bucket\_size <https://cloud.google.com/appengine/docs/stan dard/python/config/queueref#bucket_size>`_. 
Since ``max_burst_size`` is output only, if [UpdateQueue][google.clo ud.tasks.v2beta3.CloudTasks.UpdateQueue] is called on a queue created by ``queue.yaml/xml``, ``max_burst_size`` will be reset based on the value of [max\_dispatches\_per\_second][goo gle.cloud.tasks.v2beta3.RateLimits.max\_dispatches\_per\_secon d], regardless of whether [max\_dispatches\_per\_second][googl e.cloud.tasks.v2beta3.RateLimits.max\_dispatches\_per\_second] is updated. max_concurrent_dispatches: The maximum number of concurrent tasks that Cloud Tasks allows to be dispatched for this queue. After this threshold has been reached, Cloud Tasks stops dispatching tasks until the number of concurrent requests decreases. If unspecified when the queue is created, Cloud Tasks will pick the default. The maximum allowed value is 5,000. This field has the same meaning as `max\_concurrent\_requests in queue.yaml/xml <https ://cloud.google.com/appengine/docs/standard/python/config/queu eref#max_concurrent_requests>`_. """, # @@protoc_insertion_point(class_scope:google.cloud.tasks.v2beta3.RateLimits) )) _sym_db.RegisterMessage(RateLimits) RetryConfig = _reflection.GeneratedProtocolMessageType('RetryConfig', (_message.Message,), dict( DESCRIPTOR = _RETRYCONFIG, __module__ = 'google.cloud.tasks_v2beta3.proto.queue_pb2' , __doc__ = """Retry config. These settings determine when a failed task attempt is retried. Attributes: max_attempts: Number of attempts per task. Cloud Tasks will attempt the task ``max_attempts`` times (that is, if the first attempt fails, then there will be ``max_attempts - 1`` retries). Must be >= -1. If unspecified when the queue is created, Cloud Tasks will pick the default. -1 indicates unlimited attempts. This field has the same meaning as `task\_retry\_limit in queue.yaml/xml <https://cloud.google.com/appengine/docs/standa rd/python/config/queueref#retry_parameters>`_. 
max_retry_duration: If positive, ``max_retry_duration`` specifies the time limit for retrying a failed task, measured from when the task was first attempted. Once ``max_retry_duration`` time has passed *and* the task has been attempted [max\_attempts][google.cloud .tasks.v2beta3.RetryConfig.max\_attempts] times, no further attempts will be made and the task will be deleted. If zero, then the task age is unlimited. If unspecified when the queue is created, Cloud Tasks will pick the default. ``max_retry_duration`` will be truncated to the nearest second. This field has the same meaning as `task\_age\_limit in queue.yaml/xml <https://cloud.google.com/appengine/docs/sta ndard/python/config/queueref#retry_parameters>`_. min_backoff: A task will be [scheduled][google.cloud.tasks.v2beta3.Task.schedule\_time] for retry between [min\_backoff][google.cloud.tasks.v2beta3.Re tryConfig.min\_backoff] and [max\_backoff][google.cloud.tasks. v2beta3.RetryConfig.max\_backoff] duration after it fails, if the queue's [RetryConfig][google.cloud.tasks.v2beta3.RetryConfig] specifies that the task should be retried. If unspecified when the queue is created, Cloud Tasks will pick the default. ``min_backoff`` will be truncated to the nearest second. This field has the same meaning as `min\_backoff\_seconds in queue.yaml/xml <https://cloud.google.com/appengine/docs/standa rd/python/config/queueref#retry_parameters>`_. max_backoff: A task will be [scheduled][google.cloud.tasks.v2beta3.Task.schedule\_time] for retry between [min\_backoff][google.cloud.tasks.v2beta3.Re tryConfig.min\_backoff] and [max\_backoff][google.cloud.tasks. v2beta3.RetryConfig.max\_backoff] duration after it fails, if the queue's [RetryConfig][google.cloud.tasks.v2beta3.RetryConfig] specifies that the task should be retried. If unspecified when the queue is created, Cloud Tasks will pick the default. ``max_backoff`` will be truncated to the nearest second. 
This field has the same meaning as `max\_backoff\_seconds in queue.yaml/xml <https://cloud.google.com/appengine/docs/standa rd/python/config/queueref#retry_parameters>`_. max_doublings: The time between retries will double ``max_doublings`` times. A task's retry interval starts at [min\_backoff][google.cloud. tasks.v2beta3.RetryConfig.min\_backoff], then doubles ``max_doublings`` times, then increases linearly, and finally retries retries at intervals of [max\_backoff][google.cloud.ta sks.v2beta3.RetryConfig.max\_backoff] up to [max\_attempts][go ogle.cloud.tasks.v2beta3.RetryConfig.max\_attempts] times. For example, if [min\_backoff][google.cloud.tasks.v2beta3.Retr yConfig.min\_backoff] is 10s, [max\_backoff][google.cloud.task s.v2beta3.RetryConfig.max\_backoff] is 300s, and ``max_doublings`` is 3, then the a task will first be retried in 10s. The retry interval will double three times, and then increase linearly by 2^3 \* 10s. Finally, the task will retry at intervals of [max\_backoff][google.cloud.tasks.v2beta3.Retr yConfig.max\_backoff] until the task has been attempted [max\_ attempts][google.cloud.tasks.v2beta3.RetryConfig.max\_attempts ] times. Thus, the requests will retry at 10s, 20s, 40s, 80s, 160s, 240s, 300s, 300s, .... If unspecified when the queue is created, Cloud Tasks will pick the default. This field has the same meaning as `max\_doublings in queue.yaml/xml <https:/ /cloud.google.com/appengine/docs/standard/python/config/queuer ef#retry_parameters>`_. """, # @@protoc_insertion_point(class_scope:google.cloud.tasks.v2beta3.RetryConfig) )) _sym_db.RegisterMessage(RetryConfig) DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\036com.google.cloud.tasks.v2beta3B\nQueueProtoP\001Z?google.golang.org/genproto/googleapis/cloud/tasks/v2beta3;tasks')) # @@protoc_insertion_point(module_scope)
apache-2.0
drpngx/tensorflow
tensorflow/contrib/distributions/python/ops/sample_stats.py
14
19943
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functions for computing statistics of samples.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import clip_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import spectral_ops from tensorflow.python.ops.distributions import util __all__ = [ "auto_correlation", "percentile", ] # TODO(langmore) Write separate versions of this for real/complex dtype, taking # advantage of optimized real-fft ops. def auto_correlation( x, axis=-1, max_lags=None, center=True, normalize=True, name="auto_correlation"): """Auto correlation along one axis. Given a `1-D` wide sense stationary (WSS) sequence `X`, the auto correlation `RXX` may be defined as (with `E` expectation and `Conj` complex conjugate) ``` RXX[m] := E{ W[m] Conj(W[0]) } = E{ W[0] Conj(W[-m]) }, W[n] := (X[n] - MU) / S, MU := E{ X[0] }, S**2 := E{ (X[0] - MU) Conj(X[0] - MU) }. 
``` This function takes the viewpoint that `x` is (along one axis) a finite sub-sequence of a realization of (WSS) `X`, and then uses `x` to produce an estimate of `RXX[m]` as follows: After extending `x` from length `L` to `inf` by zero padding, the auto correlation estimate `rxx[m]` is computed for `m = 0, 1, ..., max_lags` as ``` rxx[m] := (L - m)**-1 sum_n w[n + m] Conj(w[n]), w[n] := (x[n] - mu) / s, mu := L**-1 sum_n x[n], s**2 := L**-1 sum_n (x[n] - mu) Conj(x[n] - mu) ``` The error in this estimate is proportional to `1 / sqrt(len(x) - m)`, so users often set `max_lags` small enough so that the entire output is meaningful. Note that since `mu` is an imperfect estimate of `E{ X[0] }`, and we divide by `len(x) - m` rather than `len(x) - m - 1`, our estimate of auto correlation contains a slight bias, which goes to zero as `len(x) - m --> infinity`. Args: x: `float32` or `complex64` `Tensor`. axis: Python `int`. The axis number along which to compute correlation. Other dimensions index different batch members. max_lags: Positive `int` tensor. The maximum value of `m` to consider (in equation above). If `max_lags >= x.shape[axis]`, we effectively re-set `max_lags` to `x.shape[axis] - 1`. center: Python `bool`. If `False`, do not subtract the mean estimate `mu` from `x[n]` when forming `w[n]`. normalize: Python `bool`. If `False`, do not divide by the variance estimate `s**2` when forming `w[n]`. name: `String` name to prepend to created ops. Returns: `rxx`: `Tensor` of same `dtype` as `x`. `rxx.shape[i] = x.shape[i]` for `i != axis`, and `rxx.shape[axis] = max_lags + 1`. Raises: TypeError: If `x` is not a supported type. """ # Implementation details: # Extend length N / 2 1-D array x to length N by zero padding onto the end. # Then, set # F[x]_k := sum_n x_n exp{-i 2 pi k n / N }. # It is not hard to see that # F[x]_k Conj(F[x]_k) = F[R]_k, where # R_m := sum_n x_n Conj(x_{(n - m) mod N}). 
# One can also check that R_m / (N / 2 - m) is an unbiased estimate of RXX[m]. # Since F[x] is the DFT of x, this leads us to a zero-padding and FFT/IFFT # based version of estimating RXX. # Note that this is a special case of the Wiener-Khinchin Theorem. with ops.name_scope(name, values=[x]): x = ops.convert_to_tensor(x, name="x") # Rotate dimensions of x in order to put axis at the rightmost dim. # FFT op requires this. rank = util.prefer_static_rank(x) if axis < 0: axis = rank + axis shift = rank - 1 - axis # Suppose x.shape[axis] = T, so there are T "time" steps. # ==> x_rotated.shape = B + [T], # where B is x_rotated's batch shape. x_rotated = util.rotate_transpose(x, shift) if center: x_rotated -= math_ops.reduce_mean(x_rotated, axis=-1, keepdims=True) # x_len = N / 2 from above explanation. The length of x along axis. # Get a value for x_len that works in all cases. x_len = util.prefer_static_shape(x_rotated)[-1] # TODO(langmore) Investigate whether this zero padding helps or hurts. At # the moment is is necessary so that all FFT implementations work. # Zero pad to the next power of 2 greater than 2 * x_len, which equals # 2**(ceil(Log_2(2 * x_len))). Note: Log_2(X) = Log_e(X) / Log_e(2). x_len_float64 = math_ops.cast(x_len, np.float64) target_length = math_ops.pow( np.float64(2.), math_ops.ceil(math_ops.log(x_len_float64 * 2) / np.log(2.))) pad_length = math_ops.cast(target_length - x_len_float64, np.int32) # We should have: # x_rotated_pad.shape = x_rotated.shape[:-1] + [T + pad_length] # = B + [T + pad_length] x_rotated_pad = util.pad(x_rotated, axis=-1, back=True, count=pad_length) dtype = x.dtype if not dtype.is_complex: if not dtype.is_floating: raise TypeError("Argument x must have either float or complex dtype" " found: {}".format(dtype)) x_rotated_pad = math_ops.complex(x_rotated_pad, dtype.real_dtype.as_numpy_dtype(0.)) # Autocorrelation is IFFT of power-spectral density (up to some scaling). 
fft_x_rotated_pad = spectral_ops.fft(x_rotated_pad) spectral_density = fft_x_rotated_pad * math_ops.conj(fft_x_rotated_pad) # shifted_product is R[m] from above detailed explanation. # It is the inner product sum_n X[n] * Conj(X[n - m]). shifted_product = spectral_ops.ifft(spectral_density) # Cast back to real-valued if x was real to begin with. shifted_product = math_ops.cast(shifted_product, dtype) # Figure out if we can deduce the final static shape, and set max_lags. # Use x_rotated as a reference, because it has the time dimension in the far # right, and was created before we performed all sorts of crazy shape # manipulations. know_static_shape = True if not x_rotated.shape.is_fully_defined(): know_static_shape = False if max_lags is None: max_lags = x_len - 1 else: max_lags = ops.convert_to_tensor(max_lags, name="max_lags") max_lags_ = tensor_util.constant_value(max_lags) if max_lags_ is None or not know_static_shape: know_static_shape = False max_lags = math_ops.minimum(x_len - 1, max_lags) else: max_lags = min(x_len - 1, max_lags_) # Chop off the padding. # We allow users to provide a huge max_lags, but cut it off here. # shifted_product_chopped.shape = x_rotated.shape[:-1] + [max_lags] shifted_product_chopped = shifted_product[..., :max_lags + 1] # If possible, set shape. if know_static_shape: chopped_shape = x_rotated.shape.as_list() chopped_shape[-1] = min(x_len, max_lags + 1) shifted_product_chopped.set_shape(chopped_shape) # Recall R[m] is a sum of N / 2 - m nonzero terms x[n] Conj(x[n - m]). The # other terms were zeros arising only due to zero padding. # `denominator = (N / 2 - m)` (defined below) is the proper term to # divide by by to make this an unbiased estimate of the expectation # E[X[n] Conj(X[n - m])]. x_len = math_ops.cast(x_len, dtype.real_dtype) max_lags = math_ops.cast(max_lags, dtype.real_dtype) denominator = x_len - math_ops.range(0., max_lags + 1.) 
denominator = math_ops.cast(denominator, dtype) shifted_product_rotated = shifted_product_chopped / denominator if normalize: shifted_product_rotated /= shifted_product_rotated[..., :1] # Transpose dimensions back to those of x. return util.rotate_transpose(shifted_product_rotated, -shift) # TODO(langmore) To make equivalent to numpy.percentile: # Make work with a sequence of floats or single float for 'q'. # Make work with "linear", "midpoint" interpolation. (linear should be default) def percentile(x, q, axis=None, interpolation=None, keep_dims=False, validate_args=False, name=None): """Compute the `q`-th percentile of `x`. Given a vector `x`, the `q`-th percentile of `x` is the value `q / 100` of the way from the minimum to the maximum in a sorted copy of `x`. The values and distances of the two nearest neighbors as well as the `interpolation` parameter will determine the percentile if the normalized ranking does not match the location of `q` exactly. This function is the same as the median if `q = 50`, the same as the minimum if `q = 0` and the same as the maximum if `q = 100`. ```python # Get 30th percentile with default ('nearest') interpolation. x = [1., 2., 3., 4.] percentile(x, q=30.) ==> 2.0 # Get 30th percentile with 'lower' interpolation x = [1., 2., 3., 4.] percentile(x, q=30., interpolation='lower') ==> 1.0 # Get 100th percentile (maximum). By default, this is computed over every dim x = [[1., 2.] [3., 4.]] percentile(x, q=100.) ==> 4.0 # Treat the leading dim as indexing samples, and find the 100th quantile (max) # over all such samples. x = [[1., 2.] [3., 4.]] percentile(x, q=100., axis=[0]) ==> [3., 4.] ``` Compare to `numpy.percentile`. Args: x: Floating point `N-D` `Tensor` with `N > 0`. If `axis` is not `None`, `x` must have statically known number of dimensions. q: Scalar `Tensor` in `[0, 100]`. The percentile. axis: Optional `0-D` or `1-D` integer `Tensor` with constant values. 
The axis that hold independent samples over which to return the desired percentile. If `None` (the default), treat every dimension as a sample dimension, returning a scalar. interpolation : {"lower", "higher", "nearest"}. Default: "nearest" This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points `i < j`: * lower: `i`. * higher: `j`. * nearest: `i` or `j`, whichever is nearest. keep_dims: Python `bool`. If `True`, the last dimension is kept with size 1 If `False`, the last dimension is removed from the output shape. validate_args: Whether to add runtime checks of argument validity. If False, and arguments are incorrect, correct behavior is not guaranteed. name: A Python string name to give this `Op`. Default is "percentile" Returns: A `(N - len(axis))` dimensional `Tensor` of same dtype as `x`, or, if `axis` is `None`, a scalar. Raises: ValueError: If argument 'interpolation' is not an allowed type. """ name = name or "percentile" allowed_interpolations = {"lower", "higher", "nearest"} if interpolation is None: interpolation = "nearest" else: if interpolation not in allowed_interpolations: raise ValueError("Argument 'interpolation' must be in %s. Found %s" % (allowed_interpolations, interpolation)) with ops.name_scope(name, [x, q]): x = ops.convert_to_tensor(x, name="x") # Double is needed here and below, else we get the wrong index if the array # is huge along axis. 
q = math_ops.to_double(q, name="q") _get_static_ndims(q, expect_ndims=0) if validate_args: q = control_flow_ops.with_dependencies([ check_ops.assert_rank(q, 0), check_ops.assert_greater_equal(q, math_ops.to_double(0.)), check_ops.assert_less_equal(q, math_ops.to_double(100.)) ], q) if axis is None: y = array_ops.reshape(x, [-1]) else: axis = ops.convert_to_tensor(axis, name="axis") check_ops.assert_integer(axis) axis_ndims = _get_static_ndims( axis, expect_static=True, expect_ndims_no_more_than=1) axis_const = tensor_util.constant_value(axis) if axis_const is None: raise ValueError( "Expected argument 'axis' to be statically available. Found: %s" % axis) axis = axis_const if axis_ndims == 0: axis = [axis] axis = [int(a) for a in axis] x_ndims = _get_static_ndims( x, expect_static=True, expect_ndims_at_least=1) axis = _make_static_axis_non_negative(axis, x_ndims) y = _move_dims_to_flat_end(x, axis, x_ndims) frac_at_q_or_above = 1. - q / 100. d = math_ops.to_double(array_ops.shape(y)[-1]) if interpolation == "lower": index = math_ops.ceil((d - 1) * frac_at_q_or_above) elif interpolation == "higher": index = math_ops.floor((d - 1) * frac_at_q_or_above) elif interpolation == "nearest": index = math_ops.round((d - 1) * frac_at_q_or_above) # If d is gigantic, then we would have d == d - 1, even in double... So # let's use max/min to avoid out of bounds errors. d = array_ops.shape(y)[-1] # d - 1 will be distinct from d in int32. index = clip_ops.clip_by_value(math_ops.to_int32(index), 0, d - 1) # Sort everything, not just the top 'k' entries, which allows multiple calls # to sort only once (under the hood) and use CSE. sorted_y = _sort_tensor(y) # result.shape = B result = sorted_y[..., index] result.set_shape(y.get_shape()[:-1]) if keep_dims: if axis is None: # ones_vec = [1, 1,..., 1], total length = len(S) + len(B). 
ones_vec = array_ops.ones( shape=[_get_best_effort_ndims(x)], dtype=dtypes.int32) result *= array_ops.ones(ones_vec, dtype=x.dtype) else: result = _insert_back_keep_dims(result, axis) return result def _get_static_ndims(x, expect_static=False, expect_ndims=None, expect_ndims_no_more_than=None, expect_ndims_at_least=None): """Get static number of dimensions and assert that some expectations are met. This function returns the number of dimensions "ndims" of x, as a Python int. The optional expect arguments are used to check the ndims of x, but this is only done if the static ndims of x is not None. Args: x: A Tensor. expect_static: Expect `x` to have statically defined `ndims`. expect_ndims: Optional Python integer. If provided, assert that x has number of dimensions equal to this. expect_ndims_no_more_than: Optional Python integer. If provided, assert that x has no more than this many dimensions. expect_ndims_at_least: Optional Python integer. If provided, assert that x has at least this many dimensions. Returns: ndims: A Python integer. Raises: ValueError: If any of the expectations above are violated. """ ndims = x.get_shape().ndims if ndims is None: shape_const = tensor_util.constant_value(array_ops.shape(x)) if shape_const is not None: ndims = shape_const.ndim if ndims is None: if expect_static: raise ValueError( "Expected argument 'x' to have statically defined 'ndims'. Found: " % x) return if expect_ndims is not None: ndims_message = ("Expected argument 'x' to have ndims %s. Found tensor %s" % (expect_ndims, x)) if ndims != expect_ndims: raise ValueError(ndims_message) if expect_ndims_at_least is not None: ndims_at_least_message = ( "Expected argument 'x' to have ndims >= %d. Found tensor %s" % ( expect_ndims_at_least, x)) if ndims < expect_ndims_at_least: raise ValueError(ndims_at_least_message) if expect_ndims_no_more_than is not None: ndims_no_more_than_message = ( "Expected argument 'x' to have ndims <= %d. 
Found tensor %s" % ( expect_ndims_no_more_than, x)) if ndims > expect_ndims_no_more_than: raise ValueError(ndims_no_more_than_message) return ndims def _get_best_effort_ndims(x, expect_ndims=None, expect_ndims_at_least=None, expect_ndims_no_more_than=None): """Get static ndims if possible. Fallback on `tf.rank(x)`.""" ndims_static = _get_static_ndims( x, expect_ndims=expect_ndims, expect_ndims_at_least=expect_ndims_at_least, expect_ndims_no_more_than=expect_ndims_no_more_than) if ndims_static is not None: return ndims_static return array_ops.rank(x) def _insert_back_keep_dims(x, axis): """Insert the dims in `axis` back as singletons after being removed. Args: x: `Tensor`. axis: Python list of integers. Returns: `Tensor` with same values as `x`, but additional singleton dimensions. """ for i in sorted(axis): x = array_ops.expand_dims(x, axis=i) return x def _make_static_axis_non_negative(axis, ndims): """Convert possibly negatively indexed axis to non-negative. Args: axis: Iterable over Python integers. ndims: Number of dimensions into which axis indexes. Returns: A list of non-negative Python integers. Raises: ValueError: If values in `axis` are too big/small to index into `ndims`. """ non_negative_axis = [] for d in axis: if d >= 0: if d >= ndims: raise ValueError("dim %d not in the interval [0, %d]." % (d, ndims - 1)) non_negative_axis.append(d) else: if d < -1 * ndims: raise ValueError( "Negatively indexed dim %d not in the interval [-%d, -1]" % (d, ndims)) non_negative_axis.append(ndims + d) return non_negative_axis def _move_dims_to_flat_end(x, axis, x_ndims): """Move dims corresponding to `axis` in `x` to the end, then flatten. Args: x: `Tensor` with shape `[B0,B1,...,Bb]`. axis: Python list of indices into dimensions of `x`. x_ndims: Python integer holding number of dimensions in `x`. Returns: `Tensor` with value from `x` and dims in `axis` moved to end into one single dimension. 
""" # Suppose x.shape = [a, b, c, d] # Suppose axis = [1, 3] # front_dims = [0, 2] in example above. front_dims = sorted(set(range(x_ndims)).difference(axis)) # x_permed.shape = [a, c, b, d] x_permed = array_ops.transpose(x, perm=front_dims + list(axis)) if x.get_shape().is_fully_defined(): x_shape = x.get_shape().as_list() # front_shape = [a, c], end_shape = [b * d] front_shape = [x_shape[i] for i in front_dims] end_shape = [np.prod([x_shape[i] for i in axis])] full_shape = front_shape + end_shape else: front_shape = array_ops.shape(x_permed)[:x_ndims - len(axis)] end_shape = [-1] full_shape = array_ops.concat([front_shape, end_shape], axis=0) return array_ops.reshape(x_permed, shape=full_shape) def _sort_tensor(tensor): """Use `top_k` to sort a `Tensor` along the last dimension.""" sorted_, _ = nn_ops.top_k(tensor, k=array_ops.shape(tensor)[-1]) return sorted_
apache-2.0
mrbox/django
tests/forms_tests/widget_tests/test_datetimeinput.py
247
2367
from datetime import datetime from django.forms import DateTimeInput from django.test import override_settings from django.utils import translation from .base import WidgetTest class DateTimeInputTest(WidgetTest): widget = DateTimeInput() def test_render_none(self): self.check_html(self.widget, 'date', None, '<input type="text" name="date" />') def test_render_value(self): """ The microseconds are trimmed on display, by default. """ d = datetime(2007, 9, 17, 12, 51, 34, 482548) self.assertEqual(str(d), '2007-09-17 12:51:34.482548') self.check_html(self.widget, 'date', d, html=( '<input type="text" name="date" value="2007-09-17 12:51:34" />' )) self.check_html(self.widget, 'date', datetime(2007, 9, 17, 12, 51, 34), html=( '<input type="text" name="date" value="2007-09-17 12:51:34" />' )) self.check_html(self.widget, 'date', datetime(2007, 9, 17, 12, 51), html=( '<input type="text" name="date" value="2007-09-17 12:51:00" />' )) def test_render_formatted(self): """ Use 'format' to change the way a value is displayed. """ widget = DateTimeInput( format='%d/%m/%Y %H:%M', attrs={'type': 'datetime'}, ) d = datetime(2007, 9, 17, 12, 51, 34, 482548) self.check_html(widget, 'date', d, html='<input type="datetime" name="date" value="17/09/2007 12:51" />') @override_settings(USE_L10N=True) @translation.override('de-at') def test_l10n(self): d = datetime(2007, 9, 17, 12, 51, 34, 482548) self.check_html(self.widget, 'date', d, html=( '<input type="text" name="date" value="17.09.2007 12:51:34" />' )) @override_settings(USE_L10N=True) @translation.override('de-at') def test_locale_aware(self): d = datetime(2007, 9, 17, 12, 51, 34, 482548) with self.settings(USE_L10N=False): self.check_html( self.widget, 'date', d, html='<input type="text" name="date" value="2007-09-17 12:51:34" />', ) with translation.override('es'): self.check_html( self.widget, 'date', d, html='<input type="text" name="date" value="17/09/2007 12:51:34" />', )
bsd-3-clause
collex100/odoo
addons/l10n_fr_hr_payroll/report/fiche_paye.py
303
3203
#!/usr/bin/env python #-*- coding:utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved # d$ # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import osv from openerp.report import report_sxw class fiche_paye_parser(report_sxw.rml_parse): def __init__(self, cr, uid, name, context): super(fiche_paye_parser, self).__init__(cr, uid, name, context) self.localcontext.update({ 'lang': "fr_FR", 'get_payslip_lines': self.get_payslip_lines, 'get_total_by_rule_category': self.get_total_by_rule_category, 'get_employer_line': self.get_employer_line, }) def get_payslip_lines(self, objs): payslip_line = self.pool.get('hr.payslip.line') res = [] ids = [] for item in objs: if item.appears_on_payslip == True and not item.salary_rule_id.parent_rule_id : ids.append(item.id) if ids: res = payslip_line.browse(self.cr, self.uid, ids) return res def get_total_by_rule_category(self, obj, code): payslip_line = self.pool.get('hr.payslip.line') rule_cate_obj = self.pool.get('hr.salary.rule.category') cate_ids = rule_cate_obj.search(self.cr, self.uid, [('code', '=', code)]) category_total = 0 if cate_ids: line_ids = payslip_line.search(self.cr, 
self.uid, [('slip_id', '=', obj.id),('category_id.id', '=', cate_ids[0] )]) for line in payslip_line.browse(self.cr, self.uid, line_ids): category_total += line.total return category_total def get_employer_line(self, obj, parent_line): payslip_line = self.pool.get('hr.payslip.line') line_ids = payslip_line.search(self.cr, self.uid, [('slip_id', '=', obj.id), ('salary_rule_id.parent_rule_id.id', '=', parent_line.salary_rule_id.id )]) res = line_ids and payslip_line.browse(self.cr, self.uid, line_ids[0]) or False return res class wrapped_report_fiche_paye(osv.AbstractModel): _name = 'report.l10n_fr_hr_payroll.report_l10nfrfichepaye' _inherit = 'report.abstract_report' _template = 'l10n_fr_hr_payroll.report_l10nfrfichepaye' _wrapped_report_class = fiche_paye_parser # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
patmun/pynetdicom
netdicom/fsm.py
1
17051
# # Copyright (c) 2012 Patrice Munger # This file is part of pynetdicom, released under a modified MIT license. # See the file license.txt included with this distribution, also # available at http://pynetdicom.googlecode.com # # Implementation of the OSI Upper Layer Services # DICOM, Part 8, Section 7 import socket import PDU import time import DULparameters # Finite State machine action definitions import logging logger = logging.getLogger(__name__) def AE_1(provider): # Issue TRANSPORT CONNECT request primitive to local transport service provider.RemoteClientSocket = socket.socket( socket.AF_INET, socket.SOCK_STREAM) try: timeout_original = provider.RemoteClientSocket.gettimeout() if provider.ConnectTimeoutSeconds is not None: provider.RemoteClientSocket.settimeout(provider.ConnectTimeoutSeconds) provider.RemoteClientSocket.connect( provider.primitive.CalledPresentationAddress) provider.RemoteClientSocket.settimeout(timeout_original) except socket.error: # cannot connect provider.ToServiceUser.put(None) def AE_2(provider): # Send A-ASSOCIATE-RQ PDU provider.pdu = PDU.A_ASSOCIATE_RQ_PDU() provider.pdu.FromParams(provider.primitive) provider.RemoteClientSocket.send(provider.pdu.Encode()) def AE_3(provider): # Issue A-ASSOCIATE confirmation (accept) primitive provider.ToServiceUser.put(provider.primitive) def AE_4(provider): # Issue A-ASSOCIATE confirmation (reject) primitive and close transport # connection provider.ToServiceUser.put(provider.primitive) provider.RemoteClientSocket.close() provider.RemoteClientSocket = None def AE_5(provider): # Issue connection response primitive start ARTIM timer # Don't need to send this primitive. provider.Timer.Start() def AE_6(provider): # Stop ARTIM timer and if A-ASSOCIATE-RQ acceptable by service provider # - Issue A-ASSOCIATE indication primitive provider.Timer.Stop() # Accept provider.SM.NextState('Sta3') provider.ToServiceUser.put(provider.primitive) # otherwise???? 
def AE_7(provider): # Send A-ASSOCIATE-AC PDU provider.pdu = PDU.A_ASSOCIATE_AC_PDU() provider.pdu.FromParams(provider.primitive) provider.RemoteClientSocket.send(provider.pdu.Encode()) def AE_8(provider): # Send A-ASSOCIATE-RJ PDU and start ARTIM timer provider.pdu = PDU.A_ASSOCIATE_RJ_PDU() # not sure about this ... if provider.primitive.Diagnostic is not None: provider.primitive.ResultSource = provider.primitive.Diagnostic.source #else: # provider.primitive.Diagnostic = 1 # provider.primitive.ResultSource = 2 provider.pdu.FromParams(provider.primitive) provider.RemoteClientSocket.send(provider.pdu.Encode()) def DT_1(provider): # Send P-DATA-TF PDU provider.pdu = PDU.P_DATA_TF_PDU() provider.pdu.FromParams(provider.primitive) provider.primitive = None provider.RemoteClientSocket.send(provider.pdu.Encode()) def DT_2(provider): # Send P-DATA indication primitive provider.ToServiceUser.put(provider.primitive) def AR_1(provider): # Send A-RELEASE-RQ PDU provider.pdu = PDU.A_RELEASE_RQ_PDU() provider.pdu.FromParams(provider.primitive) provider.RemoteClientSocket.send(provider.pdu.Encode()) def AR_2(provider): # Send A-RELEASE indication primitive provider.ToServiceUser.put(provider.primitive) def AR_3(provider): # Issue A-RELEASE confirmation primitive and close transport connection provider.ToServiceUser.put(provider.primitive) provider.RemoteClientSocket.close() provider.RemoteClientSocket = None def AR_4(provider): # Issue A-RELEASE-RP PDU and start ARTIM timer provider.pdu = PDU.A_RELEASE_RP_PDU() provider.pdu.FromParams(provider.primitive) provider.RemoteClientSocket.send(provider.pdu.Encode()) provider.Timer.Start() def AR_5(provider): # Stop ARTIM timer provider.Timer.Stop() def AR_6(provider): # Issue P-DATA indication provider.ToServiceUser.put(provider.primitive) def AR_7(provider): # Issue P-DATA-TF PDU provider.pdu = PDU.P_DATA_TF_PDU() provider.pdu.FromParams(provider.primitive) provider.RemoteClientSocket.send(provider.pdu.Encode()) def AR_8(provider): # 
Issue A-RELEASE indication (release collision) provider.ToServiceUser.put(provider.primitive) if provider.requestor == 1: provider.SM.NextState('Sta9') else: provider.SM.NextState('Sta10') def AR_9(provider): # Send A-RELEASE-RP PDU provider.pdu = PDU.A_RELEASE_RP_PDU() provider.pdu.FromParams(provider.primitive) provider.RemoteClientSocket.send(provider.pdu.Encode()) def AR_10(provider): # Issue A-RELEASE confirmation primitive provider.ToServiceUser.put(provider.primitive) def AA_1(provider): # Send A-ABORT PDU (service-user source) and start (or restart # if already started) ARTIM timer. provider.pdu = PDU.A_ABORT_PDU() # CHECK THIS ... provider.pdu.AbortSource = 1 provider.pdu.ReasonDiag = 0 provider.pdu.FromParams(provider.primitive) provider.RemoteClientSocket.send(provider.pdu.Encode()) provider.Timer.Restart() def AA_2(provider): # Stop ARTIM timer if running. Close transport connection. provider.Timer.Stop() provider.RemoteClientSocket.close() provider.RemoteClientSocket = None def AA_3(provider): # If (service-user initiated abort): # - Issue A-ABORT indication and close transport connection. # Otherwise (service-provider initiated abort): # - Issue A-P-ABORT indication and close transport connection. # This action is triggered by the reception of an A-ABORT PDU provider.ToServiceUser.put(provider.primitive) provider.RemoteClientSocket.close() provider.RemoteClientSocket = None def AA_4(provider): # Issue A-P-ABORT indication primitive. provider.primitive = DULparameters.A_ABORT_ServiceParameters() provider.ToServiceUser.put(provider.primitive) def AA_5(provider): # Stop ARTIM timer. provider.Timer.Stop() def AA_6(provider): # Ignore PDU. provider.primitive = None def AA_7(provider): # Send A-ABORT PDU. provider.pdu = PDU.A_ABORT_PDU() provider.pdu.FromParams(provider.primitive) provider.RemoteClientSocket.send(provider.pdu.Encode()) def AA_8(provider): # Send A-ABORT PDU (service-provider source), issue and A-P-ABORT # indication, and start ARTIM timer. 
# Send A-ABORT PDU provider.pdu = PDU.A_ABORT_PDU() provider.pdu.Source = 2 provider.pdu.ReasonDiag = 0 # No reason given if provider.RemoteClientSocket: provider.RemoteClientSocket.send(provider.pdu.Encode()) # Issue A-P-ABORT indication provider.ToServiceUser.put(provider.primitive) provider.Timer.Start() # Finite State Machine # states states = { # No association 'Sta1': 'Idle', # Association establishment 'Sta2': 'Transport Connection Open (Awaiting A-ASSOCIATE-RQ PDU)', 'Sta3': 'Awaiting Local A-ASSOCIATE response primitive (from local user)', 'Sta4': 'Awaiting transport connection opening to complete (from local ' 'transport service', 'Sta5': 'Awaiting A-ASSOCIATE-AC or A-ASSOCIATE-RJ PDU', # Data transfer 'Sta6': 'Association established and ready for data transfer', # Association release 'Sta7': 'Awaiting A-RELEASE-RP PDU', 'Sta8': 'Awaiting local A-RELEASE response primitive (from local user)', 'Sta9': 'Release collision requestor side; awaiting A-RELEASE response ' ' (from local user)', 'Sta10': 'Release collision acceptor side; awaiting A-RELEASE-RP PDU', 'Sta11': 'Release collision requestor side; awaiting A-RELEASE-RP PDU', 'Sta12': 'Release collision acceptor side; awaiting A-RELEASE response ' 'primitive (from local user)', 'Sta13': 'Awaiting Transport Connection Close Indication (Association no ' 'longer exists)' } # actions actions = { # Association establishment actions 'AE-1': ('Issue TransportConnect request primitive to local transport ' 'service', AE_1, 'Sta4'), 'AE-2': ('Send A_ASSOCIATE-RQ PDU', AE_2, 'Sta5'), 'AE-3': ('Issue A-ASSOCIATE confirmation (accept) primitive', AE_3, 'Sta6'), 'AE-4': ('Issue A-ASSOCIATE confirmation (reject) primitive and close ' 'transport connection', AE_4, 'Sta1'), 'AE-5': ('Issue transport connection response primitive; start ARTIM ' 'timer', AE_5, 'Sta2'), 'AE-6': ('Check A-ASSOCIATE-RQ', AE_6, ('Sta3', 'Sta13')), 'AE-7': ('Send A-ASSOCIATE-AC PDU', AE_7, 'Sta6'), 'AE-8': ('Send A-ASSOCIATE-RJ PDU', AE_8, 
'Sta13'), # Data transfer related actions 'DT-1': ('Send P-DATA-TF PDU', DT_1, 'Sta6'), 'DT-2': ('Send P-DATA indication primitive', DT_2, 'Sta6'), # Assocation Release related actions 'AR-1': ('Send A-RELEASE-RQ PDU', AR_1, 'Sta7'), 'AR-2': ('Send A-RELEASE indication primitive', AR_2, 'Sta8'), 'AR-3': ('Issue A-RELEASE confirmation primitive and close transport ' 'connection', AR_3, 'Sta1'), 'AR-4': ('Issue A-RELEASE-RP PDU and start ARTIM timer', AR_4, 'Sta13'), 'AR-5': ('Stop ARTIM timer', AR_5, 'Sta1'), 'AR-6': ('Issue P-DATA indication', AR_6, 'Sta7'), 'AR-7': ('Issue P-DATA-TF PDU', AR_7, 'Sta8'), 'AR-8': ('Issue A-RELEASE indication (release collision)', AR_8, ('Sta9', 'Sta10')), 'AR-9': ('Send A-RELEASE-RP PDU', AR_9, 'Sta11'), 'AR-10': ('Issue A-RELEASE confimation primitive', AR_10, 'Sta12'), # Association abort related actions 'AA-1': ('Send A-ABORT PDU (service-user source) and start (or restart) ' 'ARTIM timer', AA_1, 'Sta13'), 'AA-2': ('Stop ARTIM timer if running. Close transport connection', AA_2, 'Sta1'), 'AA-3': ('Issue A-ABORT or A-P-ABORT indication and close transport ' 'connection', AA_3, 'Sta1'), 'AA-4': ('Issue A-P-ABORT indication primitive', AA_4, 'Sta1'), 'AA-5': ('Stop ARTIM timer', AA_5, 'Sta1'), 'AA-6': ('Ignore PDU', AA_6, 'Sta13'), 'AA-7': ('Send A-ABORT PDU', AA_7, 'Sta13'), 'AA-8': ('Send A-ABORT PDU, issue an A-P-ABORT indication and start ' 'ARTIM timer', AA_8, 'Sta13')} # events events = { 'Evt1': "A-ASSOCIATE request (local user)", 'Evt2': "Transport connect confirmation (local transport service)", 'Evt3': "A-ASSOCIATE-AC PDU (received on transport connection)", 'Evt4': "A-ASSOCIATE-RJ PDU (received on transport connection)", 'Evt5': "Transport connection indication (local transport service)", 'Evt6': "A-ASSOCIATE-RQ PDU (on tranport connection)", 'Evt7': "A-ASSOCIATE response primitive (accept)", 'Evt8': "A-ASSOCIATE response primitive (reject)", 'Evt9': "P-DATA request primitive", 'Evt10': "P-DATA-TF PDU (on transport 
connection)", 'Evt11': "A-RELEASE request primitive", 'Evt12': "A-RELEASE-RQ PDU (on transport)", 'Evt13': "A-RELEASE-RP PDU (on transport)", 'Evt14': "A-RELEASE response primitive", 'Evt15': "A-ABORT request primitive", 'Evt16': "A-ABORT PDU (on transport)", 'Evt17': "Transport connection closed", 'Evt18': "ARTIM timer expired (rej/rel)", 'Evt19': "Unrecognized/invalid PDU"} TransitionTable = { ('Evt1', 'Sta1'): 'AE-1', ('Evt2', 'Sta4'): 'AE-2', ('Evt3', 'Sta2'): 'AA-1', ('Evt3', 'Sta3'): 'AA-8', ('Evt3', 'Sta5'): 'AE-3', ('Evt3', 'Sta6'): 'AA-8', ('Evt3', 'Sta7'): 'AA-8', ('Evt3', 'Sta8'): 'AA-8', ('Evt3', 'Sta9'): 'AA-8', ('Evt3', 'Sta10'): 'AA-8', ('Evt3', 'Sta11'): 'AA-8', ('Evt3', 'Sta12'): 'AA-8', ('Evt3', 'Sta13'): 'AA-6', ('Evt4', 'Sta2'): 'AA-1', ('Evt4', 'Sta3'): 'AA-8', ('Evt4', 'Sta5'): 'AE-4', ('Evt4', 'Sta6'): 'AA-8', ('Evt4', 'Sta7'): 'AA-8', ('Evt4', 'Sta8'): 'AA-8', ('Evt4', 'Sta9'): 'AA-8', ('Evt4', 'Sta10'): 'AA-8', ('Evt4', 'Sta11'): 'AA-8', ('Evt4', 'Sta12'): 'AA-8', ('Evt4', 'Sta13'): 'AA-6', ('Evt5', 'Sta1'): 'AE-5', ('Evt6', 'Sta2'): 'AE-6', ('Evt6', 'Sta3'): 'AA-8', ('Evt6', 'Sta5'): 'AA-8', ('Evt6', 'Sta6'): 'AA-8', ('Evt6', 'Sta7'): 'AA-8', ('Evt6', 'Sta8'): 'AA-8', ('Evt6', 'Sta9'): 'AA-8', ('Evt6', 'Sta10'): 'AA-8', ('Evt6', 'Sta11'): 'AA-8', ('Evt6', 'Sta12'): 'AA-8', ('Evt6', 'Sta13'): 'AA-7', ('Evt7', 'Sta3'): 'AE-7', ('Evt8', 'Sta3'): 'AE-8', ('Evt9', 'Sta6'): 'DT-1', ('Evt9', 'Sta8'): 'AR-7', ('Evt10', 'Sta2'): 'AA-1', ('Evt10', 'Sta3'): 'AA-8', ('Evt10', 'Sta5'): 'AA-8', ('Evt10', 'Sta6'): 'DT-2', ('Evt10', 'Sta7'): 'AR-6', ('Evt10', 'Sta8'): 'AA-8', ('Evt10', 'Sta9'): 'AA-8', ('Evt10', 'Sta10'): 'AA-8', ('Evt10', 'Sta11'): 'AA-8', ('Evt10', 'Sta12'): 'AA-8', ('Evt10', 'Sta13'): 'AA-6', ('Evt11', 'Sta6'): 'AR-1', ('Evt12', 'Sta2'): 'AA-1', ('Evt12', 'Sta3'): 'AA-8', ('Evt12', 'Sta5'): 'AA-8', ('Evt12', 'Sta6'): 'AR-2', ('Evt12', 'Sta7'): 'AR-8', ('Evt12', 'Sta8'): 'AA-8', ('Evt12', 'Sta9'): 'AA-8', ('Evt12', 'Sta10'): 'AA-8', 
('Evt12', 'Sta11'): 'AA-8', ('Evt12', 'Sta12'): 'AA-8', ('Evt12', 'Sta13'): 'AA-6', ('Evt13', 'Sta2'): 'AA-1', ('Evt13', 'Sta3'): 'AA-8', ('Evt13', 'Sta5'): 'AA-8', ('Evt13', 'Sta6'): 'AA-8', ('Evt13', 'Sta7'): 'AR-3', ('Evt13', 'Sta8'): 'AA-8', ('Evt13', 'Sta9'): 'AA-8', ('Evt13', 'Sta10'): 'AR-10', ('Evt13', 'Sta11'): 'AR-3', ('Evt13', 'Sta12'): 'AA-8', ('Evt13', 'Sta13'): 'AA-6', ('Evt14', 'Sta8'): 'AR-4', ('Evt14', 'Sta9'): 'AR-9', ('Evt14', 'Sta12'): 'AR-4', ('Evt15', 'Sta3'): 'AA-1', ('Evt15', 'Sta4'): 'AA-2', ('Evt15', 'Sta5'): 'AA-1', ('Evt15', 'Sta6'): 'AA-1', ('Evt15', 'Sta7'): 'AA-1', ('Evt15', 'Sta8'): 'AA-1', ('Evt15', 'Sta9'): 'AA-1', ('Evt15', 'Sta10'): 'AA-1', ('Evt15', 'Sta11'): 'AA-1', ('Evt15', 'Sta12'): 'AA-1', ('Evt16', 'Sta2'): 'AA-2', ('Evt16', 'Sta3'): 'AA-3', ('Evt16', 'Sta5'): 'AA-3', ('Evt16', 'Sta6'): 'AA-3', ('Evt16', 'Sta7'): 'AA-3', ('Evt16', 'Sta8'): 'AA-3', ('Evt16', 'Sta9'): 'AA-3', ('Evt16', 'Sta10'): 'AA-3', ('Evt16', 'Sta11'): 'AA-3', ('Evt16', 'Sta12'): 'AA-3', ('Evt16', 'Sta13'): 'AA-2', ('Evt17', 'Sta2'): 'AA-5', ('Evt17', 'Sta3'): 'AA-4', ('Evt17', 'Sta4'): 'AA-4', ('Evt17', 'Sta5'): 'AA-4', ('Evt17', 'Sta6'): 'AA-4', ('Evt17', 'Sta7'): 'AA-4', ('Evt17', 'Sta8'): 'AA-4', ('Evt17', 'Sta9'): 'AA-4', ('Evt17', 'Sta10'): 'AA-4', ('Evt17', 'Sta11'): 'AA-4', ('Evt17', 'Sta12'): 'AA-4', ('Evt17', 'Sta13'): 'AR-5', ('Evt18', 'Sta2'): 'AA-2', ('Evt18', 'Sta13'): 'AA-2', ('Evt19', 'Sta2'): 'AA-1', ('Evt19', 'Sta3'): 'AA-8', ('Evt19', 'Sta5'): 'AA-8', ('Evt19', 'Sta6'): 'AA-8', ('Evt19', 'Sta7'): 'AA-8', ('Evt19', 'Sta8'): 'AA-8', ('Evt19', 'Sta9'): 'AA-8', ('Evt19', 'Sta10'): 'AA-8', ('Evt19', 'Sta11'): 'AA-8', ('Evt19', 'Sta12'): 'AA-8', ('Evt19', 'Sta13'): 'AA-7'} class StateMachine: def __init__(self, provider): self.CurrentState = 'Sta1' self.provider = provider def Action(self, event, c): """ Execute the action triggered by event """ try: action_name = TransitionTable[(event, self.CurrentState)] except: logger.debug('%s: current 
state is: %s %s' % (self.provider.name, self.CurrentState, states[self.CurrentState])) logger.debug('%s: event: %s %s' % (self.provider.name, event, events[event])) raise return action = actions[action_name] try: logger.debug('') logger.debug('%s: current state is: %s %s' % (self.provider.name, self.CurrentState, states[self.CurrentState])) logger.debug('%s: event: %s %s' % (self.provider.name, event, events[event])) logger.debug('%s: entering action: (%s, %s) %s %s' % (self.provider.name, event, self.CurrentState, action_name, actions[action_name][0])) action[1](c) #if type(action[2]) != type(()): if not isinstance(action[2], tuple): # only one next state possible self.CurrentState = action[2] logger.debug('%s: action complete. State is now %s %s' % (self.provider.name, self.CurrentState, states[self.CurrentState])) except: raise self.provider.Kill() def NextState(self, state): self.CurrentState = state
mit
DeadBugEngineering/myHDL_shenanigans
ssd1306_8x64bit_driver/myhdl_10dev/lib/python2.7/site-packages/myhdl-1.0.dev0-py2.7.egg/myhdl/_always_seq.py
3
5131
# This file is part of the myhdl library, a Python package for using # Python as a Hardware Description Language. # # Copyright (C) 2003-2012 Jan Decaluwe # # The myhdl library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public License as # published by the Free Software Foundation; either version 2.1 of the # License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA """ Module with the always_seq decorator. """ from __future__ import absolute_import from types import FunctionType from myhdl import AlwaysError, intbv from myhdl._util import _isGenFunc from myhdl._Signal import _Signal, _WaiterList, _isListOfSigs from myhdl._always import _Always, _get_sigdict from myhdl._instance import _getCallInfo # evacuate this later AlwaysSeqError = AlwaysError class _error: pass _error.EdgeType = "first argument should be an edge" _error.ResetType = "reset argument should be a ResetSignal" _error.ArgType = "decorated object should be a classic (non-generator) function" _error.NrOfArgs = "decorated function should not have arguments" _error.SigAugAssign = "signal assignment does not support augmented assignment" _error.EmbeddedFunction = "embedded functions in always_seq function not supported" class ResetSignal(_Signal): def __init__(self, val, active, async): """ Construct a ResetSignal. This is to be used in conjunction with the always_seq decorator, as the reset argument. 
""" _Signal.__init__(self, bool(val)) self.active = bool(active) self.async = async def always_seq(edge, reset): callinfo = _getCallInfo() sigargs = [] if not isinstance(edge, _WaiterList): raise AlwaysSeqError(_error.EdgeType) edge.sig._read = True edge.sig._used = True sigargs.append(edge.sig) if reset is not None: if not isinstance(reset, ResetSignal): raise AlwaysSeqError(_error.ResetType) reset._read = True reset._used = True sigargs.append(reset) sigdict = _get_sigdict(sigargs, callinfo.symdict) def _always_seq_decorator(func): if not isinstance(func, FunctionType): raise AlwaysSeqError(_error.ArgType) if _isGenFunc(func): raise AlwaysSeqError(_error.ArgType) if func.__code__.co_argcount > 0: raise AlwaysSeqError(_error.NrOfArgs) return _AlwaysSeq(func, edge, reset, callinfo=callinfo, sigdict=sigdict) return _always_seq_decorator class _AlwaysSeq(_Always): def __init__(self, func, edge, reset, callinfo, sigdict): senslist = [edge] self.reset = reset if reset is not None: self.genfunc = self.genfunc_reset active = self.reset.active async = self.reset.async if async: if active: senslist.append(reset.posedge) else: senslist.append(reset.negedge) else: self.genfunc = self.genfunc_no_reset super(_AlwaysSeq, self).__init__( func, senslist, callinfo=callinfo, sigdict=sigdict) if self.inouts: raise AlwaysSeqError(_error.SigAugAssign, self.inouts) if self.embedded_func: raise AlwaysSeqError(_error.EmbeddedFunction) sigregs = self.sigregs = [] varregs = self.varregs = [] for n in self.outputs: reg = self.symdict[n] if isinstance(reg, _Signal): sigregs.append(reg) elif isinstance(reg, intbv): varregs.append((n, reg, int(reg))) else: assert _isListOfSigs(reg) for e in reg: sigregs.append(e) def reset_sigs(self): for s in self.sigregs: s.next = s._init def reset_vars(self): for v in self.varregs: # only intbv's for now _, reg, init = v reg._val = init def genfunc_reset(self): senslist = self.senslist if len(senslist) == 1: senslist = senslist[0] reset_sigs = 
self.reset_sigs reset_vars = self.reset_vars func = self.func while 1: yield senslist if self.reset == self.reset.active: reset_sigs() reset_vars() else: func() def genfunc_no_reset(self): senslist = self.senslist assert len(senslist) == 1 senslist = senslist[0] func = self.func while 1: yield senslist func()
lgpl-2.1
npiganeau/odoo
addons/hr_recruitment/report/__init__.py
442
1107
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved # $Id$ # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import hr_recruitment_report # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
myfleetingtime/spark2.11_bingo
python/pyspark/context.py
10
42895
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import print_function import os import shutil import signal import sys import threading import warnings from threading import RLock from tempfile import NamedTemporaryFile from py4j.protocol import Py4JError from pyspark import accumulators from pyspark.accumulators import Accumulator from pyspark.broadcast import Broadcast from pyspark.conf import SparkConf from pyspark.files import SparkFiles from pyspark.java_gateway import launch_gateway from pyspark.serializers import PickleSerializer, BatchedSerializer, UTF8Deserializer, \ PairDeserializer, AutoBatchedSerializer, NoOpSerializer from pyspark.storagelevel import StorageLevel from pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix from pyspark.traceback_utils import CallSite, first_spark_call from pyspark.status import StatusTracker from pyspark.profiler import ProfilerCollector, BasicProfiler if sys.version > '3': xrange = range __all__ = ['SparkContext'] # These are special default configs for PySpark, they will overwrite # the default ones for Spark if they are not configured by user. 
DEFAULT_CONFIGS = { "spark.serializer.objectStreamReset": 100, "spark.rdd.compress": True, } class SparkContext(object): """ Main entry point for Spark functionality. A SparkContext represents the connection to a Spark cluster, and can be used to create L{RDD} and broadcast variables on that cluster. """ _gateway = None _jvm = None _next_accum_id = 0 _active_spark_context = None _lock = RLock() _python_includes = None # zip and egg files that need to be added to PYTHONPATH PACKAGE_EXTENSIONS = ('.zip', '.egg', '.jar') def __init__(self, master=None, appName=None, sparkHome=None, pyFiles=None, environment=None, batchSize=0, serializer=PickleSerializer(), conf=None, gateway=None, jsc=None, profiler_cls=BasicProfiler): """ Create a new SparkContext. At least the master and app name should be set, either through the named parameters here or through C{conf}. :param master: Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). :param appName: A name for your job, to display on the cluster web UI. :param sparkHome: Location where Spark is installed on cluster nodes. :param pyFiles: Collection of .zip or .py files to send to the cluster and add to PYTHONPATH. These can be paths on the local file system or HDFS, HTTP, HTTPS, or FTP URLs. :param environment: A dictionary of environment variables to set on worker nodes. :param batchSize: The number of Python objects represented as a single Java object. Set 1 to disable batching, 0 to automatically choose the batch size based on object sizes, or -1 to use an unlimited batch size :param serializer: The serializer for RDDs. :param conf: A L{SparkConf} object setting Spark properties. :param gateway: Use an existing gateway and JVM, otherwise a new JVM will be instantiated. :param jsc: The JavaSparkContext instance (optional). :param profiler_cls: A class of custom Profiler used to do profiling (default is pyspark.profiler.BasicProfiler). 
>>> from pyspark.context import SparkContext >>> sc = SparkContext('local', 'test') >>> sc2 = SparkContext('local', 'test2') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError:... """ self._callsite = first_spark_call() or CallSite(None, None, None) SparkContext._ensure_initialized(self, gateway=gateway, conf=conf) try: self._do_init(master, appName, sparkHome, pyFiles, environment, batchSize, serializer, conf, jsc, profiler_cls) except: # If an error occurs, clean up in order to allow future SparkContext creation: self.stop() raise def _do_init(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer, conf, jsc, profiler_cls): self.environment = environment or {} # java gateway must have been launched at this point. if conf is not None and conf._jconf is not None: # conf has been initialized in JVM properly, so use conf directly. This represent the # scenario that JVM has been launched before SparkConf is created (e.g. SparkContext is # created and then stopped, and we create a new SparkConf and new SparkContext again) self._conf = conf else: self._conf = SparkConf(_jvm=SparkContext._jvm) self._batchSize = batchSize # -1 represents an unlimited batch size self._unbatched_serializer = serializer if batchSize == 0: self.serializer = AutoBatchedSerializer(self._unbatched_serializer) else: self.serializer = BatchedSerializer(self._unbatched_serializer, batchSize) # Set any parameters passed directly to us on the conf if master: self._conf.setMaster(master) if appName: self._conf.setAppName(appName) if sparkHome: self._conf.setSparkHome(sparkHome) if environment: for key, value in environment.items(): self._conf.setExecutorEnv(key, value) for key, value in DEFAULT_CONFIGS.items(): self._conf.setIfMissing(key, value) # Check that we have at least the required parameters if not self._conf.contains("spark.master"): raise Exception("A master URL must be set in your configuration") if not 
self._conf.contains("spark.app.name"): raise Exception("An application name must be set in your configuration") # Read back our properties from the conf in case we loaded some of them from # the classpath or an external config file self.master = self._conf.get("spark.master") self.appName = self._conf.get("spark.app.name") self.sparkHome = self._conf.get("spark.home", None) for (k, v) in self._conf.getAll(): if k.startswith("spark.executorEnv."): varName = k[len("spark.executorEnv."):] self.environment[varName] = v if sys.version >= '3.3' and 'PYTHONHASHSEED' not in os.environ: # disable randomness of hash of string in worker, if this is not # launched by spark-submit self.environment["PYTHONHASHSEED"] = "0" # Create the Java SparkContext through Py4J self._jsc = jsc or self._initialize_context(self._conf._jconf) # Reset the SparkConf to the one actually used by the SparkContext in JVM. self._conf = SparkConf(_jconf=self._jsc.sc().conf()) # Create a single Accumulator in Java that we'll send all our updates through; # they will be passed back to us through a TCP server self._accumulatorServer = accumulators._start_update_server() (host, port) = self._accumulatorServer.server_address self._javaAccumulator = self._jvm.PythonAccumulatorV2(host, port) self._jsc.sc().register(self._javaAccumulator) self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python') self.pythonVer = "%d.%d" % sys.version_info[:2] if sys.version_info < (2, 7): warnings.warn("Support for Python 2.6 is deprecated as of Spark 2.0.0") # Broadcast's __reduce__ method stores Broadcast instances here. # This allows other code to determine which Broadcast instances have # been pickled, so it can determine which Java broadcast objects to # send. 
self._pickled_broadcast_vars = set() SparkFiles._sc = self root_dir = SparkFiles.getRootDirectory() sys.path.insert(1, root_dir) # Deploy any code dependencies specified in the constructor self._python_includes = list() for path in (pyFiles or []): self.addPyFile(path) # Deploy code dependencies set by spark-submit; these will already have been added # with SparkContext.addFile, so we just need to add them to the PYTHONPATH for path in self._conf.get("spark.submit.pyFiles", "").split(","): if path != "": (dirname, filename) = os.path.split(path) if filename[-4:].lower() in self.PACKAGE_EXTENSIONS: self._python_includes.append(filename) sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename)) # Create a temporary directory inside spark.local.dir: local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(self._jsc.sc().conf()) self._temp_dir = \ self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir, "pyspark") \ .getAbsolutePath() # profiling stats collected for each PythonRDD if self._conf.get("spark.python.profile", "false") == "true": dump_path = self._conf.get("spark.python.profile.dump", None) self.profiler_collector = ProfilerCollector(profiler_cls, dump_path) else: self.profiler_collector = None # create a signal handler which would be invoked on receiving SIGINT def signal_handler(signal, frame): self.cancelAllJobs() raise KeyboardInterrupt() # see http://stackoverflow.com/questions/23206787/ if isinstance(threading.current_thread(), threading._MainThread): signal.signal(signal.SIGINT, signal_handler) def _initialize_context(self, jconf): """ Initialize SparkContext in function to allow subclass specific initialization """ return self._jvm.JavaSparkContext(jconf) @classmethod def _ensure_initialized(cls, instance=None, gateway=None, conf=None): """ Checks whether a SparkContext is initialized or not. Throws error if a SparkContext is already running. 
""" with SparkContext._lock: if not SparkContext._gateway: SparkContext._gateway = gateway or launch_gateway(conf) SparkContext._jvm = SparkContext._gateway.jvm if instance: if (SparkContext._active_spark_context and SparkContext._active_spark_context != instance): currentMaster = SparkContext._active_spark_context.master currentAppName = SparkContext._active_spark_context.appName callsite = SparkContext._active_spark_context._callsite # Raise error if there is already a running Spark context raise ValueError( "Cannot run multiple SparkContexts at once; " "existing SparkContext(app=%s, master=%s)" " created by %s at %s:%s " % (currentAppName, currentMaster, callsite.function, callsite.file, callsite.linenum)) else: SparkContext._active_spark_context = instance def __getnewargs__(self): # This method is called when attempting to pickle SparkContext, which is always an error: raise Exception( "It appears that you are attempting to reference SparkContext from a broadcast " "variable, action, or transformation. SparkContext can only be used on the driver, " "not in code that it run on workers. For more information, see SPARK-5063." ) def __enter__(self): """ Enable 'with SparkContext(...) as sc: app(sc)' syntax. """ return self def __exit__(self, type, value, trace): """ Enable 'with SparkContext(...) as sc: app' syntax. Specifically stop the context on exit of the with block. """ self.stop() @classmethod def getOrCreate(cls, conf=None): """ Get or instantiate a SparkContext and register it as a singleton object. :param conf: SparkConf (optional) """ with SparkContext._lock: if SparkContext._active_spark_context is None: SparkContext(conf=conf or SparkConf()) return SparkContext._active_spark_context def setLogLevel(self, logLevel): """ Control our logLevel. This overrides any user-defined log settings. 
Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN """ self._jsc.setLogLevel(logLevel) @classmethod def setSystemProperty(cls, key, value): """ Set a Java system property, such as spark.executor.memory. This must must be invoked before instantiating SparkContext. """ SparkContext._ensure_initialized() SparkContext._jvm.java.lang.System.setProperty(key, value) @property def version(self): """ The version of Spark on which this application is running. """ return self._jsc.version() @property @ignore_unicode_prefix def applicationId(self): """ A unique identifier for the Spark application. Its format depends on the scheduler implementation. * in case of local spark app something like 'local-1433865536131' * in case of YARN something like 'application_1433865536131_34483' >>> sc.applicationId # doctest: +ELLIPSIS u'local-...' """ return self._jsc.sc().applicationId() @property def uiWebUrl(self): """Return the URL of the SparkUI instance started by this SparkContext""" return self._jsc.sc().uiWebUrl().get() @property def startTime(self): """Return the epoch time when the Spark Context was started.""" return self._jsc.startTime() @property def defaultParallelism(self): """ Default level of parallelism to use when not given by user (e.g. for reduce tasks) """ return self._jsc.sc().defaultParallelism() @property def defaultMinPartitions(self): """ Default min number of partitions for Hadoop RDDs when not given by user """ return self._jsc.sc().defaultMinPartitions() def stop(self): """ Shut down the SparkContext. """ if getattr(self, "_jsc", None): try: self._jsc.stop() except Py4JError: # Case: SPARK-18523 warnings.warn( 'Unable to cleanly shutdown Spark JVM process.' 
' It is possible that the process has crashed,' ' been killed or may also be in a zombie state.', RuntimeWarning ) pass finally: self._jsc = None if getattr(self, "_accumulatorServer", None): self._accumulatorServer.shutdown() self._accumulatorServer = None with SparkContext._lock: SparkContext._active_spark_context = None def emptyRDD(self): """ Create an RDD that has no partitions or elements. """ return RDD(self._jsc.emptyRDD(), self, NoOpSerializer()) def range(self, start, end=None, step=1, numSlices=None): """ Create a new RDD of int containing elements from `start` to `end` (exclusive), increased by `step` every element. Can be called the same way as python's built-in range() function. If called with a single argument, the argument is interpreted as `end`, and `start` is set to 0. :param start: the start value :param end: the end value (exclusive) :param step: the incremental step (default: 1) :param numSlices: the number of partitions of the new RDD :return: An RDD of int >>> sc.range(5).collect() [0, 1, 2, 3, 4] >>> sc.range(2, 4).collect() [2, 3] >>> sc.range(1, 7, 2).collect() [1, 3, 5] """ if end is None: end = start start = 0 return self.parallelize(xrange(start, end, step), numSlices) def parallelize(self, c, numSlices=None): """ Distribute a local Python collection to form an RDD. Using xrange is recommended if the input represents a range for performance. 
>>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect() [[0], [2], [3], [4], [6]] >>> sc.parallelize(xrange(0, 6, 2), 5).glom().collect() [[], [0], [], [2], [4]] """ numSlices = int(numSlices) if numSlices is not None else self.defaultParallelism if isinstance(c, xrange): size = len(c) if size == 0: return self.parallelize([], numSlices) step = c[1] - c[0] if size > 1 else 1 start0 = c[0] def getStart(split): return start0 + int((split * size / numSlices)) * step def f(split, iterator): return xrange(getStart(split), getStart(split + 1), step) return self.parallelize([], numSlices).mapPartitionsWithIndex(f) # Calling the Java parallelize() method with an ArrayList is too slow, # because it sends O(n) Py4J commands. As an alternative, serialized # objects are written to a file and loaded through textFile(). tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir) try: # Make sure we distribute data evenly if it's smaller than self.batchSize if "__len__" not in dir(c): c = list(c) # Make it a list so we can compute its length batchSize = max(1, min(len(c) // numSlices, self._batchSize or 1024)) serializer = BatchedSerializer(self._unbatched_serializer, batchSize) serializer.dump_stream(c, tempFile) tempFile.close() readRDDFromFile = self._jvm.PythonRDD.readRDDFromFile jrdd = readRDDFromFile(self._jsc, tempFile.name, numSlices) finally: # readRDDFromFile eagerily reads the file so we can delete right after. os.unlink(tempFile.name) return RDD(jrdd, self, serializer) def pickleFile(self, name, minPartitions=None): """ Load an RDD previously saved using L{RDD.saveAsPickleFile} method. 
>>> tmpFile = NamedTemporaryFile(delete=True) >>> tmpFile.close() >>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5) >>> sorted(sc.pickleFile(tmpFile.name, 3).collect()) [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] """ minPartitions = minPartitions or self.defaultMinPartitions return RDD(self._jsc.objectFile(name, minPartitions), self) @ignore_unicode_prefix def textFile(self, name, minPartitions=None, use_unicode=True): """ Read a text file from HDFS, a local file system (available on all nodes), or any Hadoop-supported file system URI, and return it as an RDD of Strings. If use_unicode is False, the strings will be kept as `str` (encoding as `utf-8`), which is faster and smaller than unicode. (Added in Spark 1.2) >>> path = os.path.join(tempdir, "sample-text.txt") >>> with open(path, "w") as testFile: ... _ = testFile.write("Hello world!") >>> textFile = sc.textFile(path) >>> textFile.collect() [u'Hello world!'] """ minPartitions = minPartitions or min(self.defaultParallelism, 2) return RDD(self._jsc.textFile(name, minPartitions), self, UTF8Deserializer(use_unicode)) @ignore_unicode_prefix def wholeTextFiles(self, path, minPartitions=None, use_unicode=True): """ Read a directory of text files from HDFS, a local file system (available on all nodes), or any Hadoop-supported file system URI. Each file is read as a single record and returned in a key-value pair, where the key is the path of each file, the value is the content of each file. If use_unicode is False, the strings will be kept as `str` (encoding as `utf-8`), which is faster and smaller than unicode. (Added in Spark 1.2) For example, if you have the following files:: hdfs://a-hdfs-path/part-00000 hdfs://a-hdfs-path/part-00001 ... hdfs://a-hdfs-path/part-nnnnn Do C{rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")}, then C{rdd} contains:: (a-hdfs-path/part-00000, its content) (a-hdfs-path/part-00001, its content) ... (a-hdfs-path/part-nnnnn, its content) .. 
note:: Small files are preferred, as each file will be loaded fully in memory. >>> dirPath = os.path.join(tempdir, "files") >>> os.mkdir(dirPath) >>> with open(os.path.join(dirPath, "1.txt"), "w") as file1: ... _ = file1.write("1") >>> with open(os.path.join(dirPath, "2.txt"), "w") as file2: ... _ = file2.write("2") >>> textFiles = sc.wholeTextFiles(dirPath) >>> sorted(textFiles.collect()) [(u'.../1.txt', u'1'), (u'.../2.txt', u'2')] """ minPartitions = minPartitions or self.defaultMinPartitions return RDD(self._jsc.wholeTextFiles(path, minPartitions), self, PairDeserializer(UTF8Deserializer(use_unicode), UTF8Deserializer(use_unicode))) def binaryFiles(self, path, minPartitions=None): """ .. note:: Experimental Read a directory of binary files from HDFS, a local file system (available on all nodes), or any Hadoop-supported file system URI as a byte array. Each file is read as a single record and returned in a key-value pair, where the key is the path of each file, the value is the content of each file. .. note:: Small files are preferred, large file is also allowable, but may cause bad performance. """ minPartitions = minPartitions or self.defaultMinPartitions return RDD(self._jsc.binaryFiles(path, minPartitions), self, PairDeserializer(UTF8Deserializer(), NoOpSerializer())) def binaryRecords(self, path, recordLength): """ .. note:: Experimental Load data from a flat binary file, assuming each record is a set of numbers with the specified numerical format (see ByteBuffer), and the number of bytes per record is constant. 
:param path: Directory to the input data files :param recordLength: The length at which to split the records """ return RDD(self._jsc.binaryRecords(path, recordLength), self, NoOpSerializer()) def _dictToJavaMap(self, d): jm = self._jvm.java.util.HashMap() if not d: d = {} for k, v in d.items(): jm[k] = v return jm def sequenceFile(self, path, keyClass=None, valueClass=None, keyConverter=None, valueConverter=None, minSplits=None, batchSize=0): """ Read a Hadoop SequenceFile with arbitrary key and value Writable class from HDFS, a local file system (available on all nodes), or any Hadoop-supported file system URI. The mechanism is as follows: 1. A Java RDD is created from the SequenceFile or other InputFormat, and the key and value Writable classes 2. Serialization is attempted via Pyrolite pickling 3. If this fails, the fallback is to call 'toString' on each key and value 4. C{PickleSerializer} is used to deserialize pickled objects on the Python side :param path: path to sequncefile :param keyClass: fully qualified classname of key Writable class (e.g. "org.apache.hadoop.io.Text") :param valueClass: fully qualified classname of value Writable class (e.g. "org.apache.hadoop.io.LongWritable") :param keyConverter: :param valueConverter: :param minSplits: minimum splits in dataset (default min(2, sc.defaultParallelism)) :param batchSize: The number of Python objects represented as a single Java object. (default 0, choose batchSize automatically) """ minSplits = minSplits or min(self.defaultParallelism, 2) jrdd = self._jvm.PythonRDD.sequenceFile(self._jsc, path, keyClass, valueClass, keyConverter, valueConverter, minSplits, batchSize) return RDD(jrdd, self) def newAPIHadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None, valueConverter=None, conf=None, batchSize=0): """ Read a 'new API' Hadoop InputFormat with arbitrary key and value class from HDFS, a local file system (available on all nodes), or any Hadoop-supported file system URI. 
The mechanism is the same as for sc.sequenceFile. A Hadoop configuration can be passed in as a Python dict. This will be converted into a Configuration in Java :param path: path to Hadoop file :param inputFormatClass: fully qualified classname of Hadoop InputFormat (e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat") :param keyClass: fully qualified classname of key Writable class (e.g. "org.apache.hadoop.io.Text") :param valueClass: fully qualified classname of value Writable class (e.g. "org.apache.hadoop.io.LongWritable") :param keyConverter: (None by default) :param valueConverter: (None by default) :param conf: Hadoop configuration, passed in as a dict (None by default) :param batchSize: The number of Python objects represented as a single Java object. (default 0, choose batchSize automatically) """ jconf = self._dictToJavaMap(conf) jrdd = self._jvm.PythonRDD.newAPIHadoopFile(self._jsc, path, inputFormatClass, keyClass, valueClass, keyConverter, valueConverter, jconf, batchSize) return RDD(jrdd, self) def newAPIHadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None, valueConverter=None, conf=None, batchSize=0): """ Read a 'new API' Hadoop InputFormat with arbitrary key and value class, from an arbitrary Hadoop configuration, which is passed in as a Python dict. This will be converted into a Configuration in Java. The mechanism is the same as for sc.sequenceFile. :param inputFormatClass: fully qualified classname of Hadoop InputFormat (e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat") :param keyClass: fully qualified classname of key Writable class (e.g. "org.apache.hadoop.io.Text") :param valueClass: fully qualified classname of value Writable class (e.g. 
"org.apache.hadoop.io.LongWritable") :param keyConverter: (None by default) :param valueConverter: (None by default) :param conf: Hadoop configuration, passed in as a dict (None by default) :param batchSize: The number of Python objects represented as a single Java object. (default 0, choose batchSize automatically) """ jconf = self._dictToJavaMap(conf) jrdd = self._jvm.PythonRDD.newAPIHadoopRDD(self._jsc, inputFormatClass, keyClass, valueClass, keyConverter, valueConverter, jconf, batchSize) return RDD(jrdd, self) def hadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None, valueConverter=None, conf=None, batchSize=0): """ Read an 'old' Hadoop InputFormat with arbitrary key and value class from HDFS, a local file system (available on all nodes), or any Hadoop-supported file system URI. The mechanism is the same as for sc.sequenceFile. A Hadoop configuration can be passed in as a Python dict. This will be converted into a Configuration in Java. :param path: path to Hadoop file :param inputFormatClass: fully qualified classname of Hadoop InputFormat (e.g. "org.apache.hadoop.mapred.TextInputFormat") :param keyClass: fully qualified classname of key Writable class (e.g. "org.apache.hadoop.io.Text") :param valueClass: fully qualified classname of value Writable class (e.g. "org.apache.hadoop.io.LongWritable") :param keyConverter: (None by default) :param valueConverter: (None by default) :param conf: Hadoop configuration, passed in as a dict (None by default) :param batchSize: The number of Python objects represented as a single Java object. 
(default 0, choose batchSize automatically) """ jconf = self._dictToJavaMap(conf) jrdd = self._jvm.PythonRDD.hadoopFile(self._jsc, path, inputFormatClass, keyClass, valueClass, keyConverter, valueConverter, jconf, batchSize) return RDD(jrdd, self) def hadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None, valueConverter=None, conf=None, batchSize=0): """ Read an 'old' Hadoop InputFormat with arbitrary key and value class, from an arbitrary Hadoop configuration, which is passed in as a Python dict. This will be converted into a Configuration in Java. The mechanism is the same as for sc.sequenceFile. :param inputFormatClass: fully qualified classname of Hadoop InputFormat (e.g. "org.apache.hadoop.mapred.TextInputFormat") :param keyClass: fully qualified classname of key Writable class (e.g. "org.apache.hadoop.io.Text") :param valueClass: fully qualified classname of value Writable class (e.g. "org.apache.hadoop.io.LongWritable") :param keyConverter: (None by default) :param valueConverter: (None by default) :param conf: Hadoop configuration, passed in as a dict (None by default) :param batchSize: The number of Python objects represented as a single Java object. (default 0, choose batchSize automatically) """ jconf = self._dictToJavaMap(conf) jrdd = self._jvm.PythonRDD.hadoopRDD(self._jsc, inputFormatClass, keyClass, valueClass, keyConverter, valueConverter, jconf, batchSize) return RDD(jrdd, self) def _checkpointFile(self, name, input_deserializer): jrdd = self._jsc.checkpointFile(name) return RDD(jrdd, self, input_deserializer) @ignore_unicode_prefix def union(self, rdds): """ Build the union of a list of RDDs. This supports unions() of RDDs with different serialized formats, although this forces them to be reserialized using the default serializer: >>> path = os.path.join(tempdir, "union-text.txt") >>> with open(path, "w") as testFile: ... 
_ = testFile.write("Hello") >>> textFile = sc.textFile(path) >>> textFile.collect() [u'Hello'] >>> parallelized = sc.parallelize(["World!"]) >>> sorted(sc.union([textFile, parallelized]).collect()) [u'Hello', 'World!'] """ first_jrdd_deserializer = rdds[0]._jrdd_deserializer if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds): rdds = [x._reserialize() for x in rdds] first = rdds[0]._jrdd rest = [x._jrdd for x in rdds[1:]] return RDD(self._jsc.union(first, rest), self, rdds[0]._jrdd_deserializer) def broadcast(self, value): """ Broadcast a read-only variable to the cluster, returning a L{Broadcast<pyspark.broadcast.Broadcast>} object for reading it in distributed functions. The variable will be sent to each cluster only once. """ return Broadcast(self, value, self._pickled_broadcast_vars) def accumulator(self, value, accum_param=None): """ Create an L{Accumulator} with the given initial value, using a given L{AccumulatorParam} helper object to define how to add values of the data type if provided. Default AccumulatorParams are used for integers and floating-point numbers if you do not provide one. For other types, a custom AccumulatorParam can be used. """ if accum_param is None: if isinstance(value, int): accum_param = accumulators.INT_ACCUMULATOR_PARAM elif isinstance(value, float): accum_param = accumulators.FLOAT_ACCUMULATOR_PARAM elif isinstance(value, complex): accum_param = accumulators.COMPLEX_ACCUMULATOR_PARAM else: raise TypeError("No default accumulator param for type %s" % type(value)) SparkContext._next_accum_id += 1 return Accumulator(SparkContext._next_accum_id - 1, value, accum_param) def addFile(self, path, recursive=False): """ Add a file to be downloaded with this Spark job on every node. The C{path} passed can be either a local file, a file in HDFS (or other Hadoop-supported filesystems), or an HTTP, HTTPS or FTP URI. 
To access the file in Spark jobs, use L{SparkFiles.get(fileName)<pyspark.files.SparkFiles.get>} with the filename to find its download location. A directory can be given if the recursive option is set to True. Currently directories are only supported for Hadoop-supported filesystems. >>> from pyspark import SparkFiles >>> path = os.path.join(tempdir, "test.txt") >>> with open(path, "w") as testFile: ... _ = testFile.write("100") >>> sc.addFile(path) >>> def func(iterator): ... with open(SparkFiles.get("test.txt")) as testFile: ... fileVal = int(testFile.readline()) ... return [x * fileVal for x in iterator] >>> sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect() [100, 200, 300, 400] """ self._jsc.sc().addFile(path, recursive) def addPyFile(self, path): """ Add a .py or .zip dependency for all tasks to be executed on this SparkContext in the future. The C{path} passed can be either a local file, a file in HDFS (or other Hadoop-supported filesystems), or an HTTP, HTTPS or FTP URI. """ self.addFile(path) (dirname, filename) = os.path.split(path) # dirname may be directory or HDFS/S3 prefix if filename[-4:].lower() in self.PACKAGE_EXTENSIONS: self._python_includes.append(filename) # for tests in local mode sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename)) if sys.version > '3': import importlib importlib.invalidate_caches() def setCheckpointDir(self, dirName): """ Set the directory under which RDDs are going to be checkpointed. The directory must be a HDFS path if running on a cluster. """ self._jsc.sc().setCheckpointDir(dirName) def _getJavaStorageLevel(self, storageLevel): """ Returns a Java StorageLevel based on a pyspark.StorageLevel. 
""" if not isinstance(storageLevel, StorageLevel): raise Exception("storageLevel must be of type pyspark.StorageLevel") newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel return newStorageLevel(storageLevel.useDisk, storageLevel.useMemory, storageLevel.useOffHeap, storageLevel.deserialized, storageLevel.replication) def setJobGroup(self, groupId, description, interruptOnCancel=False): """ Assigns a group ID to all the jobs started by this thread until the group ID is set to a different value or cleared. Often, a unit of execution in an application consists of multiple Spark actions or jobs. Application programmers can use this method to group all those jobs together and give a group description. Once set, the Spark web UI will associate such jobs with this group. The application can use L{SparkContext.cancelJobGroup} to cancel all running jobs in this group. >>> import threading >>> from time import sleep >>> result = "Not Set" >>> lock = threading.Lock() >>> def map_func(x): ... sleep(100) ... raise Exception("Task should have been cancelled") >>> def start_job(x): ... global result ... try: ... sc.setJobGroup("job_to_cancel", "some description") ... result = sc.parallelize(range(x)).map(map_func).collect() ... except Exception as e: ... result = "Cancelled" ... lock.release() >>> def stop_job(): ... sleep(5) ... sc.cancelJobGroup("job_to_cancel") >>> supress = lock.acquire() >>> supress = threading.Thread(target=start_job, args=(10,)).start() >>> supress = threading.Thread(target=stop_job).start() >>> supress = lock.acquire() >>> print(result) Cancelled If interruptOnCancel is set to true for the job group, then job cancellation will result in Thread.interrupt() being called on the job's executor threads. This is useful to help ensure that the tasks are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead. 
""" self._jsc.setJobGroup(groupId, description, interruptOnCancel) def setLocalProperty(self, key, value): """ Set a local property that affects jobs submitted from this thread, such as the Spark fair scheduler pool. """ self._jsc.setLocalProperty(key, value) def getLocalProperty(self, key): """ Get a local property set in this thread, or null if it is missing. See L{setLocalProperty} """ return self._jsc.getLocalProperty(key) def sparkUser(self): """ Get SPARK_USER for user who is running SparkContext. """ return self._jsc.sc().sparkUser() def cancelJobGroup(self, groupId): """ Cancel active jobs for the specified group. See L{SparkContext.setJobGroup} for more information. """ self._jsc.sc().cancelJobGroup(groupId) def cancelAllJobs(self): """ Cancel all jobs that have been scheduled or are running. """ self._jsc.sc().cancelAllJobs() def statusTracker(self): """ Return :class:`StatusTracker` object """ return StatusTracker(self._jsc.statusTracker()) def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False): """ Executes the given partitionFunc on the specified set of partitions, returning the result as an array of elements. If 'partitions' is not specified, this will run over all partitions. >>> myRDD = sc.parallelize(range(6), 3) >>> sc.runJob(myRDD, lambda part: [x * x for x in part]) [0, 1, 4, 9, 16, 25] >>> myRDD = sc.parallelize(range(6), 3) >>> sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True) [0, 1, 16, 25] """ if partitions is None: partitions = range(rdd._jrdd.partitions().size()) # Implementation note: This is implemented as a mapPartitions followed # by runJob() in order to avoid having to pass a Python lambda into # SparkContext#runJob. 
mappedRDD = rdd.mapPartitions(partitionFunc) port = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions) return list(_load_from_socket(port, mappedRDD._jrdd_deserializer)) def show_profiles(self): """ Print the profile stats to stdout """ self.profiler_collector.show_profiles() def dump_profiles(self, path): """ Dump the profile stats into directory `path` """ self.profiler_collector.dump_profiles(path) def getConf(self): conf = SparkConf() conf.setAll(self._conf.getAll()) return conf def _test(): import atexit import doctest import tempfile globs = globals().copy() globs['sc'] = SparkContext('local[4]', 'PythonTest') globs['tempdir'] = tempfile.mkdtemp() atexit.register(lambda: shutil.rmtree(globs['tempdir'])) (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS) globs['sc'].stop() if failure_count: exit(-1) if __name__ == "__main__": _test()
apache-2.0
drammock/mne-python
mne/io/egi/general.py
14
6395
# -*- coding: utf-8 -*- # # License: BSD (3-clause) import os from xml.dom.minidom import parse import re import numpy as np from ...utils import _pl def _extract(tags, filepath=None, obj=None): """Extract info from XML.""" if obj is not None: fileobj = obj elif filepath is not None: fileobj = parse(filepath) else: raise ValueError('There is not object or file to extract data') infoxml = dict() for tag in tags: value = fileobj.getElementsByTagName(tag) infoxml[tag] = [] for i in range(len(value)): infoxml[tag].append(value[i].firstChild.data) return infoxml def _get_gains(filepath): """Parse gains.""" file_obj = parse(filepath) objects = file_obj.getElementsByTagName('calibration') gains = dict() for ob in objects: value = ob.getElementsByTagName('type') if value[0].firstChild.data == 'GCAL': data_g = _extract(['ch'], obj=ob)['ch'] gains.update(gcal=np.asarray(data_g, dtype=np.float64)) elif value[0].firstChild.data == 'ICAL': data_g = _extract(['ch'], obj=ob)['ch'] gains.update(ical=np.asarray(data_g, dtype=np.float64)) return gains def _get_ep_info(filepath): """Get epoch info.""" epochfile = filepath + '/epochs.xml' epochlist = parse(epochfile) epochs = epochlist.getElementsByTagName('epoch') keys = ('first_samps', 'last_samps', 'first_blocks', 'last_blocks') epoch_info = {key: list() for key in keys} for epoch in epochs: ep_begin = int(epoch.getElementsByTagName('beginTime')[0] .firstChild.data) ep_end = int(epoch.getElementsByTagName('endTime')[0].firstChild.data) first_block = int(epoch.getElementsByTagName('firstBlock')[0] .firstChild.data) last_block = int(epoch.getElementsByTagName('lastBlock')[0] .firstChild.data) epoch_info['first_samps'].append(ep_begin) epoch_info['last_samps'].append(ep_end) epoch_info['first_blocks'].append(first_block) epoch_info['last_blocks'].append(last_block) # Don't turn into ndarray here, keep native int because it can deal with # huge numbers (could use np.uint64 but it's more work) return epoch_info def 
_get_blocks(filepath): """Get info from meta data blocks.""" binfile = os.path.join(filepath) n_blocks = 0 samples_block = [] header_sizes = [] n_channels = [] sfreq = [] # Meta data consists of: # * 1 byte of flag (1 for meta data, 0 for data) # * 1 byte of header size # * 1 byte of block size # * 1 byte of n_channels # * n_channels bytes of offsets # * n_channels bytes of sigfreqs? with open(binfile, 'rb') as fid: fid.seek(0, 2) # go to end of file file_length = fid.tell() block_size = file_length fid.seek(0) position = 0 while position < file_length: block = _block_r(fid) if block is None: samples_block.append(samples_block[n_blocks - 1]) n_blocks += 1 fid.seek(block_size, 1) position = fid.tell() continue block_size = block['block_size'] header_size = block['header_size'] header_sizes.append(header_size) samples_block.append(block['nsamples']) n_blocks += 1 fid.seek(block_size, 1) sfreq.append(block['sfreq']) n_channels.append(block['nc']) position = fid.tell() if any([n != n_channels[0] for n in n_channels]): raise RuntimeError("All the blocks don't have the same amount of " "channels.") if any([f != sfreq[0] for f in sfreq]): raise RuntimeError("All the blocks don't have the same sampling " "frequency.") if len(samples_block) < 1: raise RuntimeError("There seems to be no data") samples_block = np.array(samples_block) signal_blocks = dict(n_channels=n_channels[0], sfreq=sfreq[0], n_blocks=n_blocks, samples_block=samples_block, header_sizes=header_sizes) return signal_blocks def _get_signalfname(filepath): """Get filenames.""" listfiles = os.listdir(filepath) binfiles = list(f for f in listfiles if 'signal' in f and f[-4:] == '.bin' and f[0] != '.') all_files = {} infofiles = list() for binfile in binfiles: bin_num_str = re.search(r'\d+', binfile).group() infofile = 'info' + bin_num_str + '.xml' infofiles.append(infofile) infobjfile = os.path.join(filepath, infofile) infobj = parse(infobjfile) if len(infobj.getElementsByTagName('EEG')): signal_type = 'EEG' elif 
len(infobj.getElementsByTagName('PNSData')): signal_type = 'PNS' all_files[signal_type] = { 'signal': 'signal{}.bin'.format(bin_num_str), 'info': infofile} if 'EEG' not in all_files: raise FileNotFoundError( 'Could not find any EEG data in the %d file%s found in %s:\n%s' % (len(infofiles), _pl(infofiles), filepath, '\n'.join(infofiles))) return all_files def _block_r(fid): """Read meta data.""" if np.fromfile(fid, dtype=np.dtype('i4'), count=1)[0] != 1: # not metadata return None header_size = np.fromfile(fid, dtype=np.dtype('i4'), count=1)[0] block_size = np.fromfile(fid, dtype=np.dtype('i4'), count=1)[0] hl = int(block_size / 4) nc = np.fromfile(fid, dtype=np.dtype('i4'), count=1)[0] nsamples = int(hl / nc) np.fromfile(fid, dtype=np.dtype('i4'), count=nc) # sigoffset sigfreq = np.fromfile(fid, dtype=np.dtype('i4'), count=nc) depth = sigfreq[0] & 0xFF if depth != 32: raise ValueError('I do not know how to read this MFF (depth != 32)') sfreq = sigfreq[0] >> 8 count = int(header_size / 4 - (4 + 2 * nc)) np.fromfile(fid, dtype=np.dtype('i4'), count=count) # sigoffset block = dict(nc=nc, hl=hl, nsamples=nsamples, block_size=block_size, header_size=header_size, sfreq=sfreq) return block
bsd-3-clause
mtconley/turntable
test/lib/python2.7/site-packages/scipy/stats/mstats_basic.py
7
65058
""" An extension of scipy.stats.stats to support masked arrays """ # Original author (2007): Pierre GF Gerard-Marchant # TODO : f_value_wilks_lambda looks botched... what are dfnum & dfden for ? # TODO : ttest_rel looks botched: what are x1,x2,v1,v2 for ? # TODO : reimplement ksonesamp from __future__ import division, print_function, absolute_import __all__ = ['argstoarray', 'betai', 'chisquare','count_tied_groups', 'describe', 'f_oneway','f_value_wilks_lambda','find_repeats','friedmanchisquare', 'kendalltau','kendalltau_seasonal','kruskal','kruskalwallis', 'ks_twosamp','ks_2samp','kurtosis','kurtosistest', 'linregress', 'mannwhitneyu', 'meppf','mode','moment','mquantiles','msign', 'normaltest', 'obrientransform', 'pearsonr','plotting_positions','pointbiserialr', 'rankdata', 'scoreatpercentile','sem', 'sen_seasonal_slopes','signaltonoise','skew','skewtest','spearmanr', 'theilslopes','threshold','tmax','tmean','tmin','trim','trimboth', 'trimtail','trima','trimr','trimmed_mean','trimmed_std', 'trimmed_stde','trimmed_var','tsem','ttest_1samp','ttest_onesamp', 'ttest_ind','ttest_rel','tvar', 'variation', 'winsorize', 'zmap', 'zscore' ] import numpy as np from numpy import ndarray import numpy.ma as ma from numpy.ma import masked, nomask from scipy.lib.six import iteritems import itertools import warnings from . import stats from . import distributions import scipy.special as special from . import futil genmissingvaldoc = """ Notes ----- Missing values are considered pair-wise: if a value is missing in x, the corresponding value in y is masked. 
""" def _chk_asarray(a, axis): # Always returns a masked array, raveled for axis=None a = ma.asanyarray(a) if axis is None: a = ma.ravel(a) outaxis = 0 else: outaxis = axis return a, outaxis def _chk2_asarray(a, b, axis): a = ma.asanyarray(a) b = ma.asanyarray(b) if axis is None: a = ma.ravel(a) b = ma.ravel(b) outaxis = 0 else: outaxis = axis return a, b, outaxis def _chk_size(a,b): a = ma.asanyarray(a) b = ma.asanyarray(b) (na, nb) = (a.size, b.size) if na != nb: raise ValueError("The size of the input array should match!" " (%s <> %s)" % (na, nb)) return (a, b, na) def argstoarray(*args): """ Constructs a 2D array from a group of sequences. Sequences are filled with missing values to match the length of the longest sequence. Parameters ---------- args : sequences Group of sequences. Returns ------- argstoarray : MaskedArray A ( `m` x `n` ) masked array, where `m` is the number of arguments and `n` the length of the longest argument. Notes ----- `numpy.ma.row_stack` has identical behavior, but is called with a sequence of sequences. """ if len(args) == 1 and not isinstance(args[0], ndarray): output = ma.asarray(args[0]) if output.ndim != 2: raise ValueError("The input should be 2D") else: n = len(args) m = max([len(k) for k in args]) output = ma.array(np.empty((n,m), dtype=float), mask=True) for (k,v) in enumerate(args): output[k,:len(v)] = v output[np.logical_not(np.isfinite(output._data))] = masked return output def find_repeats(arr): """Find repeats in arr and return a tuple (repeats, repeat_count). Masked values are discarded. Parameters ---------- arr : sequence Input array. The array is flattened if it is not 1D. Returns ------- repeats : ndarray Array of repeated values. counts : ndarray Array of counts. """ marr = ma.compressed(arr) if not marr.size: return (np.array(0), np.array(0)) (v1, v2, n) = futil.dfreps(ma.array(ma.compressed(arr), copy=True)) return (v1[:n], v2[:n]) def count_tied_groups(x, use_missing=False): """ Counts the number of tied values. 
Parameters ---------- x : sequence Sequence of data on which to counts the ties use_missing : boolean Whether to consider missing values as tied. Returns ------- count_tied_groups : dict Returns a dictionary (nb of ties: nb of groups). Examples -------- >>> from scipy.stats import mstats >>> z = [0, 0, 0, 2, 2, 2, 3, 3, 4, 5, 6] >>> mstats.count_tied_groups(z) {2: 1, 3: 2} In the above example, the ties were 0 (3x), 2 (3x) and 3 (2x). >>> z = np.ma.array([0, 0, 1, 2, 2, 2, 3, 3, 4, 5, 6]) >>> mstats.count_tied_groups(z) {2: 2, 3: 1} >>> z[[1,-1]] = np.ma.masked >>> mstats.count_tied_groups(z, use_missing=True) {2: 2, 3: 1} """ nmasked = ma.getmask(x).sum() # We need the copy as find_repeats will overwrite the initial data data = ma.compressed(x).copy() (ties, counts) = find_repeats(data) nties = {} if len(ties): nties = dict(zip(np.unique(counts), itertools.repeat(1))) nties.update(dict(zip(*find_repeats(counts)))) if nmasked and use_missing: try: nties[nmasked] += 1 except KeyError: nties[nmasked] = 1 return nties def rankdata(data, axis=None, use_missing=False): """Returns the rank (also known as order statistics) of each data point along the given axis. If some values are tied, their rank is averaged. If some values are masked, their rank is set to 0 if use_missing is False, or set to the average rank of the unmasked values if use_missing is True. Parameters ---------- data : sequence Input data. The data is transformed to a masked array axis : {None,int}, optional Axis along which to perform the ranking. If None, the array is first flattened. An exception is raised if the axis is specified for arrays with a dimension larger than 2 use_missing : {boolean}, optional Whether the masked values have a rank of 0 (False) or equal to the average rank of the unmasked values (True). """ def _rank1d(data, use_missing=False): n = data.count() rk = np.empty(data.size, dtype=float) idx = data.argsort() rk[idx[:n]] = np.arange(1,n+1) if use_missing: rk[idx[n:]] = (n+1)/2. 
else: rk[idx[n:]] = 0 repeats = find_repeats(data.copy()) for r in repeats[0]: condition = (data == r).filled(False) rk[condition] = rk[condition].mean() return rk data = ma.array(data, copy=False) if axis is None: if data.ndim > 1: return _rank1d(data.ravel(), use_missing).reshape(data.shape) else: return _rank1d(data, use_missing) else: return ma.apply_along_axis(_rank1d,axis,data,use_missing).view(ndarray) def mode(a, axis=0): a, axis = _chk_asarray(a, axis) def _mode1D(a): (rep,cnt) = find_repeats(a) if not cnt.ndim: return (0, 0) elif cnt.size: return (rep[cnt.argmax()], cnt.max()) else: not_masked_indices = ma.flatnotmasked_edges(a) first_not_masked_index = not_masked_indices[0] return (a[first_not_masked_index], 1) if axis is None: output = _mode1D(ma.ravel(a)) output = (ma.array(output[0]), ma.array(output[1])) else: output = ma.apply_along_axis(_mode1D, axis, a) newshape = list(a.shape) newshape[axis] = 1 slices = [slice(None)] * output.ndim slices[axis] = 0 modes = output[tuple(slices)].reshape(newshape) slices[axis] = 1 counts = output[tuple(slices)].reshape(newshape) output = (modes, counts) return output mode.__doc__ = stats.mode.__doc__ def betai(a, b, x): x = np.asanyarray(x) x = ma.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0 return special.betainc(a, b, x) betai.__doc__ = stats.betai.__doc__ def msign(x): """Returns the sign of x, or 0 if x is masked.""" return ma.filled(np.sign(x), 0) def pearsonr(x,y): """ Calculates a Pearson correlation coefficient and the p-value for testing non-correlation. The Pearson correlation coefficient measures the linear relationship between two datasets. Strictly speaking, Pearson's correlation requires that each dataset be normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as `x` increases, so does `y`. 
    Negative correlations imply that as `x` increases, `y` decreases.

    The p-value roughly indicates the probability of an uncorrelated system
    producing datasets that have a Pearson correlation at least as extreme
    as the one computed from these datasets. The p-values are not entirely
    reliable but are probably reasonable for datasets larger than 500 or so.

    Parameters
    ----------
    x : 1-D array_like
        Input
    y : 1-D array_like
        Input

    Returns
    -------
    pearsonr : float
        Pearson's correlation coefficient, 2-tailed p-value.

    References
    ----------
    http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation

    """
    (x, y, n) = _chk_size(x, y)
    (x, y) = (x.ravel(), y.ravel())
    # Get the common mask and the total nb of unmasked elements
    m = ma.mask_or(ma.getmask(x), ma.getmask(y))
    n -= m.sum()
    df = n-2
    if df < 0:
        # Fewer than 3 paired, unmasked observations: the coefficient is
        # undefined, so report both outputs as masked.
        return (masked, masked)

    (mx, my) = (x.mean(), y.mean())
    (xm, ym) = (x-mx, y-my)

    r_num = ma.add.reduce(xm*ym)
    r_den = ma.sqrt(ma.dot(xm,xm) * ma.dot(ym,ym))
    r = r_num / r_den
    # Presumably, if r > 1, then it is only some small artifact of floating
    # point arithmetic.
    r = min(r, 1.0)
    r = max(r, -1.0)
    df = n - 2

    if r is masked or abs(r) == 1.0:
        # Degenerate case: exact (anti-)correlation or masked result.
        prob = 0.
    else:
        # Two-sided p-value from the t-distribution via the regularized
        # incomplete beta function.
        t_squared = (df / ((1.0 - r) * (1.0 + r))) * r * r
        prob = betai(0.5*df, 0.5, df/(df + t_squared))

    return r, prob


def spearmanr(x, y, use_ties=True):
    """
    Calculates a Spearman rank-order correlation coefficient and the p-value
    to test for non-correlation.

    The Spearman correlation is a nonparametric measure of the linear
    relationship between two datasets. Unlike the Pearson correlation, the
    Spearman correlation does not assume that both datasets are normally
    distributed. Like other correlation coefficients, this one varies
    between -1 and +1 with 0 implying no correlation. Correlations of -1 or
    +1 imply an exact linear relationship. Positive correlations imply that
    as `x` increases, so does `y`. Negative correlations imply that as `x`
    increases, `y` decreases.
Missing values are discarded pair-wise: if a value is missing in `x`, the corresponding value in `y` is masked. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. Parameters ---------- x : array_like The length of `x` must be > 2. y : array_like The length of `y` must be > 2. use_ties : bool, optional Whether the correction for ties should be computed. Returns ------- spearmanr : float Spearman correlation coefficient, 2-tailed p-value. References ---------- [CRCProbStat2000] section 14.7 """ (x, y, n) = _chk_size(x, y) (x, y) = (x.ravel(), y.ravel()) m = ma.mask_or(ma.getmask(x), ma.getmask(y)) n -= m.sum() if m is not nomask: x = ma.array(x, mask=m, copy=True) y = ma.array(y, mask=m, copy=True) df = n-2 if df < 0: raise ValueError("The input must have at least 3 entries!") # Gets the ranks and rank differences rankx = rankdata(x) ranky = rankdata(y) dsq = np.add.reduce((rankx-ranky)**2) # Tie correction if use_ties: xties = count_tied_groups(x) yties = count_tied_groups(y) corr_x = np.sum(v*k*(k**2-1) for (k,v) in iteritems(xties))/12. corr_y = np.sum(v*k*(k**2-1) for (k,v) in iteritems(yties))/12. else: corr_x = corr_y = 0 denom = n*(n**2 - 1)/6. if corr_x != 0 or corr_y != 0: rho = denom - dsq - corr_x - corr_y rho /= ma.sqrt((denom-2*corr_x)*(denom-2*corr_y)) else: rho = 1. - dsq/denom t = ma.sqrt(ma.divide(df,(rho+1.0)*(1.0-rho))) * rho if t is masked: prob = 0. else: prob = betai(0.5*df,0.5,df/(df+t*t)) return rho, prob def kendalltau(x, y, use_ties=True, use_missing=False): """ Computes Kendall's rank correlation tau on two variables *x* and *y*. Parameters ---------- xdata : sequence First data list (for example, time). ydata : sequence Second data list. 
use_ties : {True, False}, optional Whether ties correction should be performed. use_missing : {False, True}, optional Whether missing data should be allocated a rank of 0 (False) or the average rank (True) Returns ------- tau : float Kendall tau prob : float Approximate 2-side p-value. """ (x, y, n) = _chk_size(x, y) (x, y) = (x.flatten(), y.flatten()) m = ma.mask_or(ma.getmask(x), ma.getmask(y)) if m is not nomask: x = ma.array(x, mask=m, copy=True) y = ma.array(y, mask=m, copy=True) n -= m.sum() if n < 2: return (np.nan, np.nan) rx = ma.masked_equal(rankdata(x, use_missing=use_missing), 0) ry = ma.masked_equal(rankdata(y, use_missing=use_missing), 0) idx = rx.argsort() (rx, ry) = (rx[idx], ry[idx]) C = np.sum([((ry[i+1:] > ry[i]) * (rx[i+1:] > rx[i])).filled(0).sum() for i in range(len(ry)-1)], dtype=float) D = np.sum([((ry[i+1:] < ry[i])*(rx[i+1:] > rx[i])).filled(0).sum() for i in range(len(ry)-1)], dtype=float) if use_ties: xties = count_tied_groups(x) yties = count_tied_groups(y) corr_x = np.sum([v*k*(k-1) for (k,v) in iteritems(xties)], dtype=float) corr_y = np.sum([v*k*(k-1) for (k,v) in iteritems(yties)], dtype=float) denom = ma.sqrt((n*(n-1)-corr_x)/2. * (n*(n-1)-corr_y)/2.) else: denom = n*(n-1)/2. tau = (C-D) / denom var_s = n*(n-1)*(2*n+5) if use_ties: var_s -= np.sum(v*k*(k-1)*(2*k+5)*1. for (k,v) in iteritems(xties)) var_s -= np.sum(v*k*(k-1)*(2*k+5)*1. for (k,v) in iteritems(yties)) v1 = np.sum([v*k*(k-1) for (k, v) in iteritems(xties)], dtype=float) *\ np.sum([v*k*(k-1) for (k, v) in iteritems(yties)], dtype=float) v1 /= 2.*n*(n-1) if n > 2: v2 = np.sum([v*k*(k-1)*(k-2) for (k,v) in iteritems(xties)], dtype=float) * \ np.sum([v*k*(k-1)*(k-2) for (k,v) in iteritems(yties)], dtype=float) v2 /= 9.*n*(n-1)*(n-2) else: v2 = 0 else: v1 = v2 = 0 var_s /= 18. 
var_s += (v1 + v2) z = (C-D)/np.sqrt(var_s) prob = special.erfc(abs(z)/np.sqrt(2)) return (tau, prob) def kendalltau_seasonal(x): """ Computes a multivariate Kendall's rank correlation tau, for seasonal data. Parameters ---------- x : 2-D ndarray Array of seasonal data, with seasons in columns. """ x = ma.array(x, subok=True, copy=False, ndmin=2) (n,m) = x.shape n_p = x.count(0) S_szn = np.sum(msign(x[i:]-x[i]).sum(0) for i in range(n)) S_tot = S_szn.sum() n_tot = x.count() ties = count_tied_groups(x.compressed()) corr_ties = np.sum(v*k*(k-1) for (k,v) in iteritems(ties)) denom_tot = ma.sqrt(1.*n_tot*(n_tot-1)*(n_tot*(n_tot-1)-corr_ties))/2. R = rankdata(x, axis=0, use_missing=True) K = ma.empty((m,m), dtype=int) covmat = ma.empty((m,m), dtype=float) denom_szn = ma.empty(m, dtype=float) for j in range(m): ties_j = count_tied_groups(x[:,j].compressed()) corr_j = np.sum(v*k*(k-1) for (k,v) in iteritems(ties_j)) cmb = n_p[j]*(n_p[j]-1) for k in range(j,m,1): K[j,k] = np.sum(msign((x[i:,j]-x[i,j])*(x[i:,k]-x[i,k])).sum() for i in range(n)) covmat[j,k] = (K[j,k] + 4*(R[:,j]*R[:,k]).sum() - n*(n_p[j]+1)*(n_p[k]+1))/3. K[k,j] = K[j,k] covmat[k,j] = covmat[j,k] denom_szn[j] = ma.sqrt(cmb*(cmb-corr_j)) / 2. 
var_szn = covmat.diagonal() z_szn = msign(S_szn) * (abs(S_szn)-1) / ma.sqrt(var_szn) z_tot_ind = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(var_szn.sum()) z_tot_dep = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(covmat.sum()) prob_szn = special.erfc(abs(z_szn)/np.sqrt(2)) prob_tot_ind = special.erfc(abs(z_tot_ind)/np.sqrt(2)) prob_tot_dep = special.erfc(abs(z_tot_dep)/np.sqrt(2)) chi2_tot = (z_szn*z_szn).sum() chi2_trd = m * z_szn.mean()**2 output = {'seasonal tau': S_szn/denom_szn, 'global tau': S_tot/denom_tot, 'global tau (alt)': S_tot/denom_szn.sum(), 'seasonal p-value': prob_szn, 'global p-value (indep)': prob_tot_ind, 'global p-value (dep)': prob_tot_dep, 'chi2 total': chi2_tot, 'chi2 trend': chi2_trd, } return output def pointbiserialr(x, y): x = ma.fix_invalid(x, copy=True).astype(bool) y = ma.fix_invalid(y, copy=True).astype(float) # Get rid of the missing data m = ma.mask_or(ma.getmask(x), ma.getmask(y)) if m is not nomask: unmask = np.logical_not(m) x = x[unmask] y = y[unmask] n = len(x) # phat is the fraction of x values that are True phat = x.sum() / float(n) y0 = y[~x] # y-values where x is False y1 = y[x] # y-values where x is True y0m = y0.mean() y1m = y1.mean() rpb = (y1m - y0m)*np.sqrt(phat * (1-phat)) / y.std() df = n-2 t = rpb*ma.sqrt(df/(1.0-rpb**2)) prob = betai(0.5*df, 0.5, df/(df+t*t)) return rpb, prob if stats.pointbiserialr.__doc__: pointbiserialr.__doc__ = stats.pointbiserialr.__doc__ + genmissingvaldoc def linregress(*args): """ Linear regression calculation Note that the non-masked version is used, and that this docstring is replaced by the non-masked docstring + some info on missing data. 
""" if len(args) == 1: # Input is a single 2-D array containing x and y args = ma.array(args[0], copy=True) if len(args) == 2: x = args[0] y = args[1] else: x = args[:, 0] y = args[:, 1] else: # Input is two 1-D arrays x = ma.array(args[0]).flatten() y = ma.array(args[1]).flatten() m = ma.mask_or(ma.getmask(x), ma.getmask(y), shrink=False) if m is not nomask: x = ma.array(x, mask=m) y = ma.array(y, mask=m) if np.any(~m): slope, intercept, r, prob, sterrest = stats.linregress(x.data[~m], y.data[~m]) else: # All data is masked return None, None, None, None, None else: slope, intercept, r, prob, sterrest = stats.linregress(x.data, y.data) return slope, intercept, r, prob, sterrest if stats.linregress.__doc__: linregress.__doc__ = stats.linregress.__doc__ + genmissingvaldoc def theilslopes(y, x=None, alpha=0.95): y = ma.asarray(y).flatten() if x is None: x = ma.arange(len(y), dtype=float) else: x = ma.asarray(x).flatten() if len(x) != len(y): raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y),len(x))) m = ma.mask_or(ma.getmask(x), ma.getmask(y)) y._mask = x._mask = m # Disregard any masked elements of x or y y = y.compressed() x = x.compressed().astype(float) # We now have unmasked arrays so can use `stats.theilslopes` return stats.theilslopes(y, x, alpha=alpha) theilslopes.__doc__ = stats.theilslopes.__doc__ def sen_seasonal_slopes(x): x = ma.array(x, subok=True, copy=False, ndmin=2) (n,_) = x.shape # Get list of slopes per season szn_slopes = ma.vstack([(x[i+1:]-x[i])/np.arange(1,n-i)[:,None] for i in range(n)]) szn_medslopes = ma.median(szn_slopes, axis=0) medslope = ma.median(szn_slopes, axis=None) return szn_medslopes, medslope def ttest_1samp(a, popmean, axis=0): a, axis = _chk_asarray(a, axis) if a.size == 0: return (np.nan, np.nan) x = a.mean(axis=axis) v = a.var(axis=axis, ddof=1) n = a.count(axis=axis) df = n - 1. 
svar = ((n - 1) * v) / df t = (x - popmean) / ma.sqrt(svar / n) prob = betai(0.5 * df, 0.5, df / (df + t*t)) return t, prob ttest_1samp.__doc__ = stats.ttest_1samp.__doc__ ttest_onesamp = ttest_1samp def ttest_ind(a, b, axis=0): a, b, axis = _chk2_asarray(a, b, axis) if a.size == 0 or b.size == 0: return (np.nan, np.nan) (x1, x2) = (a.mean(axis), b.mean(axis)) (v1, v2) = (a.var(axis=axis, ddof=1), b.var(axis=axis, ddof=1)) (n1, n2) = (a.count(axis), b.count(axis)) df = n1 + n2 - 2. svar = ((n1-1)*v1+(n2-1)*v2) / df t = (x1-x2)/ma.sqrt(svar*(1.0/n1 + 1.0/n2)) # n-D computation here! t = ma.filled(t, 1) # replace NaN t-values with 1.0 probs = betai(0.5 * df, 0.5, df/(df + t*t)).reshape(t.shape) return t, probs.squeeze() ttest_ind.__doc__ = stats.ttest_ind.__doc__ def ttest_rel(a, b, axis=0): a, b, axis = _chk2_asarray(a, b, axis) if len(a) != len(b): raise ValueError('unequal length arrays') if a.size == 0 or b.size == 0: return (np.nan, np.nan) (x1, x2) = (a.mean(axis), b.mean(axis)) (v1, v2) = (a.var(axis=axis, ddof=1), b.var(axis=axis, ddof=1)) n = a.count(axis) df = (n-1.0) d = (a-b).astype('d') denom = ma.sqrt((n*ma.add.reduce(d*d,axis) - ma.add.reduce(d,axis)**2) / df) t = ma.add.reduce(d, axis) / denom t = ma.filled(t, 1) probs = betai(0.5*df,0.5,df/(df+t*t)).reshape(t.shape).squeeze() return t, probs ttest_rel.__doc__ = stats.ttest_rel.__doc__ # stats.chisquare works with masked arrays, so we don't need to # implement it here. # For backwards compatibilty, stats.chisquare is included in # the stats.mstats namespace. chisquare = stats.chisquare def mannwhitneyu(x,y, use_continuity=True): """ Computes the Mann-Whitney statistic Missing values in `x` and/or `y` are discarded. Parameters ---------- x : sequence Input y : sequence Input use_continuity : {True, False}, optional Whether a continuity correction (1/2.) should be taken into account. Returns ------- u : float The Mann-Whitney statistics prob : float Approximate p-value assuming a normal distribution. 
""" x = ma.asarray(x).compressed().view(ndarray) y = ma.asarray(y).compressed().view(ndarray) ranks = rankdata(np.concatenate([x,y])) (nx, ny) = (len(x), len(y)) nt = nx + ny U = ranks[:nx].sum() - nx*(nx+1)/2. U = max(U, nx*ny - U) u = nx*ny - U mu = (nx*ny)/2. sigsq = (nt**3 - nt)/12. ties = count_tied_groups(ranks) sigsq -= np.sum(v*(k**3-k) for (k,v) in iteritems(ties))/12. sigsq *= nx*ny/float(nt*(nt-1)) if use_continuity: z = (U - 1/2. - mu) / ma.sqrt(sigsq) else: z = (U - mu) / ma.sqrt(sigsq) prob = special.erfc(abs(z)/np.sqrt(2)) return (u, prob) def kruskalwallis(*args): output = argstoarray(*args) ranks = ma.masked_equal(rankdata(output, use_missing=False), 0) sumrk = ranks.sum(-1) ngrp = ranks.count(-1) ntot = ranks.count() H = 12./(ntot*(ntot+1)) * (sumrk**2/ngrp).sum() - 3*(ntot+1) # Tie correction ties = count_tied_groups(ranks) T = 1. - np.sum(v*(k**3-k) for (k,v) in iteritems(ties))/float(ntot**3-ntot) if T == 0: raise ValueError('All numbers are identical in kruskal') H /= T df = len(output) - 1 prob = stats.chisqprob(H,df) return (H, prob) kruskal = kruskalwallis kruskalwallis.__doc__ = stats.kruskal.__doc__ def ks_twosamp(data1, data2, alternative="two-sided"): """ Computes the Kolmogorov-Smirnov test on two samples. Missing values are discarded. Parameters ---------- data1 : array_like First data set data2 : array_like Second data set alternative : {'two-sided', 'less', 'greater'}, optional Indicates the alternative hypothesis. Default is 'two-sided'. Returns ------- d : float Value of the Kolmogorov Smirnov test p : float Corresponding p-value. 
""" (data1, data2) = (ma.asarray(data1), ma.asarray(data2)) (n1, n2) = (data1.count(), data2.count()) n = (n1*n2/float(n1+n2)) mix = ma.concatenate((data1.compressed(), data2.compressed())) mixsort = mix.argsort(kind='mergesort') csum = np.where(mixsort < n1, 1./n1, -1./n2).cumsum() # Check for ties if len(np.unique(mix)) < (n1+n2): csum = csum[np.r_[np.diff(mix[mixsort]).nonzero()[0],-1]] alternative = str(alternative).lower()[0] if alternative == 't': d = ma.abs(csum).max() prob = special.kolmogorov(np.sqrt(n)*d) elif alternative == 'l': d = -csum.min() prob = np.exp(-2*n*d**2) elif alternative == 'g': d = csum.max() prob = np.exp(-2*n*d**2) else: raise ValueError("Invalid value for the alternative hypothesis: " "should be in 'two-sided', 'less' or 'greater'") return (d, prob) ks_2samp = ks_twosamp def ks_twosamp_old(data1, data2): """ Computes the Kolmogorov-Smirnov statistic on 2 samples. Returns ------- KS D-value, p-value """ (data1, data2) = [ma.asarray(d).compressed() for d in (data1,data2)] return stats.ks_2samp(data1,data2) def threshold(a, threshmin=None, threshmax=None, newval=0): """ Clip array to a given value. Similar to numpy.clip(), except that values less than `threshmin` or greater than `threshmax` are replaced by `newval`, instead of by `threshmin` and `threshmax` respectively. Parameters ---------- a : ndarray Input data threshmin : {None, float}, optional Lower threshold. If None, set to the minimum value. threshmax : {None, float}, optional Upper threshold. If None, set to the maximum value. newval : {0, float}, optional Value outside the thresholds. Returns ------- threshold : ndarray Returns `a`, with values less then `threshmin` and values greater `threshmax` replaced with `newval`. 
""" a = ma.array(a, copy=True) mask = np.zeros(a.shape, dtype=bool) if threshmin is not None: mask |= (a < threshmin).filled(False) if threshmax is not None: mask |= (a > threshmax).filled(False) a[mask] = newval return a def trima(a, limits=None, inclusive=(True,True)): """ Trims an array by masking the data outside some given limits. Returns a masked version of the input array. Parameters ---------- a : array_like Input array. limits : {None, tuple}, optional Tuple of (lower limit, upper limit) in absolute values. Values of the input array lower (greater) than the lower (upper) limit will be masked. A limit is None indicates an open interval. inclusive : (bool, bool) tuple, optional Tuple of (lower flag, upper flag), indicating whether values exactly equal to the lower (upper) limit are allowed. """ a = ma.asarray(a) a.unshare_mask() if (limits is None) or (limits == (None, None)): return a (lower_lim, upper_lim) = limits (lower_in, upper_in) = inclusive condition = False if lower_lim is not None: if lower_in: condition |= (a < lower_lim) else: condition |= (a <= lower_lim) if upper_lim is not None: if upper_in: condition |= (a > upper_lim) else: condition |= (a >= upper_lim) a[condition.filled(True)] = masked return a def trimr(a, limits=None, inclusive=(True, True), axis=None): """ Trims an array by masking some proportion of the data on each end. Returns a masked version of the input array. Parameters ---------- a : sequence Input array. limits : {None, tuple}, optional Tuple of the percentages to cut on each side of the array, with respect to the number of unmasked data, as floats between 0. and 1. Noting n the number of unmasked data before trimming, the (n*limits[0])th smallest data and the (n*limits[1])th largest data are masked, and the total number of unmasked data after trimming is n*(1.-sum(limits)). The value of one limit can be set to None to indicate an open interval. 
inclusive : {(True,True) tuple}, optional Tuple of flags indicating whether the number of data being masked on the left (right) end should be truncated (True) or rounded (False) to integers. axis : {None,int}, optional Axis along which to trim. If None, the whole array is trimmed, but its shape is maintained. """ def _trimr1D(a, low_limit, up_limit, low_inclusive, up_inclusive): n = a.count() idx = a.argsort() if low_limit: if low_inclusive: lowidx = int(low_limit*n) else: lowidx = np.round(low_limit*n) a[idx[:lowidx]] = masked if up_limit is not None: if up_inclusive: upidx = n - int(n*up_limit) else: upidx = n - np.round(n*up_limit) a[idx[upidx:]] = masked return a a = ma.asarray(a) a.unshare_mask() if limits is None: return a # Check the limits (lolim, uplim) = limits errmsg = "The proportion to cut from the %s should be between 0. and 1." if lolim is not None: if lolim > 1. or lolim < 0: raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim) if uplim is not None: if uplim > 1. or uplim < 0: raise ValueError(errmsg % 'end' + "(got %s)" % uplim) (loinc, upinc) = inclusive if axis is None: shp = a.shape return _trimr1D(a.ravel(),lolim,uplim,loinc,upinc).reshape(shp) else: return ma.apply_along_axis(_trimr1D, axis, a, lolim,uplim,loinc,upinc) trimdoc = """ Parameters ---------- a : sequence Input array limits : {None, tuple}, optional If `relative` is False, tuple (lower limit, upper limit) in absolute values. Values of the input array lower (greater) than the lower (upper) limit are masked. If `relative` is True, tuple (lower percentage, upper percentage) to cut on each side of the array, with respect to the number of unmasked data. Noting n the number of unmasked data before trimming, the (n*limits[0])th smallest data and the (n*limits[1])th largest data are masked, and the total number of unmasked data after trimming is n*(1.-sum(limits)) In each case, the value of one limit can be set to None to indicate an open interval. 
If limits is None, no trimming is performed inclusive : {(bool, bool) tuple}, optional If `relative` is False, tuple indicating whether values exactly equal to the absolute limits are allowed. If `relative` is True, tuple indicating whether the number of data being masked on each side should be rounded (True) or truncated (False). relative : bool, optional Whether to consider the limits as absolute values (False) or proportions to cut (True). axis : int, optional Axis along which to trim. """ def trim(a, limits=None, inclusive=(True,True), relative=False, axis=None): """ Trims an array by masking the data outside some given limits. Returns a masked version of the input array. %s Examples -------- >>> z = [ 1, 2, 3, 4, 5, 6, 7, 8, 9,10] >>> trim(z,(3,8)) [--,--, 3, 4, 5, 6, 7, 8,--,--] >>> trim(z,(0.1,0.2),relative=True) [--, 2, 3, 4, 5, 6, 7, 8,--,--] """ if relative: return trimr(a, limits=limits, inclusive=inclusive, axis=axis) else: return trima(a, limits=limits, inclusive=inclusive) if trim.__doc__ is not None: trim.__doc__ = trim.__doc__ % trimdoc def trimboth(data, proportiontocut=0.2, inclusive=(True,True), axis=None): """ Trims the smallest and largest data values. Trims the `data` by masking the ``int(proportiontocut * n)`` smallest and ``int(proportiontocut * n)`` largest values of data along the given axis, where n is the number of unmasked values before trimming. Parameters ---------- data : ndarray Data to trim. proportiontocut : float, optional Percentage of trimming (as a float between 0 and 1). If n is the number of unmasked values before trimming, the number of values after trimming is ``(1 - 2*proportiontocut) * n``. Default is 0.2. inclusive : {(bool, bool) tuple}, optional Tuple indicating whether the number of data being masked on each side should be rounded (True) or truncated (False). axis : int, optional Axis along which to perform the trimming. If None, the input array is first flattened. 
""" return trimr(data, limits=(proportiontocut,proportiontocut), inclusive=inclusive, axis=axis) def trimtail(data, proportiontocut=0.2, tail='left', inclusive=(True,True), axis=None): """ Trims the data by masking values from one tail. Parameters ---------- data : array_like Data to trim. proportiontocut : float, optional Percentage of trimming. If n is the number of unmasked values before trimming, the number of values after trimming is ``(1 - proportiontocut) * n``. Default is 0.2. tail : {'left','right'}, optional If 'left' the `proportiontocut` lowest values will be masked. If 'right' the `proportiontocut` highest values will be masked. Default is 'left'. inclusive : {(bool, bool) tuple}, optional Tuple indicating whether the number of data being masked on each side should be rounded (True) or truncated (False). Default is (True, True). axis : int, optional Axis along which to perform the trimming. If None, the input array is first flattened. Default is None. Returns ------- trimtail : ndarray Returned array of same shape as `data` with masked tail values. """ tail = str(tail).lower()[0] if tail == 'l': limits = (proportiontocut,None) elif tail == 'r': limits = (None, proportiontocut) else: raise TypeError("The tail argument should be in ('left','right')") return trimr(data, limits=limits, axis=axis, inclusive=inclusive) trim1 = trimtail def trimmed_mean(a, limits=(0.1,0.1), inclusive=(1,1), relative=True, axis=None): """Returns the trimmed mean of the data along the given axis. %s """ % trimdoc if (not isinstance(limits,tuple)) and isinstance(limits,float): limits = (limits, limits) if relative: return trimr(a,limits=limits,inclusive=inclusive,axis=axis).mean(axis=axis) else: return trima(a,limits=limits,inclusive=inclusive).mean(axis=axis) def trimmed_var(a, limits=(0.1,0.1), inclusive=(1,1), relative=True, axis=None, ddof=0): """Returns the trimmed variance of the data along the given axis. %s ddof : {0,integer}, optional Means Delta Degrees of Freedom. 
The denominator used during computations is (n-ddof). DDOF=0 corresponds to a biased estimate, DDOF=1 to an un- biased estimate of the variance. """ % trimdoc if (not isinstance(limits,tuple)) and isinstance(limits,float): limits = (limits, limits) if relative: out = trimr(a,limits=limits, inclusive=inclusive,axis=axis) else: out = trima(a,limits=limits,inclusive=inclusive) return out.var(axis=axis, ddof=ddof) def trimmed_std(a, limits=(0.1,0.1), inclusive=(1,1), relative=True, axis=None, ddof=0): """Returns the trimmed standard deviation of the data along the given axis. %s ddof : {0,integer}, optional Means Delta Degrees of Freedom. The denominator used during computations is (n-ddof). DDOF=0 corresponds to a biased estimate, DDOF=1 to an un- biased estimate of the variance. """ % trimdoc if (not isinstance(limits,tuple)) and isinstance(limits,float): limits = (limits, limits) if relative: out = trimr(a,limits=limits,inclusive=inclusive,axis=axis) else: out = trima(a,limits=limits,inclusive=inclusive) return out.std(axis=axis,ddof=ddof) def trimmed_stde(a, limits=(0.1,0.1), inclusive=(1,1), axis=None): """ Returns the standard error of the trimmed mean along the given axis. Parameters ---------- a : sequence Input array limits : {(0.1,0.1), tuple of float}, optional tuple (lower percentage, upper percentage) to cut on each side of the array, with respect to the number of unmasked data. If n is the number of unmasked data before trimming, the values smaller than ``n * limits[0]`` and the values larger than ``n * `limits[1]`` are masked, and the total number of unmasked data after trimming is ``n * (1.-sum(limits))``. In each case, the value of one limit can be set to None to indicate an open interval. If `limits` is None, no trimming is performed. inclusive : {(bool, bool) tuple} optional Tuple indicating whether the number of data being masked on each side should be rounded (True) or truncated (False). axis : int, optional Axis along which to trim. 
Returns ------- trimmed_stde : scalar or ndarray """ def _trimmed_stde_1D(a, low_limit, up_limit, low_inclusive, up_inclusive): "Returns the standard error of the trimmed mean for a 1D input data." n = a.count() idx = a.argsort() if low_limit: if low_inclusive: lowidx = int(low_limit*n) else: lowidx = np.round(low_limit*n) a[idx[:lowidx]] = masked if up_limit is not None: if up_inclusive: upidx = n - int(n*up_limit) else: upidx = n - np.round(n*up_limit) a[idx[upidx:]] = masked a[idx[:lowidx]] = a[idx[lowidx]] a[idx[upidx:]] = a[idx[upidx-1]] winstd = a.std(ddof=1) return winstd / ((1-low_limit-up_limit)*np.sqrt(len(a))) a = ma.array(a, copy=True, subok=True) a.unshare_mask() if limits is None: return a.std(axis=axis,ddof=1)/ma.sqrt(a.count(axis)) if (not isinstance(limits,tuple)) and isinstance(limits,float): limits = (limits, limits) # Check the limits (lolim, uplim) = limits errmsg = "The proportion to cut from the %s should be between 0. and 1." if lolim is not None: if lolim > 1. or lolim < 0: raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim) if uplim is not None: if uplim > 1. or uplim < 0: raise ValueError(errmsg % 'end' + "(got %s)" % uplim) (loinc, upinc) = inclusive if (axis is None): return _trimmed_stde_1D(a.ravel(),lolim,uplim,loinc,upinc) else: if a.ndim > 2: raise ValueError("Array 'a' must be at most two dimensional, but got a.ndim = %d" % a.ndim) return ma.apply_along_axis(_trimmed_stde_1D, axis, a, lolim,uplim,loinc,upinc) def tmean(a, limits=None, inclusive=(True,True)): return trima(a, limits=limits, inclusive=inclusive).mean() tmean.__doc__ = stats.tmean.__doc__ def tvar(a, limits=None, inclusive=(True,True)): a = a.astype(float).ravel() if limits is None: n = (~a.mask).sum() # todo: better way to do that? 
r = trima(a, limits=limits, inclusive=inclusive).var() * (n/(n-1.)) else: raise ValueError('mstats.tvar() with limits not implemented yet so far') return r tvar.__doc__ = stats.tvar.__doc__ def tmin(a, lowerlimit=None, axis=0, inclusive=True): a, axis = _chk_asarray(a, axis) am = trima(a, (lowerlimit, None), (inclusive, False)) return ma.minimum.reduce(am, axis) tmin.__doc__ = stats.tmin.__doc__ def tmax(a, upperlimit, axis=0, inclusive=True): a, axis = _chk_asarray(a, axis) am = trima(a, (None, upperlimit), (False, inclusive)) return ma.maximum.reduce(am, axis) tmax.__doc__ = stats.tmax.__doc__ def tsem(a, limits=None, inclusive=(True,True)): a = ma.asarray(a).ravel() if limits is None: n = float(a.count()) return a.std(ddof=1)/ma.sqrt(n) am = trima(a.ravel(), limits, inclusive) sd = np.sqrt(am.var(ddof=1)) return sd / np.sqrt(am.count()) tsem.__doc__ = stats.tsem.__doc__ def winsorize(a, limits=None, inclusive=(True, True), inplace=False, axis=None): """Returns a Winsorized version of the input array. The (limits[0])th lowest values are set to the (limits[0])th percentile, and the (limits[1])th highest values are set to the (1 - limits[1])th percentile. Masked values are skipped. Parameters ---------- a : sequence Input array. limits : {None, tuple of float}, optional Tuple of the percentages to cut on each side of the array, with respect to the number of unmasked data, as floats between 0. and 1. Noting n the number of unmasked data before trimming, the (n*limits[0])th smallest data and the (n*limits[1])th largest data are masked, and the total number of unmasked data after trimming is n*(1.-sum(limits)) The value of one limit can be set to None to indicate an open interval. inclusive : {(True, True) tuple}, optional Tuple indicating whether the number of data being masked on each side should be rounded (True) or truncated (False). 
inplace : {False, True}, optional Whether to winsorize in place (True) or to use a copy (False) axis : {None, int}, optional Axis along which to trim. If None, the whole array is trimmed, but its shape is maintained. Notes ----- This function is applied to reduce the effect of possibly spurious outliers by limiting the extreme values. """ def _winsorize1D(a, low_limit, up_limit, low_include, up_include): n = a.count() idx = a.argsort() if low_limit: if low_include: lowidx = int(low_limit * n) else: lowidx = np.round(low_limit * n) a[idx[:lowidx]] = a[idx[lowidx]] if up_limit is not None: if up_include: upidx = n - int(n * up_limit) else: upidx = n - np.round(n * up_limit) a[idx[upidx:]] = a[idx[upidx - 1]] return a # We are going to modify a: better make a copy a = ma.array(a, copy=np.logical_not(inplace)) if limits is None: return a if (not isinstance(limits, tuple)) and isinstance(limits, float): limits = (limits, limits) # Check the limits (lolim, uplim) = limits errmsg = "The proportion to cut from the %s should be between 0. and 1." if lolim is not None: if lolim > 1. or lolim < 0: raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim) if uplim is not None: if uplim > 1. or uplim < 0: raise ValueError(errmsg % 'end' + "(got %s)" % uplim) (loinc, upinc) = inclusive if axis is None: shp = a.shape return _winsorize1D(a.ravel(), lolim, uplim, loinc, upinc).reshape(shp) else: return ma.apply_along_axis(_winsorize1D, axis, a, lolim, uplim, loinc, upinc) def moment(a, moment=1, axis=0): a, axis = _chk_asarray(a, axis) if moment == 1: # By definition the first moment about the mean is 0. 
shape = list(a.shape) del shape[axis] if shape: # return an actual array of the appropriate shape return np.zeros(shape, dtype=float) else: # the input was 1D, so return a scalar instead of a rank-0 array return np.float64(0.0) else: mn = ma.expand_dims(a.mean(axis=axis), axis) s = ma.power((a-mn), moment) return s.mean(axis=axis) moment.__doc__ = stats.moment.__doc__ def variation(a, axis=0): a, axis = _chk_asarray(a, axis) return a.std(axis)/a.mean(axis) variation.__doc__ = stats.variation.__doc__ def skew(a, axis=0, bias=True): a, axis = _chk_asarray(a,axis) n = a.count(axis) m2 = moment(a, 2, axis) m3 = moment(a, 3, axis) olderr = np.seterr(all='ignore') try: vals = ma.where(m2 == 0, 0, m3 / m2**1.5) finally: np.seterr(**olderr) if not bias: can_correct = (n > 2) & (m2 > 0) if can_correct.any(): m2 = np.extract(can_correct, m2) m3 = np.extract(can_correct, m3) nval = ma.sqrt((n-1.0)*n)/(n-2.0)*m3/m2**1.5 np.place(vals, can_correct, nval) return vals skew.__doc__ = stats.skew.__doc__ def kurtosis(a, axis=0, fisher=True, bias=True): a, axis = _chk_asarray(a, axis) m2 = moment(a, 2, axis) m4 = moment(a, 4, axis) olderr = np.seterr(all='ignore') try: vals = ma.where(m2 == 0, 0, m4 / m2**2.0) finally: np.seterr(**olderr) if not bias: n = a.count(axis) can_correct = (n > 3) & (m2 is not ma.masked and m2 > 0) if can_correct.any(): n = np.extract(can_correct, n) m2 = np.extract(can_correct, m2) m4 = np.extract(can_correct, m4) nval = 1.0/(n-2)/(n-3)*((n*n-1.0)*m4/m2**2.0-3*(n-1)**2.0) np.place(vals, can_correct, nval+3.0) if fisher: return vals - 3 else: return vals kurtosis.__doc__ = stats.kurtosis.__doc__ def describe(a, axis=0,ddof=0): """ Computes several descriptive statistics of the passed array. 
Parameters ---------- a : array axis : int or None ddof : int degree of freedom (default 0); note that default ddof is different from the same routine in stats.describe Returns ------- n : int (size of the data (discarding missing values) mm : (int, int) min, max arithmetic mean : float unbiased variance : float biased skewness : float biased kurtosis : float Examples -------- >>> ma = np.ma.array(range(6), mask=[0, 0, 0, 1, 1, 1]) >>> describe(ma) (array(3), (0, 2), 1.0, 1.0, masked_array(data = 0.0, mask = False, fill_value = 1e+20) , -1.5) """ a, axis = _chk_asarray(a, axis) n = a.count(axis) mm = (ma.minimum.reduce(a), ma.maximum.reduce(a)) m = a.mean(axis) v = a.var(axis,ddof=ddof) sk = skew(a, axis) kurt = kurtosis(a, axis) return n, mm, m, v, sk, kurt def stde_median(data, axis=None): """Returns the McKean-Schrader estimate of the standard error of the sample median along the given axis. masked values are discarded. Parameters ---------- data : ndarray Data to trim. axis : {None,int}, optional Axis along which to perform the trimming. If None, the input array is first flattened. """ def _stdemed_1D(data): data = np.sort(data.compressed()) n = len(data) z = 2.5758293035489004 k = int(np.round((n+1)/2. - z * np.sqrt(n/4.),0)) return ((data[n-k] - data[k-1])/(2.*z)) data = ma.array(data, copy=False, subok=True) if (axis is None): return _stdemed_1D(data) else: if data.ndim > 2: raise ValueError("Array 'data' must be at most two dimensional, " "but got data.ndim = %d" % data.ndim) return ma.apply_along_axis(_stdemed_1D, axis, data) def skewtest(a, axis=0): a, axis = _chk_asarray(a, axis) if axis is None: a = a.ravel() axis = 0 b2 = skew(a,axis) n = a.count(axis) if np.min(n) < 8: raise ValueError( "skewtest is not valid with less than 8 samples; %i samples" " were given." 
% np.min(n)) y = b2 * ma.sqrt(((n+1)*(n+3)) / (6.0*(n-2))) beta2 = (3.0*(n*n+27*n-70)*(n+1)*(n+3)) / ((n-2.0)*(n+5)*(n+7)*(n+9)) W2 = -1 + ma.sqrt(2*(beta2-1)) delta = 1/ma.sqrt(0.5*ma.log(W2)) alpha = ma.sqrt(2.0/(W2-1)) y = ma.where(y == 0, 1, y) Z = delta*ma.log(y/alpha + ma.sqrt((y/alpha)**2+1)) return Z, 2 * distributions.norm.sf(np.abs(Z)) skewtest.__doc__ = stats.skewtest.__doc__ def kurtosistest(a, axis=0): a, axis = _chk_asarray(a, axis) n = a.count(axis=axis) if np.min(n) < 5: raise ValueError( "kurtosistest requires at least 5 observations; %i observations" " were given." % np.min(n)) if np.min(n) < 20: warnings.warn( "kurtosistest only valid for n>=20 ... continuing anyway, n=%i" % np.min(n)) b2 = kurtosis(a, axis, fisher=False) E = 3.0*(n-1) / (n+1) varb2 = 24.0*n*(n-2.)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5)) x = (b2-E)/ma.sqrt(varb2) sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) / (n*(n-2)*(n-3))) A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2))) term1 = 1 - 2./(9.0*A) denom = 1 + x*ma.sqrt(2/(A-4.0)) if np.ma.isMaskedArray(denom): # For multi-dimensional array input denom[denom < 0] = masked elif denom < 0: denom = masked term2 = ma.power((1-2.0/A)/denom,1/3.0) Z = (term1 - term2) / np.sqrt(2/(9.0*A)) return Z, 2 * distributions.norm.sf(np.abs(Z)) kurtosistest.__doc__ = stats.kurtosistest.__doc__ def normaltest(a, axis=0): a, axis = _chk_asarray(a, axis) s, _ = skewtest(a, axis) k, _ = kurtosistest(a, axis) k2 = s*s + k*k return k2, stats.chisqprob(k2,2) normaltest.__doc__ = stats.normaltest.__doc__ def mquantiles(a, prob=list([.25,.5,.75]), alphap=.4, betap=.4, axis=None, limit=()): """ Computes empirical quantiles for a data array. Samples quantile are defined by ``Q(p) = (1-gamma)*x[j] + gamma*x[j+1]``, where ``x[j]`` is the j-th order statistic, and gamma is a function of ``j = floor(n*p + m)``, ``m = alphap + p*(1 - alphap - betap)`` and ``g = n*p + m - j``. 
Reinterpreting the above equations to compare to **R** lead to the equation: ``p(k) = (k - alphap)/(n + 1 - alphap - betap)`` Typical values of (alphap,betap) are: - (0,1) : ``p(k) = k/n`` : linear interpolation of cdf (**R** type 4) - (.5,.5) : ``p(k) = (k - 1/2.)/n`` : piecewise linear function (**R** type 5) - (0,0) : ``p(k) = k/(n+1)`` : (**R** type 6) - (1,1) : ``p(k) = (k-1)/(n-1)``: p(k) = mode[F(x[k])]. (**R** type 7, **R** default) - (1/3,1/3): ``p(k) = (k-1/3)/(n+1/3)``: Then p(k) ~ median[F(x[k])]. The resulting quantile estimates are approximately median-unbiased regardless of the distribution of x. (**R** type 8) - (3/8,3/8): ``p(k) = (k-3/8)/(n+1/4)``: Blom. The resulting quantile estimates are approximately unbiased if x is normally distributed (**R** type 9) - (.4,.4) : approximately quantile unbiased (Cunnane) - (.35,.35): APL, used with PWM Parameters ---------- a : array_like Input data, as a sequence or array of dimension at most 2. prob : array_like, optional List of quantiles to compute. alphap : float, optional Plotting positions parameter, default is 0.4. betap : float, optional Plotting positions parameter, default is 0.4. axis : int, optional Axis along which to perform the trimming. If None (default), the input array is first flattened. limit : tuple Tuple of (lower, upper) values. Values of `a` outside this open interval are ignored. Returns ------- mquantiles : MaskedArray An array containing the calculated quantiles. Notes ----- This formulation is very similar to **R** except the calculation of ``m`` from ``alphap`` and ``betap``, where in **R** ``m`` is defined with each type. References ---------- .. [1] *R* statistical software: http://www.r-project.org/ .. [2] *R* ``quantile`` function: http://stat.ethz.ch/R-manual/R-devel/library/stats/html/quantile.html Examples -------- >>> from scipy.stats.mstats import mquantiles >>> a = np.array([6., 47., 49., 15., 42., 41., 7., 39., 43., 40., 36.]) >>> mquantiles(a) array([ 19.2, 40. 
, 42.8]) Using a 2D array, specifying axis and limit. >>> data = np.array([[ 6., 7., 1.], [ 47., 15., 2.], [ 49., 36., 3.], [ 15., 39., 4.], [ 42., 40., -999.], [ 41., 41., -999.], [ 7., -999., -999.], [ 39., -999., -999.], [ 43., -999., -999.], [ 40., -999., -999.], [ 36., -999., -999.]]) >>> mquantiles(data, axis=0, limit=(0, 50)) array([[ 19.2 , 14.6 , 1.45], [ 40. , 37.5 , 2.5 ], [ 42.8 , 40.05, 3.55]]) >>> data[:, 2] = -999. >>> mquantiles(data, axis=0, limit=(0, 50)) masked_array(data = [[19.2 14.6 --] [40.0 37.5 --] [42.8 40.05 --]], mask = [[False False True] [False False True] [False False True]], fill_value = 1e+20) """ def _quantiles1D(data,m,p): x = np.sort(data.compressed()) n = len(x) if n == 0: return ma.array(np.empty(len(p), dtype=float), mask=True) elif n == 1: return ma.array(np.resize(x, p.shape), mask=nomask) aleph = (n*p + m) k = np.floor(aleph.clip(1, n-1)).astype(int) gamma = (aleph-k).clip(0,1) return (1.-gamma)*x[(k-1).tolist()] + gamma*x[k.tolist()] data = ma.array(a, copy=False) if data.ndim > 2: raise TypeError("Array should be 2D at most !") if limit: condition = (limit[0] < data) & (data < limit[1]) data[~condition.filled(True)] = masked p = np.array(prob, copy=False, ndmin=1) m = alphap + p*(1.-alphap-betap) # Computes quantiles along axis (or globally) if (axis is None): return _quantiles1D(data, m, p) return ma.apply_along_axis(_quantiles1D, axis, data, m, p) def scoreatpercentile(data, per, limit=(), alphap=.4, betap=.4): """Calculate the score at the given 'per' percentile of the sequence a. For example, the score at per=50 is the median. This function is a shortcut to mquantile """ if (per < 0) or (per > 100.): raise ValueError("The percentile should be between 0. and 100. !" " (got %s)" % per) return mquantiles(data, prob=[per/100.], alphap=alphap, betap=betap, limit=limit, axis=0).squeeze() def plotting_positions(data, alpha=0.4, beta=0.4): """ Returns plotting positions (or empirical percentile points) for the data. 
Plotting positions are defined as ``(i-alpha)/(n+1-alpha-beta)``, where: - i is the rank order statistics - n is the number of unmasked values along the given axis - `alpha` and `beta` are two parameters. Typical values for `alpha` and `beta` are: - (0,1) : ``p(k) = k/n``, linear interpolation of cdf (R, type 4) - (.5,.5) : ``p(k) = (k-1/2.)/n``, piecewise linear function (R, type 5) - (0,0) : ``p(k) = k/(n+1)``, Weibull (R type 6) - (1,1) : ``p(k) = (k-1)/(n-1)``, in this case, ``p(k) = mode[F(x[k])]``. That's R default (R type 7) - (1/3,1/3): ``p(k) = (k-1/3)/(n+1/3)``, then ``p(k) ~ median[F(x[k])]``. The resulting quantile estimates are approximately median-unbiased regardless of the distribution of x. (R type 8) - (3/8,3/8): ``p(k) = (k-3/8)/(n+1/4)``, Blom. The resulting quantile estimates are approximately unbiased if x is normally distributed (R type 9) - (.4,.4) : approximately quantile unbiased (Cunnane) - (.35,.35): APL, used with PWM - (.3175, .3175): used in scipy.stats.probplot Parameters ---------- data : array_like Input data, as a sequence or array of dimension at most 2. alpha : float, optional Plotting positions parameter. Default is 0.4. beta : float, optional Plotting positions parameter. Default is 0.4. Returns ------- positions : MaskedArray The calculated plotting positions. """ data = ma.array(data, copy=False).reshape(1,-1) n = data.count() plpos = np.empty(data.size, dtype=float) plpos[n:] = 0 plpos[data.argsort()[:n]] = ((np.arange(1, n+1) - alpha) / (n + 1.0 - alpha - beta)) return ma.array(plpos, mask=data._mask) meppf = plotting_positions def obrientransform(*args): """ Computes a transform on input data (any number of columns). Used to test for homogeneity of variance prior to running one-way stats. Each array in *args is one level of a factor. If an F_oneway() run on the transformed data and found significant, variances are unequal. From Maxwell and Delaney, p.112. 
Returns: transformed data for use in an ANOVA """ data = argstoarray(*args).T v = data.var(axis=0,ddof=1) m = data.mean(0) n = data.count(0).astype(float) # result = ((N-1.5)*N*(a-m)**2 - 0.5*v*(n-1))/((n-1)*(n-2)) data -= m data **= 2 data *= (n-1.5)*n data -= 0.5*v*(n-1) data /= (n-1.)*(n-2.) if not ma.allclose(v,data.mean(0)): raise ValueError("Lack of convergence in obrientransform.") return data def signaltonoise(data, axis=0): """Calculates the signal-to-noise ratio, as the ratio of the mean over standard deviation along the given axis. Parameters ---------- data : sequence Input data axis : {0, int}, optional Axis along which to compute. If None, the computation is performed on a flat version of the array. """ data = ma.array(data, copy=False) m = data.mean(axis) sd = data.std(axis, ddof=0) return m/sd def sem(a, axis=0, ddof=1): """ Calculates the standard error of the mean of the input array. Also sometimes called standard error of measurement. Parameters ---------- a : array_like An array containing the values for which the standard error is returned. axis : int or None, optional. If axis is None, ravel `a` first. If axis is an integer, this will be the axis over which to operate. Defaults to 0. ddof : int, optional Delta degrees-of-freedom. How many degrees of freedom to adjust for bias in limited samples relative to the population estimate of variance. Defaults to 1. Returns ------- s : ndarray or float The standard error of the mean in the sample(s), along the input axis. Notes ----- The default value for `ddof` changed in scipy 0.15.0 to be consistent with `stats.sem` as well as with the most common definition used (like in the R documentation). 
Examples -------- Find standard error along the first axis: >>> from scipy import stats >>> a = np.arange(20).reshape(5,4) >>> stats.sem(a) array([ 2.8284, 2.8284, 2.8284, 2.8284]) Find standard error across the whole array, using n degrees of freedom: >>> stats.sem(a, axis=None, ddof=0) 1.2893796958227628 """ a, axis = _chk_asarray(a, axis) n = a.count(axis=axis) s = a.std(axis=axis, ddof=ddof) / ma.sqrt(n) return s zmap = stats.zmap zscore = stats.zscore def f_oneway(*args): """ Performs a 1-way ANOVA, returning an F-value and probability given any number of groups. From Heiman, pp.394-7. Usage: ``f_oneway(*args)``, where ``*args`` is 2 or more arrays, one per treatment group. Returns: f-value, probability """ # Construct a single array of arguments: each row is a group data = argstoarray(*args) ngroups = len(data) ntot = data.count() sstot = (data**2).sum() - (data.sum())**2/float(ntot) ssbg = (data.count(-1) * (data.mean(-1)-data.mean())**2).sum() sswg = sstot-ssbg dfbg = ngroups-1 dfwg = ntot - ngroups msb = ssbg/float(dfbg) msw = sswg/float(dfwg) f = msb/msw prob = special.fdtrc(dfbg, dfwg, f) # equivalent to stats.f.sf return f, prob def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b): """Calculation of Wilks lambda F-statistic for multivariate data, per Maxwell & Delaney p.657. """ ER = ma.array(ER, copy=False, ndmin=2) EF = ma.array(EF, copy=False, ndmin=2) if ma.getmask(ER).any() or ma.getmask(EF).any(): raise NotImplementedError("Not implemented when the inputs " "have missing data") lmbda = np.linalg.det(EF) / np.linalg.det(ER) q = ma.sqrt(((a-1)**2*(b-1)**2 - 2) / ((a-1)**2 + (b-1)**2 - 5)) q = ma.filled(q, 1) n_um = (1 - lmbda**(1.0/q))*(a-1)*(b-1) d_en = lmbda**(1.0/q) / (n_um*q - 0.5*(a-1)*(b-1) + 1) return n_um / d_en def friedmanchisquare(*args): """Friedman Chi-Square is a non-parametric, one-way within-subjects ANOVA. 
This function calculates the Friedman Chi-square test for repeated measures and returns the result, along with the associated probability value. Each input is considered a given group. Ideally, the number of treatments among each group should be equal. If this is not the case, only the first n treatments are taken into account, where n is the number of treatments of the smallest group. If a group has some missing values, the corresponding treatments are masked in the other groups. The test statistic is corrected for ties. Masked values in one group are propagated to the other groups. Returns: chi-square statistic, associated p-value """ data = argstoarray(*args).astype(float) k = len(data) if k < 3: raise ValueError("Less than 3 groups (%i): " % k + "the Friedman test is NOT appropriate.") ranked = ma.masked_values(rankdata(data, axis=0), 0) if ranked._mask is not nomask: ranked = ma.mask_cols(ranked) ranked = ranked.compressed().reshape(k,-1).view(ndarray) else: ranked = ranked._data (k,n) = ranked.shape # Ties correction repeats = np.array([find_repeats(_) for _ in ranked.T], dtype=object) ties = repeats[repeats.nonzero()].reshape(-1,2)[:,-1].astype(int) tie_correction = 1 - (ties**3-ties).sum()/float(n*(k**3-k)) ssbg = np.sum((ranked.sum(-1) - n*(k+1)/2.)**2) chisq = ssbg * 12./(n*k*(k+1)) * 1./tie_correction return chisq, stats.chisqprob(chisq,k-1)
mit
yangjae/grpc
src/python/src/grpc/framework/face/testing/interfaces.py
41
3849
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Interfaces implemented by data sets used in Face-layer tests."""

import abc

# cardinality is referenced from specification in this module.
from grpc.framework.common import cardinality  # pylint: disable=unused-import


class Method(object):
  """An RPC method to be used in tests of RPC implementations.

  Abstract interface: concrete test fixtures implement every method below
  to describe one RPC method — its name, its streaming cardinality, and the
  request/response classes together with their (de)serializers.
  """
  # Python-2-style ABC declaration; instantiating a subclass that leaves any
  # abstractmethod unimplemented raises TypeError.
  __metaclass__ = abc.ABCMeta

  @abc.abstractmethod
  def name(self):
    """Identify the name of the method.

    Returns:
      The name of the method.
    """
    raise NotImplementedError()

  @abc.abstractmethod
  def cardinality(self):
    """Identify the cardinality of the method.

    Returns:
      A cardinality.Cardinality value describing the streaming semantics of
        the method.
    """
    raise NotImplementedError()

  @abc.abstractmethod
  def request_class(self):
    """Identify the class used for the method's request objects.

    Returns:
      The class object of the class to which the method's request objects
        belong.
    """
    raise NotImplementedError()

  @abc.abstractmethod
  def response_class(self):
    """Identify the class used for the method's response objects.

    Returns:
      The class object of the class to which the method's response objects
        belong.
    """
    raise NotImplementedError()

  @abc.abstractmethod
  def serialize_request(self, request):
    """Serialize the given request object.

    Args:
      request: A request object appropriate for this method.

    Returns:
      The serialized form of the request — presumably a bytestring, to match
        the contract of deserialize_request (implementations define the exact
        representation).
    """
    raise NotImplementedError()

  @abc.abstractmethod
  def deserialize_request(self, serialized_request):
    """Synthesize a request object from a given bytestring.

    Args:
      serialized_request: A bytestring deserializable into a request object
        appropriate for this method.

    Returns:
      A request object appropriate for this method.
    """
    raise NotImplementedError()

  @abc.abstractmethod
  def serialize_response(self, response):
    """Serialize the given response object.

    Args:
      response: A response object appropriate for this method.

    Returns:
      The serialized form of the response — presumably a bytestring, to match
        the contract of deserialize_response.
    """
    raise NotImplementedError()

  @abc.abstractmethod
  def deserialize_response(self, serialized_response):
    """Synthesize a response object from a given bytestring.

    Args:
      serialized_response: A bytestring deserializable into a response object
        appropriate for this method.

    Returns:
      A response object appropriate for this method.
    """
    raise NotImplementedError()
bsd-3-clause
mortada/tensorflow
tensorflow/contrib/learn/python/learn/tests/dataframe/example_parser_test.py
83
5694
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for learn.dataframe.transforms.example_parser."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from google.protobuf import text_format
from tensorflow.contrib.learn.python.learn.dataframe.transforms import example_parser
from tensorflow.contrib.learn.python.learn.tests.dataframe import mocks
from tensorflow.core.example import example_pb2


class ExampleParserTestCase(tf.test.TestCase):
  """Test class for `ExampleParser`."""

  def setUp(self):
    # Builds two tf.Example protos (a fixed-length int feature and a
    # variable-length string feature), wraps their serialized forms in a
    # MockSeries, and precomputes the dense/sparse values the parser is
    # expected to produce.
    super(ExampleParserTestCase, self).setUp()
    self.example1 = example_pb2.Example()
    text_format.Parse("features: { "
                      " feature: { "
                      " key: 'int_feature' "
                      " value: { "
                      " int64_list: { "
                      " value: [ 21, 2, 5 ] "
                      " } "
                      " } "
                      " } "
                      " feature: { "
                      " key: 'string_feature' "
                      " value: { "
                      " bytes_list: { "
                      " value: [ 'armadillo' ] "
                      " } "
                      " } "
                      " } "
                      "} ", self.example1)
    self.example2 = example_pb2.Example()
    text_format.Parse("features: { "
                      " feature: { "
                      " key: 'int_feature' "
                      " value: { "
                      " int64_list: { "
                      " value: [ 4, 5, 6 ] "
                      " } "
                      " } "
                      " } "
                      " feature: { "
                      " key: 'string_feature' "
                      " value: { "
                      " bytes_list: { "
                      " value: [ 'car', 'train' ] "
                      " } "
                      " } "
                      " } "
                      "} ", self.example2)
    # Serialized examples packed into a string tensor of shape [2] — the
    # input column fed to ExampleParser in both tests.
    self.example_column = mocks.MockSeries(
        "example",
        tf.constant(
            [self.example1.SerializeToString(),
             self.example2.SerializeToString()],
            dtype=tf.string,
            shape=[2]))
    # Feature spec given both as a tuple of pairs and (in the second test)
    # as a dict built from the same pairs.
    self.features = (("string_feature", tf.VarLenFeature(dtype=tf.string)),
                     ("int_feature",
                      tf.FixedLenFeature(shape=[3],
                                         dtype=tf.int64,
                                         default_value=[0, 0, 0])))
    # Expected sparse values/indices for the string feature and the dense
    # [2, 3] array for the int feature, derived directly from the protos.
    self.expected_string_values = np.array(
        list(self.example1.features.feature["string_feature"]
             .bytes_list.value) +
        list(self.example2.features.feature["string_feature"]
             .bytes_list.value))
    self.expected_string_indices = np.array([[0, 0], [1, 0], [1, 1]])
    self.expected_int_feature = np.array(
        [list(self.example1.features.feature["int_feature"].int64_list.value),
         list(self.example2.features.feature["int_feature"].int64_list.value)])

  def testParseWithTupleDefinition(self):
    # Feature definition supplied as a tuple of (name, feature) pairs:
    # output column order follows the tuple order (string first, int second).
    parser = example_parser.ExampleParser(self.features)
    output_columns = parser(self.example_column)
    self.assertEqual(2, len(output_columns))
    cache = {}
    output_tensors = [o.build(cache) for o in output_columns]
    self.assertEqual(2, len(output_tensors))
    with self.test_session() as sess:
      string_feature, int_feature = sess.run(output_tensors)
      np.testing.assert_array_equal(string_feature.dense_shape,
                                    np.array([2, 2]))
      np.testing.assert_array_equal(int_feature.shape, np.array([2, 3]))
      np.testing.assert_array_equal(self.expected_string_values,
                                    string_feature.values)
      np.testing.assert_array_equal(self.expected_string_indices,
                                    string_feature.indices)
      np.testing.assert_array_equal(self.expected_int_feature, int_feature)

  def testParseWithDictDefinition(self):
    # Same check with a dict feature spec: output order is by sorted key
    # ("int_feature" before "string_feature"), hence the swapped unpacking.
    parser = example_parser.ExampleParser(dict(self.features))
    output_columns = parser(self.example_column)
    self.assertEqual(2, len(output_columns))
    cache = {}
    output_tensors = [o.build(cache) for o in output_columns]
    self.assertEqual(2, len(output_tensors))
    with self.test_session() as sess:
      int_feature, string_feature = sess.run(output_tensors)
      np.testing.assert_array_equal(string_feature.dense_shape,
                                    np.array([2, 2]))
      np.testing.assert_array_equal(int_feature.shape, np.array([2, 3]))
      np.testing.assert_array_equal(self.expected_string_values,
                                    string_feature.values)
      np.testing.assert_array_equal(self.expected_string_indices,
                                    string_feature.indices)
      np.testing.assert_array_equal(self.expected_int_feature, int_feature)


if __name__ == "__main__":
  tf.test.main()
apache-2.0
OpenSourceActivismTech/call-power
alembic/versions/2bd538139427_phone_number_m2m.py
5
1028
"""phone number m2m Revision ID: 2bd538139427 Revises: a1f4cdc4aa Create Date: 2015-07-10 17:43:05.337612 """ # revision identifiers, used by Alembic. revision = '2bd538139427' down_revision = 'a1f4cdc4aa' branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic - please adjust! ### with op.batch_alter_table('campaign_phone_numbers', schema=None) as batch_op: batch_op.add_column(sa.Column('id', sa.Integer(), nullable=False, server_default=sa.schema.DefaultClause("0"))) # have to specify default here, because column is not nullable # but because it's new, there should be no problems with setting ids to 0 ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### with op.batch_alter_table('campaign_phone_numbers', schema=None) as batch_op: batch_op.drop_column('id') ### end Alembic commands ###
agpl-3.0
vasyarv/edx-platform
cms/djangoapps/contentstore/views/tests/test_group_configurations.py
10
35026
#-*- coding: utf-8 -*- """ Group Configuration Tests. """ import json from mock import patch from contentstore.utils import reverse_course_url, reverse_usage_url from contentstore.views.component import SPLIT_TEST_COMPONENT_TYPE from contentstore.course_group_config import GroupConfiguration from contentstore.tests.utils import CourseTestCase from xmodule.partitions.partitions import Group, UserPartition from xmodule.modulestore.tests.factories import ItemFactory from xmodule.validation import StudioValidation, StudioValidationMessage from xmodule.modulestore.django import modulestore from xmodule.modulestore import ModuleStoreEnum GROUP_CONFIGURATION_JSON = { u'name': u'Test name', u'scheme': u'random', u'description': u'Test description', u'version': UserPartition.VERSION, u'groups': [ { u'name': u'Group A', u'version': 1, }, { u'name': u'Group B', u'version': 1, }, ], } # pylint: disable=no-member class HelperMethods(object): """ Mixin that provides useful methods for Group Configuration tests. """ def _create_content_experiment(self, cid=-1, name_suffix='', special_characters=''): """ Create content experiment. Assign Group Configuration to the experiment if cid is provided. 
""" vertical = ItemFactory.create( category='vertical', parent_location=self.course.location, display_name='Test Unit {}'.format(name_suffix) ) c0_url = self.course.id.make_usage_key("vertical", "split_test_cond0") c1_url = self.course.id.make_usage_key("vertical", "split_test_cond1") c2_url = self.course.id.make_usage_key("vertical", "split_test_cond2") split_test = ItemFactory.create( category='split_test', parent_location=vertical.location, user_partition_id=cid, display_name=u"Test Content Experiment {}{}".format(name_suffix, special_characters), group_id_to_child={"0": c0_url, "1": c1_url, "2": c2_url} ) ItemFactory.create( parent_location=split_test.location, category="vertical", display_name="Condition 0 vertical", location=c0_url, ) ItemFactory.create( parent_location=split_test.location, category="vertical", display_name="Condition 1 vertical", location=c1_url, ) ItemFactory.create( parent_location=split_test.location, category="vertical", display_name="Condition 2 vertical", location=c2_url, ) partitions_json = [p.to_json() for p in self.course.user_partitions] self.client.ajax_post( reverse_usage_url("xblock_handler", split_test.location), data={'metadata': {'user_partitions': partitions_json}} ) self.save_course() return (vertical, split_test) def _create_problem_with_content_group(self, cid, group_id, name_suffix='', special_characters=''): """ Create a problem Assign content group to the problem. 
""" vertical = ItemFactory.create( category='vertical', parent_location=self.course.location, display_name="Test Unit {}".format(name_suffix) ) problem = ItemFactory.create( category='problem', parent_location=vertical.location, display_name=u"Test Problem {}{}".format(name_suffix, special_characters) ) group_access_content = {'group_access': {cid: [group_id]}} self.client.ajax_post( reverse_usage_url("xblock_handler", problem.location), data={'metadata': group_access_content} ) self.save_course() return vertical, problem def _add_user_partitions(self, count=1, scheme_id="random"): """ Create user partitions for the course. """ partitions = [ UserPartition( i, 'Name ' + str(i), 'Description ' + str(i), [Group(0, 'Group A'), Group(1, 'Group B'), Group(2, 'Group C')], scheme=None, scheme_id=scheme_id ) for i in xrange(count) ] self.course.user_partitions = partitions self.save_course() # pylint: disable=no-member class GroupConfigurationsBaseTestCase(object): """ Mixin with base test cases for the group configurations. """ def _remove_ids(self, content): """ Remove ids from the response. We cannot predict IDs, because they're generated randomly. We use this method to clean up response when creating new group configurations. Returns a tuple that contains removed group configuration ID and group IDs. """ configuration_id = content.pop("id") group_ids = [group.pop("id") for group in content["groups"]] return (configuration_id, group_ids) def test_required_fields_are_absent(self): """ Test required fields are absent. 
""" bad_jsons = [ # must have name of the configuration { u'description': 'Test description', u'groups': [ {u'name': u'Group A'}, {u'name': u'Group B'}, ], }, # must have at least one group { u'name': u'Test name', u'description': u'Test description', u'groups': [], }, # an empty json {}, ] for bad_json in bad_jsons: response = self.client.post( self._url(), data=json.dumps(bad_json), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEqual(response.status_code, 400) self.assertNotIn("Location", response) content = json.loads(response.content) self.assertIn("error", content) def test_invalid_json(self): """ Test invalid json handling. """ # No property name. invalid_json = "{u'name': 'Test Name', []}" response = self.client.post( self._url(), data=invalid_json, content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEqual(response.status_code, 400) self.assertNotIn("Location", response) content = json.loads(response.content) self.assertIn("error", content) class GroupConfigurationsListHandlerTestCase(CourseTestCase, GroupConfigurationsBaseTestCase, HelperMethods): """ Test cases for group_configurations_list_handler. """ def setUp(self): """ Set up GroupConfigurationsListHandlerTestCase. """ super(GroupConfigurationsListHandlerTestCase, self).setUp() def _url(self): """ Return url for the handler. """ return reverse_course_url('group_configurations_list_handler', self.course.id) def test_view_index_ok(self): """ Basic check that the groups configuration page responds correctly. 
""" self.course.user_partitions = [ UserPartition(0, 'First name', 'First description', [Group(0, 'Group A'), Group(1, 'Group B'), Group(2, 'Group C')]), ] self.save_course() if SPLIT_TEST_COMPONENT_TYPE not in self.course.advanced_modules: self.course.advanced_modules.append(SPLIT_TEST_COMPONENT_TYPE) self.store.update_item(self.course, self.user.id) response = self.client.get(self._url()) self.assertEqual(response.status_code, 200) self.assertContains(response, 'First name') self.assertContains(response, 'Group C') self.assertContains(response, 'Content Group Configuration') def test_unsupported_http_accept_header(self): """ Test if not allowed header present in request. """ response = self.client.get( self._url(), HTTP_ACCEPT="text/plain", ) self.assertEqual(response.status_code, 406) def test_can_create_group_configuration(self): """ Test that you can create a group configuration. """ expected = { u'description': u'Test description', u'name': u'Test name', u'scheme': u'random', u'version': UserPartition.VERSION, u'groups': [ {u'name': u'Group A', u'version': 1}, {u'name': u'Group B', u'version': 1}, ], } response = self.client.ajax_post( self._url(), data=GROUP_CONFIGURATION_JSON ) self.assertEqual(response.status_code, 201) self.assertIn("Location", response) content = json.loads(response.content) configuration_id, group_ids = self._remove_ids(content) # pylint: disable=unused-variable self.assertEqual(content, expected) # IDs are unique self.assertEqual(len(group_ids), len(set(group_ids))) self.assertEqual(len(group_ids), 2) self.reload_course() # Verify that user_partitions in the course contains the new group configuration. 
user_partititons = self.course.user_partitions self.assertEqual(len(user_partititons), 1) self.assertEqual(user_partititons[0].name, u'Test name') self.assertEqual(len(user_partititons[0].groups), 2) self.assertEqual(user_partititons[0].groups[0].name, u'Group A') self.assertEqual(user_partititons[0].groups[1].name, u'Group B') def test_lazily_creates_cohort_configuration(self): """ Test that a cohort schemed user partition is NOT created by default for the user. """ self.assertEqual(len(self.course.user_partitions), 0) self.client.get(self._url()) self.reload_course() self.assertEqual(len(self.course.user_partitions), 0) class GroupConfigurationsDetailHandlerTestCase(CourseTestCase, GroupConfigurationsBaseTestCase, HelperMethods): """ Test cases for group_configurations_detail_handler. """ ID = 0 def _url(self, cid=-1): """ Return url for the handler. """ cid = cid if cid > 0 else self.ID return reverse_course_url( 'group_configurations_detail_handler', self.course.id, kwargs={'group_configuration_id': cid}, ) def test_can_create_new_content_group_if_it_does_not_exist(self): """ PUT new content group. """ expected = { u'id': 666, u'name': u'Test name', u'scheme': u'cohort', u'description': u'Test description', u'version': UserPartition.VERSION, u'groups': [ {u'id': 0, u'name': u'Group A', u'version': 1, u'usage': []}, {u'id': 1, u'name': u'Group B', u'version': 1, u'usage': []}, ], } response = self.client.put( self._url(cid=666), data=json.dumps(expected), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) content = json.loads(response.content) self.assertEqual(content, expected) self.reload_course() # Verify that user_partitions in the course contains the new group configuration. 
user_partitions = self.course.user_partitions self.assertEqual(len(user_partitions), 1) self.assertEqual(user_partitions[0].name, u'Test name') self.assertEqual(len(user_partitions[0].groups), 2) self.assertEqual(user_partitions[0].groups[0].name, u'Group A') self.assertEqual(user_partitions[0].groups[1].name, u'Group B') def test_can_edit_content_group(self): """ Edit content group and check its id and modified fields. """ self._add_user_partitions(scheme_id='cohort') self.save_course() expected = { u'id': self.ID, u'name': u'New Test name', u'scheme': u'cohort', u'description': u'New Test description', u'version': UserPartition.VERSION, u'groups': [ {u'id': 0, u'name': u'New Group Name', u'version': 1, u'usage': []}, {u'id': 2, u'name': u'Group C', u'version': 1, u'usage': []}, ], } response = self.client.put( self._url(), data=json.dumps(expected), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) content = json.loads(response.content) self.assertEqual(content, expected) self.reload_course() # Verify that user_partitions is properly updated in the course. user_partititons = self.course.user_partitions self.assertEqual(len(user_partititons), 1) self.assertEqual(user_partititons[0].name, u'New Test name') self.assertEqual(len(user_partititons[0].groups), 2) self.assertEqual(user_partititons[0].groups[0].name, u'New Group Name') self.assertEqual(user_partititons[0].groups[1].name, u'Group C') def test_can_delete_content_group(self): """ Delete content group and check user partitions. """ self._add_user_partitions(count=1, scheme_id='cohort') self.save_course() details_url_with_group_id = self._url(cid=0) + '/1' response = self.client.delete( details_url_with_group_id, content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEqual(response.status_code, 204) self.reload_course() # Verify that group and partition is properly updated in the course. 
user_partititons = self.course.user_partitions self.assertEqual(len(user_partititons), 1) self.assertEqual(user_partititons[0].name, 'Name 0') self.assertEqual(len(user_partititons[0].groups), 2) self.assertEqual(user_partititons[0].groups[1].name, 'Group C') def test_cannot_delete_used_content_group(self): """ Cannot delete content group if it is in use. """ self._add_user_partitions(count=1, scheme_id='cohort') self._create_problem_with_content_group(cid=0, group_id=1) details_url_with_group_id = self._url(cid=0) + '/1' response = self.client.delete( details_url_with_group_id, content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEqual(response.status_code, 400) content = json.loads(response.content) self.assertTrue(content['error']) self.reload_course() # Verify that user_partitions and groups are still the same. user_partititons = self.course.user_partitions self.assertEqual(len(user_partititons), 1) self.assertEqual(len(user_partititons[0].groups), 3) self.assertEqual(user_partititons[0].groups[1].name, 'Group B') def test_cannot_delete_non_existent_content_group(self): """ Cannot delete content group if it is doesn't exist. """ self._add_user_partitions(count=1, scheme_id='cohort') details_url_with_group_id = self._url(cid=0) + '/90' response = self.client.delete( details_url_with_group_id, content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEqual(response.status_code, 404) # Verify that user_partitions is still the same. user_partititons = self.course.user_partitions self.assertEqual(len(user_partititons), 1) self.assertEqual(len(user_partititons[0].groups), 3) def test_can_create_new_group_configuration_if_it_does_not_exist(self): """ PUT new group configuration when no configurations exist in the course. 
""" expected = { u'id': 999, u'name': u'Test name', u'scheme': u'random', u'description': u'Test description', u'version': UserPartition.VERSION, u'groups': [ {u'id': 0, u'name': u'Group A', u'version': 1}, {u'id': 1, u'name': u'Group B', u'version': 1}, ], u'usage': [], } response = self.client.put( self._url(cid=999), data=json.dumps(expected), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) content = json.loads(response.content) self.assertEqual(content, expected) self.reload_course() # Verify that user_partitions in the course contains the new group configuration. user_partitions = self.course.user_partitions self.assertEqual(len(user_partitions), 1) self.assertEqual(user_partitions[0].name, u'Test name') self.assertEqual(len(user_partitions[0].groups), 2) self.assertEqual(user_partitions[0].groups[0].name, u'Group A') self.assertEqual(user_partitions[0].groups[1].name, u'Group B') def test_can_edit_group_configuration(self): """ Edit group configuration and check its id and modified fields. """ self._add_user_partitions() self.save_course() expected = { u'id': self.ID, u'name': u'New Test name', u'scheme': u'random', u'description': u'New Test description', u'version': UserPartition.VERSION, u'groups': [ {u'id': 0, u'name': u'New Group Name', u'version': 1}, {u'id': 2, u'name': u'Group C', u'version': 1}, ], u'usage': [], } response = self.client.put( self._url(), data=json.dumps(expected), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) content = json.loads(response.content) self.assertEqual(content, expected) self.reload_course() # Verify that user_partitions is properly updated in the course. 
user_partititons = self.course.user_partitions self.assertEqual(len(user_partititons), 1) self.assertEqual(user_partititons[0].name, u'New Test name') self.assertEqual(len(user_partititons[0].groups), 2) self.assertEqual(user_partititons[0].groups[0].name, u'New Group Name') self.assertEqual(user_partititons[0].groups[1].name, u'Group C') def test_can_delete_group_configuration(self): """ Delete group configuration and check user partitions. """ self._add_user_partitions(count=2) self.save_course() response = self.client.delete( self._url(cid=0), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEqual(response.status_code, 204) self.reload_course() # Verify that user_partitions is properly updated in the course. user_partititons = self.course.user_partitions self.assertEqual(len(user_partititons), 1) self.assertEqual(user_partititons[0].name, 'Name 1') def test_cannot_delete_used_group_configuration(self): """ Cannot delete group configuration if it is in use. """ self._add_user_partitions(count=2) self._create_content_experiment(cid=0) response = self.client.delete( self._url(cid=0), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEqual(response.status_code, 400) content = json.loads(response.content) self.assertTrue(content['error']) self.reload_course() # Verify that user_partitions is still the same. user_partititons = self.course.user_partitions self.assertEqual(len(user_partititons), 2) self.assertEqual(user_partititons[0].name, 'Name 0') def test_cannot_delete_non_existent_group_configuration(self): """ Cannot delete group configuration if it is doesn't exist. 
""" self._add_user_partitions(count=2) response = self.client.delete( self._url(cid=999), content_type="application/json", HTTP_ACCEPT="application/json", HTTP_X_REQUESTED_WITH="XMLHttpRequest", ) self.assertEqual(response.status_code, 404) # Verify that user_partitions is still the same. user_partititons = self.course.user_partitions self.assertEqual(len(user_partititons), 2) self.assertEqual(user_partititons[0].name, 'Name 0') class GroupConfigurationsUsageInfoTestCase(CourseTestCase, HelperMethods): """ Tests for usage information of configurations and content groups. """ def setUp(self): super(GroupConfigurationsUsageInfoTestCase, self).setUp() def _get_expected_content_group(self, usage_for_group): """ Returns the expected configuration with particular usage. """ return { 'id': 0, 'name': 'Name 0', 'scheme': 'cohort', 'description': 'Description 0', 'version': UserPartition.VERSION, 'groups': [ {'id': 0, 'name': 'Group A', 'version': 1, 'usage': []}, {'id': 1, 'name': 'Group B', 'version': 1, 'usage': usage_for_group}, {'id': 2, 'name': 'Group C', 'version': 1, 'usage': []}, ], } def test_content_group_not_used(self): """ Test that right data structure will be created if content group is not used. """ self._add_user_partitions(scheme_id='cohort') actual = GroupConfiguration.get_or_create_content_group(self.store, self.course) expected = self._get_expected_content_group(usage_for_group=[]) self.assertEqual(actual, expected) def test_can_get_correct_usage_info_when_special_characters_are_in_content(self): """ Test if content group json updated successfully with usage information. 
""" self._add_user_partitions(count=1, scheme_id='cohort') vertical, __ = self._create_problem_with_content_group( cid=0, group_id=1, name_suffix='0', special_characters=u"JOSÉ ANDRÉS" ) actual = GroupConfiguration.get_or_create_content_group(self.store, self.course) expected = self._get_expected_content_group( usage_for_group=[ { 'url': u"/container/{}".format(vertical.location), 'label': u"Test Unit 0 / Test Problem 0JOSÉ ANDRÉS" } ] ) self.assertEqual(actual, expected) def test_can_get_correct_usage_info_for_content_groups(self): """ Test if content group json updated successfully with usage information. """ self._add_user_partitions(count=1, scheme_id='cohort') vertical, __ = self._create_problem_with_content_group(cid=0, group_id=1, name_suffix='0') actual = GroupConfiguration.get_or_create_content_group(self.store, self.course) expected = self._get_expected_content_group(usage_for_group=[ { 'url': '/container/{}'.format(vertical.location), 'label': 'Test Unit 0 / Test Problem 0' } ]) self.assertEqual(actual, expected) def test_can_use_one_content_group_in_multiple_problems(self): """ Test if multiple problems are present in usage info when they use same content group. """ self._add_user_partitions(scheme_id='cohort') vertical, __ = self._create_problem_with_content_group(cid=0, group_id=1, name_suffix='0') vertical1, __ = self._create_problem_with_content_group(cid=0, group_id=1, name_suffix='1') actual = GroupConfiguration.get_or_create_content_group(self.store, self.course) expected = self._get_expected_content_group(usage_for_group=[ { 'url': '/container/{}'.format(vertical.location), 'label': 'Test Unit 0 / Test Problem 0' }, { 'url': '/container/{}'.format(vertical1.location), 'label': 'Test Unit 1 / Test Problem 1' } ]) self.assertEqual(actual, expected) def test_group_configuration_not_used(self): """ Test that right data structure will be created if group configuration is not used. 
""" self._add_user_partitions() actual = GroupConfiguration.get_split_test_partitions_with_usage(self.store, self.course) expected = [{ 'id': 0, 'name': 'Name 0', 'scheme': 'random', 'description': 'Description 0', 'version': UserPartition.VERSION, 'groups': [ {'id': 0, 'name': 'Group A', 'version': 1}, {'id': 1, 'name': 'Group B', 'version': 1}, {'id': 2, 'name': 'Group C', 'version': 1}, ], 'usage': [], }] self.assertEqual(actual, expected) def test_can_get_correct_usage_info(self): """ Test if group configurations json updated successfully with usage information. """ self._add_user_partitions(count=2) vertical, __ = self._create_content_experiment(cid=0, name_suffix='0') self._create_content_experiment(name_suffix='1') actual = GroupConfiguration.get_split_test_partitions_with_usage(self.store, self.course) expected = [{ 'id': 0, 'name': 'Name 0', 'scheme': 'random', 'description': 'Description 0', 'version': UserPartition.VERSION, 'groups': [ {'id': 0, 'name': 'Group A', 'version': 1}, {'id': 1, 'name': 'Group B', 'version': 1}, {'id': 2, 'name': 'Group C', 'version': 1}, ], 'usage': [{ 'url': '/container/{}'.format(vertical.location), 'label': 'Test Unit 0 / Test Content Experiment 0', 'validation': None, }], }, { 'id': 1, 'name': 'Name 1', 'scheme': 'random', 'description': 'Description 1', 'version': UserPartition.VERSION, 'groups': [ {'id': 0, 'name': 'Group A', 'version': 1}, {'id': 1, 'name': 'Group B', 'version': 1}, {'id': 2, 'name': 'Group C', 'version': 1}, ], 'usage': [], }] self.assertEqual(actual, expected) def test_can_get_usage_info_when_special_characters_are_used(self): """ Test if group configurations json updated successfully when special characters are being used in content experiment """ self._add_user_partitions(count=1) vertical, __ = self._create_content_experiment(cid=0, name_suffix='0', special_characters=u"JOSÉ ANDRÉS") actual = GroupConfiguration.get_split_test_partitions_with_usage(self.store, self.course, ) expected = [{ 'id': 0, 
'name': 'Name 0', 'scheme': 'random', 'description': 'Description 0', 'version': UserPartition.VERSION, 'groups': [ {'id': 0, 'name': 'Group A', 'version': 1}, {'id': 1, 'name': 'Group B', 'version': 1}, {'id': 2, 'name': 'Group C', 'version': 1}, ], 'usage': [{ 'url': '/container/{}'.format(vertical.location), 'label': u"Test Unit 0 / Test Content Experiment 0JOSÉ ANDRÉS", 'validation': None, }], }] self.assertEqual(actual, expected) def test_can_use_one_configuration_in_multiple_experiments(self): """ Test if multiple experiments are present in usage info when they use same group configuration. """ self._add_user_partitions() vertical, __ = self._create_content_experiment(cid=0, name_suffix='0') vertical1, __ = self._create_content_experiment(cid=0, name_suffix='1') actual = GroupConfiguration.get_split_test_partitions_with_usage(self.store, self.course) expected = [{ 'id': 0, 'name': 'Name 0', 'scheme': 'random', 'description': 'Description 0', 'version': UserPartition.VERSION, 'groups': [ {'id': 0, 'name': 'Group A', 'version': 1}, {'id': 1, 'name': 'Group B', 'version': 1}, {'id': 2, 'name': 'Group C', 'version': 1}, ], 'usage': [{ 'url': '/container/{}'.format(vertical.location), 'label': 'Test Unit 0 / Test Content Experiment 0', 'validation': None, }, { 'url': '/container/{}'.format(vertical1.location), 'label': 'Test Unit 1 / Test Content Experiment 1', 'validation': None, }], }] self.assertEqual(actual, expected) def test_can_handle_without_parent(self): """ Test if it possible to handle case when split_test has no parent. """ self._add_user_partitions() # Create split test without parent. 
with modulestore().branch_setting(ModuleStoreEnum.Branch.published_only): orphan = modulestore().create_item( ModuleStoreEnum.UserID.test, self.course.id, 'split_test', ) orphan.user_partition_id = 0 orphan.display_name = 'Test Content Experiment' modulestore().update_item(orphan, ModuleStoreEnum.UserID.test) self.save_course() actual = GroupConfiguration.get_content_experiment_usage_info(self.store, self.course) self.assertEqual(actual, {0: []}) class GroupConfigurationsValidationTestCase(CourseTestCase, HelperMethods): """ Tests for validation in Group Configurations. """ def setUp(self): super(GroupConfigurationsValidationTestCase, self).setUp() @patch('xmodule.split_test_module.SplitTestDescriptor.validate_split_test') def verify_validation_add_usage_info(self, expected_result, mocked_message, mocked_validation_messages): """ Helper method for testing validation information present after add_usage_info. """ self._add_user_partitions() split_test = self._create_content_experiment(cid=0, name_suffix='0')[1] validation = StudioValidation(split_test.location) validation.add(mocked_message) mocked_validation_messages.return_value = validation group_configuration = GroupConfiguration.get_split_test_partitions_with_usage(self.store, self.course)[0] self.assertEqual(expected_result.to_json(), group_configuration['usage'][0]['validation']) def test_error_message_present(self): """ Tests if validation message is present (error case). """ mocked_message = StudioValidationMessage(StudioValidationMessage.ERROR, u"Validation message") expected_result = StudioValidationMessage( StudioValidationMessage.ERROR, u"This content experiment has issues that affect content visibility." ) self.verify_validation_add_usage_info(expected_result, mocked_message) # pylint: disable=no-value-for-parameter def test_warning_message_present(self): """ Tests if validation message is present (warning case). 
""" mocked_message = StudioValidationMessage(StudioValidationMessage.WARNING, u"Validation message") expected_result = StudioValidationMessage( StudioValidationMessage.WARNING, u"This content experiment has issues that affect content visibility." ) self.verify_validation_add_usage_info(expected_result, mocked_message) # pylint: disable=no-value-for-parameter @patch('xmodule.split_test_module.SplitTestDescriptor.validate_split_test') def verify_validation_update_usage_info(self, expected_result, mocked_message, mocked_validation_messages): """ Helper method for testing validation information present after update_usage_info. """ self._add_user_partitions() split_test = self._create_content_experiment(cid=0, name_suffix='0')[1] validation = StudioValidation(split_test.location) if mocked_message is not None: validation.add(mocked_message) mocked_validation_messages.return_value = validation group_configuration = GroupConfiguration.update_usage_info( self.store, self.course, self.course.user_partitions[0] ) self.assertEqual( expected_result.to_json() if expected_result is not None else None, group_configuration['usage'][0]['validation'] ) def test_update_usage_info(self): """ Tests if validation message is present when updating usage info. """ mocked_message = StudioValidationMessage(StudioValidationMessage.WARNING, u"Validation message") expected_result = StudioValidationMessage( StudioValidationMessage.WARNING, u"This content experiment has issues that affect content visibility." ) # pylint: disable=no-value-for-parameter self.verify_validation_update_usage_info(expected_result, mocked_message) def test_update_usage_info_no_message(self): """ Tests if validation message is not present when updating usage info. """ self.verify_validation_update_usage_info(None, None) # pylint: disable=no-value-for-parameter
agpl-3.0
Innovahn/cybex
addons/account_payment/__init__.py
436
1279
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## #---------------------------------------------------------- # Init Sales #---------------------------------------------------------- import account_payment import wizard import account_move_line import account_invoice import report # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
XTAv2/Enigma2
tools/host_tools/FormatConverter/satxml.py
112
2759
import os from datasource import datasource from xml.dom import minidom from xml.dom.minidom import Document from input import inputText class satxml(datasource): def __init__(self, filename = "satellites.xml"): self.filename = filename datasource.__init__(self) if not os.path.isfile(filename): print "File %s doesn't exist. Creating it." % filename def getStatus(self): text = datasource.getStatus(self) return text def getCapabilities(self): return [("set filename", self.setFilename), ("read file", self.read), ("write file", self.write), ("print all", self.printAll)] def getName(self): return "satellites.xml" def setFilename(self): print "Please give a filename <satellites.xml>:" filename = inputText() if filename == "": self.filename = "satellites.xml" else: self.filename = filename print "Filename set to %s" % self.filename def read(self): basicsatxml = minidom.parse(self.filename) for sat in basicsatxml.firstChild.childNodes: if sat.nodeType == sat.ELEMENT_NODE and sat.localName == "sat": print sat.localName satname = str(sat.getAttribute("name")) satpos = str(sat.getAttribute("position")) self.addSat(satname, satpos) for transponder in sat.childNodes: if transponder.nodeType == transponder.ELEMENT_NODE and transponder.localName == "transponder": parameters = {} paramlist = ["frequency", "symbol_rate", "polarization", "fec", "system", "modulation", "tsid", "onid"] for param in paramlist: entry = str(transponder.getAttribute(param)) if entry != "": parameters[param] = entry if len(parameters.keys()) > 1: self.addTransponder(satpos, parameters) print self.transponderlist def write(self): satxml = Document() satellites = satxml.createElement("satellites") satxml.appendChild(satellites) satlist = self.transponderlist.keys() print self.transponderlist satlist.sort() for sat in satlist: xmlsat = satxml.createElement("sat") xmlsat.setAttribute("name", self.satnames[sat]) xmlsat.setAttribute("flags", "1") xmlsat.setAttribute("position", sat) 
satellites.appendChild(xmlsat) transponders = self.transponderlist[sat] transponders.sort(key = lambda a: a["frequency"]) for transponder in transponders: xmltransponder = satxml.createElement("transponder") paramlist = ["frequency", "symbol_rate", "polarization", "fec", "system", "modulation", "tsid", "onid"] for param in paramlist: if transponder.has_key(param): xmltransponder.setAttribute(param, transponder[param]) xmlsat.appendChild(xmltransponder) prettyxml = satxml.toprettyxml() print prettyxml file = open(self.filename, "w") file.write(prettyxml) file.close()
gpl-2.0
Ayub-Khan/edx-platform
common/djangoapps/xblock_django/tests/test_user_service.py
132
3992
""" Tests for the DjangoXBlockUserService. """ from django.test import TestCase from xblock_django.user_service import ( DjangoXBlockUserService, ATTR_KEY_IS_AUTHENTICATED, ATTR_KEY_USER_ID, ATTR_KEY_USERNAME, ATTR_KEY_USER_IS_STAFF, ) from student.models import anonymous_id_for_user from student.tests.factories import UserFactory, AnonymousUserFactory from opaque_keys.edx.keys import CourseKey class UserServiceTestCase(TestCase): """ Tests for the DjangoXBlockUserService. """ def setUp(self): super(UserServiceTestCase, self).setUp() self.user = UserFactory(username="tester", email="test@tester.com") self.user.profile.name = "Test Tester" self.anon_user = AnonymousUserFactory() def assert_is_anon_xb_user(self, xb_user): """ A set of assertions for an anonymous XBlockUser. """ self.assertFalse(xb_user.opt_attrs[ATTR_KEY_IS_AUTHENTICATED]) self.assertIsNone(xb_user.full_name) self.assertListEqual(xb_user.emails, []) def assert_xblock_user_matches_django(self, xb_user, dj_user): """ A set of assertions for comparing a XBlockUser to a django User """ self.assertTrue(xb_user.opt_attrs[ATTR_KEY_IS_AUTHENTICATED]) self.assertEqual(xb_user.emails[0], dj_user.email) self.assertEqual(xb_user.full_name, dj_user.profile.name) self.assertEqual(xb_user.opt_attrs[ATTR_KEY_USERNAME], dj_user.username) self.assertEqual(xb_user.opt_attrs[ATTR_KEY_USER_ID], dj_user.id) self.assertFalse(xb_user.opt_attrs[ATTR_KEY_USER_IS_STAFF]) def test_convert_anon_user(self): """ Tests for convert_django_user_to_xblock_user behavior when django user is AnonymousUser. """ django_user_service = DjangoXBlockUserService(self.anon_user) xb_user = django_user_service.get_current_user() self.assertTrue(xb_user.is_current_user) self.assert_is_anon_xb_user(xb_user) def test_convert_authenticate_user(self): """ Tests for convert_django_user_to_xblock_user behavior when django user is User. 
""" django_user_service = DjangoXBlockUserService(self.user) xb_user = django_user_service.get_current_user() self.assertTrue(xb_user.is_current_user) self.assert_xblock_user_matches_django(xb_user, self.user) def test_get_anonymous_user_id_returns_none_for_non_staff_users(self): """ Tests for anonymous_user_id method to return None if user is Non-Staff. """ django_user_service = DjangoXBlockUserService(self.user, user_is_staff=False) anonymous_user_id = django_user_service.get_anonymous_user_id(username=self.user.username, course_id='edx/toy/2012_Fall') self.assertIsNone(anonymous_user_id) def test_get_anonymous_user_id_returns_none_for_non_existing_users(self): """ Tests for anonymous_user_id method to return None username does not exist in system. """ django_user_service = DjangoXBlockUserService(self.user, user_is_staff=True) anonymous_user_id = django_user_service.get_anonymous_user_id(username="No User", course_id='edx/toy/2012_Fall') self.assertIsNone(anonymous_user_id) def test_get_anonymous_user_id_returns_id_for_existing_users(self): """ Tests for anonymous_user_id method returns anonymous user id for a user. """ course_key = CourseKey.from_string('edX/toy/2012_Fall') anon_user_id = anonymous_id_for_user( user=self.user, course_id=course_key, save=True ) django_user_service = DjangoXBlockUserService(self.user, user_is_staff=True) anonymous_user_id = django_user_service.get_anonymous_user_id( username=self.user.username, course_id='edX/toy/2012_Fall' ) self.assertEqual(anonymous_user_id, anon_user_id)
agpl-3.0
inteligencia-coletiva-lsd/pybossa
pybossa/cache/site_stats.py
2
5284
# -*- coding: utf8 -*- # This file is part of PyBossa. # # Copyright (C) 2014 SF Isle of Man Limited # # PyBossa is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # PyBossa is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with PyBossa. If not, see <http://www.gnu.org/licenses/>. """Cache module for site statistics.""" import json import pygeoip from sqlalchemy.sql import text from flask import current_app from pybossa.core import db from pybossa.cache import cache, ONE_DAY session = db.slave_session @cache(timeout=ONE_DAY, key_prefix="site_n_auth_users") def n_auth_users(): """Return number of authenticated users.""" sql = text('''SELECT COUNT("user".id) AS n_auth FROM "user";''') results = session.execute(sql) for row in results: n_auth = row.n_auth return n_auth or 0 @cache(timeout=ONE_DAY, key_prefix="site_n_anon_users") def n_anon_users(): """Return number of anonymous users.""" sql = text('''SELECT COUNT(DISTINCT(task_run.user_ip)) AS n_anon FROM task_run;''') results = session.execute(sql) for row in results: n_anon = row.n_anon return n_anon or 0 @cache(timeout=ONE_DAY, key_prefix="site_n_tasks") def n_tasks_site(): """Return number of tasks in the server.""" sql = text('''SELECT COUNT(task.id) AS n_tasks FROM task''') results = session.execute(sql) for row in results: n_tasks = row.n_tasks return n_tasks or 0 @cache(timeout=ONE_DAY, key_prefix="site_n_total_tasks") def n_total_tasks_site(): """Return number of total tasks based on redundancy.""" sql = text('''SELECT SUM(n_answers) AS n_tasks FROM task''') 
results = session.execute(sql) for row in results: total = row.n_tasks return total or 0 @cache(timeout=ONE_DAY, key_prefix="site_n_task_runs") def n_task_runs_site(): """Return number of task runs in the server.""" sql = text('''SELECT COUNT(task_run.id) AS n_task_runs FROM task_run''') results = session.execute(sql) for row in results: n_task_runs = row.n_task_runs return n_task_runs or 0 @cache(timeout=ONE_DAY, key_prefix="site_top5_apps_24_hours") def get_top5_projects_24_hours(): """Return the top 5 projects more active in the last 24 hours.""" # Top 5 Most active apps in last 24 hours sql = text('''SELECT project.id, project.name, project.short_name, project.info, COUNT(task_run.project_id) AS n_answers FROM project, task_run WHERE project.id=task_run.project_id AND project.hidden=0 AND DATE(task_run.finish_time) > NOW() - INTERVAL '24 hour' AND DATE(task_run.finish_time) <= NOW() GROUP BY project.id ORDER BY n_answers DESC LIMIT 5;''') results = session.execute(sql, dict(limit=5)) top5_apps_24_hours = [] for row in results: tmp = dict(id=row.id, name=row.name, short_name=row.short_name, info=dict(json.loads(row.info)), n_answers=row.n_answers) top5_apps_24_hours.append(tmp) return top5_apps_24_hours @cache(timeout=ONE_DAY, key_prefix="site_top5_users_24_hours") def get_top5_users_24_hours(): """Return top 5 users in last 24 hours.""" # Top 5 Most active users in last 24 hours sql = text('''SELECT "user".id, "user".fullname, "user".name, COUNT(task_run.project_id) AS n_answers FROM "user", task_run WHERE "user".id=task_run.user_id AND DATE(task_run.finish_time) > NOW() - INTERVAL '24 hour' AND DATE(task_run.finish_time) <= NOW() GROUP BY "user".id ORDER BY n_answers DESC LIMIT 5;''') results = session.execute(sql, dict(limit=5)) top5_users_24_hours = [] for row in results: user = dict(id=row.id, fullname=row.fullname, name=row.name, n_answers=row.n_answers) top5_users_24_hours.append(user) return top5_users_24_hours @cache(timeout=ONE_DAY, 
key_prefix="site_locs") def get_locs(): # pragma: no cover """Return locations (latitude, longitude) for anonymous users.""" # All IP addresses from anonymous users to create a map locs = [] if current_app.config['GEO']: sql = '''SELECT DISTINCT(user_ip) FROM task_run WHERE user_ip IS NOT NULL;''' results = session.execute(sql) geolite = current_app.root_path + '/../dat/GeoLiteCity.dat' gic = pygeoip.GeoIP(geolite) for row in results: loc = gic.record_by_addr(row.user_ip) if loc is None: loc = {} if (len(loc.keys()) == 0): loc['latitude'] = 0 loc['longitude'] = 0 locs.append(dict(loc=loc)) return locs
agpl-3.0
NipponSysits/IIS.Git-Connector
GitConnector/Content/components/highlight/docs/conf.py
20
7771
# -*- coding: utf-8 -*- # # highlight.js documentation build configuration file, created by # sphinx-quickstart on Wed Sep 12 23:48:27 2012. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'highlight.js' copyright = u'2012, Ivan Sagalaev' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '8.0' # The full version, including alpha/beta/rc tags. release = '8.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
#language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'highlightjsdoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). 
#'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'highlightjs.tex', u'highlight.js Documentation', u'Ivan Sagalaev', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'highlightjs', u'highlight.js Documentation', [u'Ivan Sagalaev'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'highlightjs', u'highlight.js Documentation', u'Ivan Sagalaev', 'highlightjs', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote'
mit
styxit/CouchPotatoServer
libs/tornado/websocket.py
13
33744
"""Implementation of the WebSocket protocol. `WebSockets <http://dev.w3.org/html5/websockets/>`_ allow for bidirectional communication between the browser and server. .. warning:: The WebSocket protocol was recently finalized as `RFC 6455 <http://tools.ietf.org/html/rfc6455>`_ and is not yet supported in all browsers. Refer to http://caniuse.com/websockets for details on compatibility. In addition, during development the protocol went through several incompatible versions, and some browsers only support older versions. By default this module only supports the latest version of the protocol, but optional support for an older version (known as "draft 76" or "hixie-76") can be enabled by overriding `WebSocketHandler.allow_draft76` (see that method's documentation for caveats). """ from __future__ import absolute_import, division, print_function, with_statement # Author: Jacob Kristhammar, 2010 import array import base64 import collections import functools import hashlib import os import struct import time import tornado.escape import tornado.web from tornado.concurrent import TracebackFuture from tornado.escape import utf8, native_str from tornado import httpclient, httputil from tornado.ioloop import IOLoop from tornado.iostream import StreamClosedError from tornado.log import gen_log, app_log from tornado.netutil import Resolver from tornado import simple_httpclient from tornado.util import bytes_type, unicode_type try: xrange # py2 except NameError: xrange = range # py3 class WebSocketError(Exception): pass class WebSocketClosedError(WebSocketError): """Raised by operations on a closed connection. .. versionadded:: 3.2 """ pass class WebSocketHandler(tornado.web.RequestHandler): """Subclass this class to create a basic WebSocket handler. Override `on_message` to handle incoming messages, and use `write_message` to send messages to the client. You can also override `open` and `on_close` to handle opened and closed connections. 
See http://dev.w3.org/html5/websockets/ for details on the JavaScript interface. The protocol is specified at http://tools.ietf.org/html/rfc6455. Here is an example WebSocket handler that echos back all received messages back to the client:: class EchoWebSocket(websocket.WebSocketHandler): def open(self): print "WebSocket opened" def on_message(self, message): self.write_message(u"You said: " + message) def on_close(self): print "WebSocket closed" WebSockets are not standard HTTP connections. The "handshake" is HTTP, but after the handshake, the protocol is message-based. Consequently, most of the Tornado HTTP facilities are not available in handlers of this type. The only communication methods available to you are `write_message()`, `ping()`, and `close()`. Likewise, your request handler class should implement `open()` method rather than ``get()`` or ``post()``. If you map the handler above to ``/websocket`` in your application, you can invoke it in JavaScript with:: var ws = new WebSocket("ws://localhost:8888/websocket"); ws.onopen = function() { ws.send("Hello, world"); }; ws.onmessage = function (evt) { alert(evt.data); }; This script pops up an alert box that says "You said: Hello, world". """ def __init__(self, application, request, **kwargs): tornado.web.RequestHandler.__init__(self, application, request, **kwargs) self.stream = request.connection.stream self.ws_connection = None def _execute(self, transforms, *args, **kwargs): self.open_args = args self.open_kwargs = kwargs # Websocket only supports GET method if self.request.method != 'GET': self.stream.write(tornado.escape.utf8( "HTTP/1.1 405 Method Not Allowed\r\n\r\n" )) self.stream.close() return # Upgrade header should be present and should be equal to WebSocket if self.request.headers.get("Upgrade", "").lower() != 'websocket': self.stream.write(tornado.escape.utf8( "HTTP/1.1 400 Bad Request\r\n\r\n" "Can \"Upgrade\" only to \"WebSocket\"." 
)) self.stream.close() return # Connection header should be upgrade. Some proxy servers/load balancers # might mess with it. headers = self.request.headers connection = map(lambda s: s.strip().lower(), headers.get("Connection", "").split(",")) if 'upgrade' not in connection: self.stream.write(tornado.escape.utf8( "HTTP/1.1 400 Bad Request\r\n\r\n" "\"Connection\" must be \"Upgrade\"." )) self.stream.close() return # The difference between version 8 and 13 is that in 8 the # client sends a "Sec-Websocket-Origin" header and in 13 it's # simply "Origin". if self.request.headers.get("Sec-WebSocket-Version") in ("7", "8", "13"): self.ws_connection = WebSocketProtocol13(self) self.ws_connection.accept_connection() elif (self.allow_draft76() and "Sec-WebSocket-Version" not in self.request.headers): self.ws_connection = WebSocketProtocol76(self) self.ws_connection.accept_connection() else: self.stream.write(tornado.escape.utf8( "HTTP/1.1 426 Upgrade Required\r\n" "Sec-WebSocket-Version: 8\r\n\r\n")) self.stream.close() def write_message(self, message, binary=False): """Sends the given message to the client of this Web Socket. The message may be either a string or a dict (which will be encoded as json). If the ``binary`` argument is false, the message will be sent as utf8; in binary mode any byte string is allowed. If the connection is already closed, raises `WebSocketClosedError`. .. versionchanged:: 3.2 `WebSocketClosedError` was added (previously a closed connection would raise an `AttributeError`) """ if self.ws_connection is None: raise WebSocketClosedError() if isinstance(message, dict): message = tornado.escape.json_encode(message) self.ws_connection.write_message(message, binary=binary) def select_subprotocol(self, subprotocols): """Invoked when a new WebSocket requests specific subprotocols. ``subprotocols`` is a list of strings identifying the subprotocols proposed by the client. 
This method may be overridden to return one of those strings to select it, or ``None`` to not select a subprotocol. Failure to select a subprotocol does not automatically abort the connection, although clients may close the connection if none of their proposed subprotocols was selected. """ return None def open(self): """Invoked when a new WebSocket is opened. The arguments to `open` are extracted from the `tornado.web.URLSpec` regular expression, just like the arguments to `tornado.web.RequestHandler.get`. """ pass def on_message(self, message): """Handle incoming messages on the WebSocket This method must be overridden. """ raise NotImplementedError def ping(self, data): """Send ping frame to the remote end.""" if self.ws_connection is None: raise WebSocketClosedError() self.ws_connection.write_ping(data) def on_pong(self, data): """Invoked when the response to a ping frame is received.""" pass def on_close(self): """Invoked when the WebSocket is closed.""" pass def close(self): """Closes this Web Socket. Once the close handshake is successful the socket will be closed. """ if self.ws_connection: self.ws_connection.close() self.ws_connection = None def allow_draft76(self): """Override to enable support for the older "draft76" protocol. The draft76 version of the websocket protocol is disabled by default due to security concerns, but it can be enabled by overriding this method to return True. Connections using the draft76 protocol do not support the ``binary=True`` flag to `write_message`. Support for the draft76 protocol is deprecated and will be removed in a future version of Tornado. """ return False def set_nodelay(self, value): """Set the no-delay flag for this stream. By default, small messages may be delayed and/or combined to minimize the number of packets sent. This can sometimes cause 200-500ms delays due to the interaction between Nagle's algorithm and TCP delayed ACKs. 
To reduce this delay (at the expense of possibly increasing bandwidth usage), call ``self.set_nodelay(True)`` once the websocket connection is established. See `.BaseIOStream.set_nodelay` for additional details. .. versionadded:: 3.1 """ self.stream.set_nodelay(value) def get_websocket_scheme(self): """Return the url scheme used for this request, either "ws" or "wss". This is normally decided by HTTPServer, but applications may wish to override this if they are using an SSL proxy that does not provide the X-Scheme header as understood by HTTPServer. Note that this is only used by the draft76 protocol. """ return "wss" if self.request.protocol == "https" else "ws" def async_callback(self, callback, *args, **kwargs): """Obsolete - catches exceptions from the wrapped function. This function is normally unncecessary thanks to `tornado.stack_context`. """ return self.ws_connection.async_callback(callback, *args, **kwargs) def _not_supported(self, *args, **kwargs): raise Exception("Method not supported for Web Sockets") def on_connection_close(self): if self.ws_connection: self.ws_connection.on_connection_close() self.ws_connection = None self.on_close() for method in ["write", "redirect", "set_header", "send_error", "set_cookie", "set_status", "flush", "finish"]: setattr(WebSocketHandler, method, WebSocketHandler._not_supported) class WebSocketProtocol(object): """Base class for WebSocket protocol versions. """ def __init__(self, handler): self.handler = handler self.request = handler.request self.stream = handler.stream self.client_terminated = False self.server_terminated = False def async_callback(self, callback, *args, **kwargs): """Wrap callbacks with this if they are used on asynchronous requests. Catches exceptions properly and closes this WebSocket if an exception is uncaught. 
""" if args or kwargs: callback = functools.partial(callback, *args, **kwargs) def wrapper(*args, **kwargs): try: return callback(*args, **kwargs) except Exception: app_log.error("Uncaught exception in %s", self.request.path, exc_info=True) self._abort() return wrapper def on_connection_close(self): self._abort() def _abort(self): """Instantly aborts the WebSocket connection by closing the socket""" self.client_terminated = True self.server_terminated = True self.stream.close() # forcibly tear down the connection self.close() # let the subclass cleanup class WebSocketProtocol76(WebSocketProtocol): """Implementation of the WebSockets protocol, version hixie-76. This class provides basic functionality to process WebSockets requests as specified in http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76 """ def __init__(self, handler): WebSocketProtocol.__init__(self, handler) self.challenge = None self._waiting = None def accept_connection(self): try: self._handle_websocket_headers() except ValueError: gen_log.debug("Malformed WebSocket request received") self._abort() return scheme = self.handler.get_websocket_scheme() # draft76 only allows a single subprotocol subprotocol_header = '' subprotocol = self.request.headers.get("Sec-WebSocket-Protocol", None) if subprotocol: selected = self.handler.select_subprotocol([subprotocol]) if selected: assert selected == subprotocol subprotocol_header = "Sec-WebSocket-Protocol: %s\r\n" % selected # Write the initial headers before attempting to read the challenge. # This is necessary when using proxies (such as HAProxy), which # need to see the Upgrade headers before passing through the # non-HTTP traffic that follows. 
self.stream.write(tornado.escape.utf8( "HTTP/1.1 101 WebSocket Protocol Handshake\r\n" "Upgrade: WebSocket\r\n" "Connection: Upgrade\r\n" "Server: TornadoServer/%(version)s\r\n" "Sec-WebSocket-Origin: %(origin)s\r\n" "Sec-WebSocket-Location: %(scheme)s://%(host)s%(uri)s\r\n" "%(subprotocol)s" "\r\n" % (dict( version=tornado.version, origin=self.request.headers["Origin"], scheme=scheme, host=self.request.host, uri=self.request.uri, subprotocol=subprotocol_header)))) self.stream.read_bytes(8, self._handle_challenge) def challenge_response(self, challenge): """Generates the challenge response that's needed in the handshake The challenge parameter should be the raw bytes as sent from the client. """ key_1 = self.request.headers.get("Sec-Websocket-Key1") key_2 = self.request.headers.get("Sec-Websocket-Key2") try: part_1 = self._calculate_part(key_1) part_2 = self._calculate_part(key_2) except ValueError: raise ValueError("Invalid Keys/Challenge") return self._generate_challenge_response(part_1, part_2, challenge) def _handle_challenge(self, challenge): try: challenge_response = self.challenge_response(challenge) except ValueError: gen_log.debug("Malformed key data in WebSocket request") self._abort() return self._write_response(challenge_response) def _write_response(self, challenge): self.stream.write(challenge) self.async_callback(self.handler.open)(*self.handler.open_args, **self.handler.open_kwargs) self._receive_message() def _handle_websocket_headers(self): """Verifies all invariant- and required headers If a header is missing or have an incorrect value ValueError will be raised """ fields = ("Origin", "Host", "Sec-Websocket-Key1", "Sec-Websocket-Key2") if not all(map(lambda f: self.request.headers.get(f), fields)): raise ValueError("Missing/Invalid WebSocket headers") def _calculate_part(self, key): """Processes the key headers and calculates their key value. 
Raises ValueError when feed invalid key.""" # pyflakes complains about variable reuse if both of these lines use 'c' number = int(''.join(c for c in key if c.isdigit())) spaces = len([c2 for c2 in key if c2.isspace()]) try: key_number = number // spaces except (ValueError, ZeroDivisionError): raise ValueError return struct.pack(">I", key_number) def _generate_challenge_response(self, part_1, part_2, part_3): m = hashlib.md5() m.update(part_1) m.update(part_2) m.update(part_3) return m.digest() def _receive_message(self): self.stream.read_bytes(1, self._on_frame_type) def _on_frame_type(self, byte): frame_type = ord(byte) if frame_type == 0x00: self.stream.read_until(b"\xff", self._on_end_delimiter) elif frame_type == 0xff: self.stream.read_bytes(1, self._on_length_indicator) else: self._abort() def _on_end_delimiter(self, frame): if not self.client_terminated: self.async_callback(self.handler.on_message)( frame[:-1].decode("utf-8", "replace")) if not self.client_terminated: self._receive_message() def _on_length_indicator(self, byte): if ord(byte) != 0x00: self._abort() return self.client_terminated = True self.close() def write_message(self, message, binary=False): """Sends the given message to the client of this Web Socket.""" if binary: raise ValueError( "Binary messages not supported by this version of websockets") if isinstance(message, unicode_type): message = message.encode("utf-8") assert isinstance(message, bytes_type) self.stream.write(b"\x00" + message + b"\xff") def write_ping(self, data): """Send ping frame.""" raise ValueError("Ping messages not supported by this version of websockets") def close(self): """Closes the WebSocket connection.""" if not self.server_terminated: if not self.stream.closed(): self.stream.write("\xff\x00") self.server_terminated = True if self.client_terminated: if self._waiting is not None: self.stream.io_loop.remove_timeout(self._waiting) self._waiting = None self.stream.close() elif self._waiting is None: self._waiting = 
self.stream.io_loop.add_timeout( time.time() + 5, self._abort) class WebSocketProtocol13(WebSocketProtocol): """Implementation of the WebSocket protocol from RFC 6455. This class supports versions 7 and 8 of the protocol in addition to the final version 13. """ def __init__(self, handler, mask_outgoing=False): WebSocketProtocol.__init__(self, handler) self.mask_outgoing = mask_outgoing self._final_frame = False self._frame_opcode = None self._masked_frame = None self._frame_mask = None self._frame_length = None self._fragmented_message_buffer = None self._fragmented_message_opcode = None self._waiting = None def accept_connection(self): try: self._handle_websocket_headers() self._accept_connection() except ValueError: gen_log.debug("Malformed WebSocket request received", exc_info=True) self._abort() return def _handle_websocket_headers(self): """Verifies all invariant- and required headers If a header is missing or have an incorrect value ValueError will be raised """ fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version") if not all(map(lambda f: self.request.headers.get(f), fields)): raise ValueError("Missing/Invalid WebSocket headers") @staticmethod def compute_accept_value(key): """Computes the value for the Sec-WebSocket-Accept header, given the value for Sec-WebSocket-Key. 
""" sha1 = hashlib.sha1() sha1.update(utf8(key)) sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11") # Magic value return native_str(base64.b64encode(sha1.digest())) def _challenge_response(self): return WebSocketProtocol13.compute_accept_value( self.request.headers.get("Sec-Websocket-Key")) def _accept_connection(self): subprotocol_header = '' subprotocols = self.request.headers.get("Sec-WebSocket-Protocol", '') subprotocols = [s.strip() for s in subprotocols.split(',')] if subprotocols: selected = self.handler.select_subprotocol(subprotocols) if selected: assert selected in subprotocols subprotocol_header = "Sec-WebSocket-Protocol: %s\r\n" % selected self.stream.write(tornado.escape.utf8( "HTTP/1.1 101 Switching Protocols\r\n" "Upgrade: websocket\r\n" "Connection: Upgrade\r\n" "Sec-WebSocket-Accept: %s\r\n" "%s" "\r\n" % (self._challenge_response(), subprotocol_header))) self.async_callback(self.handler.open)(*self.handler.open_args, **self.handler.open_kwargs) self._receive_frame() def _write_frame(self, fin, opcode, data): if fin: finbit = 0x80 else: finbit = 0 frame = struct.pack("B", finbit | opcode) l = len(data) if self.mask_outgoing: mask_bit = 0x80 else: mask_bit = 0 if l < 126: frame += struct.pack("B", l | mask_bit) elif l <= 0xFFFF: frame += struct.pack("!BH", 126 | mask_bit, l) else: frame += struct.pack("!BQ", 127 | mask_bit, l) if self.mask_outgoing: mask = os.urandom(4) data = mask + _websocket_mask(mask, data) frame += data self.stream.write(frame) def write_message(self, message, binary=False): """Sends the given message to the client of this Web Socket.""" if binary: opcode = 0x2 else: opcode = 0x1 message = tornado.escape.utf8(message) assert isinstance(message, bytes_type) try: self._write_frame(True, opcode, message) except StreamClosedError: self._abort() def write_ping(self, data): """Send ping frame.""" assert isinstance(data, bytes_type) self._write_frame(True, 0x9, data) def _receive_frame(self): try: self.stream.read_bytes(2, 
self._on_frame_start) except StreamClosedError: self._abort() def _on_frame_start(self, data): header, payloadlen = struct.unpack("BB", data) self._final_frame = header & 0x80 reserved_bits = header & 0x70 self._frame_opcode = header & 0xf self._frame_opcode_is_control = self._frame_opcode & 0x8 if reserved_bits: # client is using as-yet-undefined extensions; abort self._abort() return self._masked_frame = bool(payloadlen & 0x80) payloadlen = payloadlen & 0x7f if self._frame_opcode_is_control and payloadlen >= 126: # control frames must have payload < 126 self._abort() return try: if payloadlen < 126: self._frame_length = payloadlen if self._masked_frame: self.stream.read_bytes(4, self._on_masking_key) else: self.stream.read_bytes(self._frame_length, self._on_frame_data) elif payloadlen == 126: self.stream.read_bytes(2, self._on_frame_length_16) elif payloadlen == 127: self.stream.read_bytes(8, self._on_frame_length_64) except StreamClosedError: self._abort() def _on_frame_length_16(self, data): self._frame_length = struct.unpack("!H", data)[0] try: if self._masked_frame: self.stream.read_bytes(4, self._on_masking_key) else: self.stream.read_bytes(self._frame_length, self._on_frame_data) except StreamClosedError: self._abort() def _on_frame_length_64(self, data): self._frame_length = struct.unpack("!Q", data)[0] try: if self._masked_frame: self.stream.read_bytes(4, self._on_masking_key) else: self.stream.read_bytes(self._frame_length, self._on_frame_data) except StreamClosedError: self._abort() def _on_masking_key(self, data): self._frame_mask = data try: self.stream.read_bytes(self._frame_length, self._on_masked_frame_data) except StreamClosedError: self._abort() def _on_masked_frame_data(self, data): self._on_frame_data(_websocket_mask(self._frame_mask, data)) def _on_frame_data(self, data): if self._frame_opcode_is_control: # control frames may be interleaved with a series of fragmented # data frames, so control frames must not interact with # self._fragmented_* 
if not self._final_frame: # control frames must not be fragmented self._abort() return opcode = self._frame_opcode elif self._frame_opcode == 0: # continuation frame if self._fragmented_message_buffer is None: # nothing to continue self._abort() return self._fragmented_message_buffer += data if self._final_frame: opcode = self._fragmented_message_opcode data = self._fragmented_message_buffer self._fragmented_message_buffer = None else: # start of new data message if self._fragmented_message_buffer is not None: # can't start new message until the old one is finished self._abort() return if self._final_frame: opcode = self._frame_opcode else: self._fragmented_message_opcode = self._frame_opcode self._fragmented_message_buffer = data if self._final_frame: self._handle_message(opcode, data) if not self.client_terminated: self._receive_frame() def _handle_message(self, opcode, data): if self.client_terminated: return if opcode == 0x1: # UTF-8 data try: decoded = data.decode("utf-8") except UnicodeDecodeError: self._abort() return self.async_callback(self.handler.on_message)(decoded) elif opcode == 0x2: # Binary data self.async_callback(self.handler.on_message)(data) elif opcode == 0x8: # Close self.client_terminated = True self.close() elif opcode == 0x9: # Ping self._write_frame(True, 0xA, data) elif opcode == 0xA: # Pong self.async_callback(self.handler.on_pong)(data) else: self._abort() def close(self): """Closes the WebSocket connection.""" if not self.server_terminated: if not self.stream.closed(): self._write_frame(True, 0x8, b"") self.server_terminated = True if self.client_terminated: if self._waiting is not None: self.stream.io_loop.remove_timeout(self._waiting) self._waiting = None self.stream.close() elif self._waiting is None: # Give the client a few seconds to complete a clean shutdown, # otherwise just close the connection. 
self._waiting = self.stream.io_loop.add_timeout( self.stream.io_loop.time() + 5, self._abort) class WebSocketClientConnection(simple_httpclient._HTTPConnection): """WebSocket client connection. This class should not be instantiated directly; use the `websocket_connect` function instead. """ def __init__(self, io_loop, request): self.connect_future = TracebackFuture() self.read_future = None self.read_queue = collections.deque() self.key = base64.b64encode(os.urandom(16)) scheme, sep, rest = request.url.partition(':') scheme = {'ws': 'http', 'wss': 'https'}[scheme] request.url = scheme + sep + rest request.headers.update({ 'Upgrade': 'websocket', 'Connection': 'Upgrade', 'Sec-WebSocket-Key': self.key, 'Sec-WebSocket-Version': '13', }) self.resolver = Resolver(io_loop=io_loop) super(WebSocketClientConnection, self).__init__( io_loop, None, request, lambda: None, self._on_http_response, 104857600, self.resolver) def close(self): """Closes the websocket connection. .. versionadded:: 3.2 """ if self.protocol is not None: self.protocol.close() self.protocol = None def _on_close(self): self.on_message(None) self.resolver.close() super(WebSocketClientConnection, self)._on_close() def _on_http_response(self, response): if not self.connect_future.done(): if response.error: self.connect_future.set_exception(response.error) else: self.connect_future.set_exception(WebSocketError( "Non-websocket response")) def _handle_1xx(self, code): assert code == 101 assert self.headers['Upgrade'].lower() == 'websocket' assert self.headers['Connection'].lower() == 'upgrade' accept = WebSocketProtocol13.compute_accept_value(self.key) assert self.headers['Sec-Websocket-Accept'] == accept self.protocol = WebSocketProtocol13(self, mask_outgoing=True) self.protocol._receive_frame() if self._timeout is not None: self.io_loop.remove_timeout(self._timeout) self._timeout = None self.connect_future.set_result(self) def write_message(self, message, binary=False): """Sends a message to the WebSocket 
server.""" self.protocol.write_message(message, binary) def read_message(self, callback=None): """Reads a message from the WebSocket server. Returns a future whose result is the message, or None if the connection is closed. If a callback argument is given it will be called with the future when it is ready. """ assert self.read_future is None future = TracebackFuture() if self.read_queue: future.set_result(self.read_queue.popleft()) else: self.read_future = future if callback is not None: self.io_loop.add_future(future, callback) return future def on_message(self, message): if self.read_future is not None: self.read_future.set_result(message) self.read_future = None else: self.read_queue.append(message) def on_pong(self, data): pass def websocket_connect(url, io_loop=None, callback=None, connect_timeout=None): """Client-side websocket support. Takes a url and returns a Future whose result is a `WebSocketClientConnection`. .. versionchanged:: 3.2 Also accepts ``HTTPRequest`` objects in place of urls. """ if io_loop is None: io_loop = IOLoop.current() if isinstance(url, httpclient.HTTPRequest): assert connect_timeout is None request = url # Copy and convert the headers dict/object (see comments in # AsyncHTTPClient.fetch) request.headers = httputil.HTTPHeaders(request.headers) else: request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout) request = httpclient._RequestProxy( request, httpclient.HTTPRequest._DEFAULTS) conn = WebSocketClientConnection(io_loop, request) if callback is not None: io_loop.add_future(conn.connect_future, callback) return conn.connect_future def _websocket_mask_python(mask, data): """Websocket masking function. `mask` is a `bytes` object of length 4; `data` is a `bytes` object of any length. Returns a `bytes` object of the same length as `data` with the mask applied as specified in section 5.3 of RFC 6455. This pure-python implementation may be replaced by an optimized version when available. 
""" mask = array.array("B", mask) unmasked = array.array("B", data) for i in xrange(len(data)): unmasked[i] = unmasked[i] ^ mask[i % 4] if hasattr(unmasked, 'tobytes'): # tostring was deprecated in py32. It hasn't been removed, # but since we turn on deprecation warnings in our tests # we need to use the right one. return unmasked.tobytes() else: return unmasked.tostring() if os.environ.get('TORNADO_NO_EXTENSION'): # This environment variable exists to make it easier to do performance comparisons; # it's not guaranteed to remain supported in the future. _websocket_mask = _websocket_mask_python else: try: from tornado.speedups import websocket_mask as _websocket_mask except ImportError: _websocket_mask = _websocket_mask_python
gpl-3.0
aristanetworks/neutron
neutron/db/migration/alembic_migrations/heal_script.py
21
10163
# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import logging import alembic from alembic import autogenerate as autogen from alembic import context from alembic import op import sqlalchemy from sqlalchemy import schema as sa_schema import sqlalchemy.sql.expression as expr from sqlalchemy.sql import text from sqlalchemy import types from neutron.db.migration.models import frozen as frozen_models from neutron.i18n import _LI, _LW LOG = logging.getLogger(__name__) METHODS = {} def heal(): # This is needed else the heal script will start spewing # a lot of pointless warning messages from alembic. 
LOG.setLevel(logging.INFO) if context.is_offline_mode(): return models_metadata = frozen_models.get_metadata() # Compare metadata from models and metadata from migrations # Diff example: # [ ( 'add_table', # Table('bat', MetaData(bind=None), # Column('info', String(), table=<bat>), schema=None)), # ( 'remove_table', # Table(u'bar', MetaData(bind=None), # Column(u'data', VARCHAR(), table=<bar>), schema=None)), # ( 'add_column', # None, # 'foo', # Column('data', Integer(), table=<foo>)), # ( 'remove_column', # None, # 'foo', # Column(u'old_data', VARCHAR(), table=None)), # [ ( 'modify_nullable', # None, # 'foo', # u'x', # { 'existing_server_default': None, # 'existing_type': INTEGER()}, # True, # False)]] opts = { 'compare_type': _compare_type, 'compare_server_default': _compare_server_default, } mc = alembic.migration.MigrationContext.configure(op.get_bind(), opts=opts) set_storage_engine(op.get_bind(), "InnoDB") diff = autogen.compare_metadata(mc, models_metadata) for el in diff: execute_alembic_command(el) def execute_alembic_command(command): # Commands like add_table, remove_table, add_index, add_column, etc is a # tuple and can be handle after running special functions from alembic for # them. if isinstance(command, tuple): # Here methods add_table, drop_index, etc is running. Name of method is # the first element of the tuple, arguments to this method comes from # the next element(s). if command[0] in METHODS: METHODS[command[0]](*command[1:]) else: LOG.warning(_LW("Ignoring alembic command %s"), command[0]) else: # For all commands that changing type, nullable or other parameters # of the column is used alter_column method from alembic. 
parse_modify_command(command) def parse_modify_command(command): # From arguments of command is created op.alter_column() that has the # following syntax: # alter_column(table_name, column_name, nullable=None, # server_default=False, new_column_name=None, type_=None, # autoincrement=None, existing_type=None, # existing_server_default=False, existing_nullable=None, # existing_autoincrement=None, schema=None, **kw) bind = op.get_bind() for modified, schema, table, column, existing, old, new in command: if modified.endswith('type'): modified = 'type_' elif modified.endswith('nullable'): modified = 'nullable' insp = sqlalchemy.engine.reflection.Inspector.from_engine(bind) if column in insp.get_primary_keys(table) and new: return elif modified.endswith('default'): modified = 'server_default' if isinstance(new, basestring): new = text(new) kwargs = {modified: new, 'schema': schema} default = existing.get('existing_server_default') if default and isinstance(default, sa_schema.DefaultClause): if isinstance(default.arg, basestring): existing['existing_server_default'] = default.arg else: existing['existing_server_default'] = default.arg.text kwargs.update(existing) op.alter_column(table, column, **kwargs) def alembic_command_method(f): METHODS[f.__name__] = f return f @alembic_command_method def add_table(table): # Check if table has already exists and needs just to be renamed if not rename(table.name): table.create(bind=op.get_bind(), checkfirst=True) @alembic_command_method def add_index(index): bind = op.get_bind() insp = sqlalchemy.engine.reflection.Inspector.from_engine(bind) if index.name not in [idx['name'] for idx in insp.get_indexes(index.table.name)]: op.create_index(index.name, index.table.name, column_names(index)) @alembic_command_method def remove_table(table): # Tables should not be removed pass @alembic_command_method def remove_index(index): bind = op.get_bind() insp = sqlalchemy.engine.reflection.Inspector.from_engine(bind) index_names = [idx['name'] for 
idx in insp.get_indexes(index.table.name)] fk_names = [i['name'] for i in insp.get_foreign_keys(index.table.name)] if index.name in index_names and index.name not in fk_names: op.drop_index(index.name, index.table.name) @alembic_command_method def remove_column(schema, table_name, column): op.drop_column(table_name, column.name, schema=schema) @alembic_command_method def add_column(schema, table_name, column): op.add_column(table_name, column.copy(), schema=schema) @alembic_command_method def add_constraint(constraint): op.create_unique_constraint(constraint.name, constraint.table.name, column_names(constraint)) @alembic_command_method def remove_constraint(constraint): op.drop_constraint(constraint.name, constraint.table.name, type_='unique') @alembic_command_method def remove_fk(fk): op.drop_constraint(fk.name, fk.parent.name, type_='foreignkey') @alembic_command_method def add_fk(fk): fk_name = fk.name # As per Mike Bayer's comment, using _fk_spec method is preferable to # direct access to ForeignKeyConstraint attributes fk_spec = alembic.ddl.base._fk_spec(fk) fk_table = fk_spec[1] fk_ref = fk_spec[4] fk_local_cols = fk_spec[2] fk_remote_cols = fk_spec[5] op.create_foreign_key(fk_name, fk_table, fk_ref, fk_local_cols, fk_remote_cols) def check_if_table_exists(table): # This functions checks if table exists or not bind = op.get_bind() insp = sqlalchemy.engine.reflection.Inspector.from_engine(bind) return (table in insp.get_table_names() and table not in frozen_models.renamed_tables) def rename(table): # For tables that were renamed checks if the previous table exists # if it does the previous one will be renamed. 
# Returns True/False if it is needed to create new table if table in frozen_models.renamed_tables: if check_if_table_exists(frozen_models.renamed_tables[table]): op.rename_table(frozen_models.renamed_tables[table], table) LOG.info(_LI("Table %(old_t)r was renamed to %(new_t)r"), { 'old_t': table, 'new_t': frozen_models.renamed_tables[table]}) return True return False def column_names(obj): return [col.name for col in obj.columns if hasattr(col, 'name')] def _compare_type(ctxt, insp_col, meta_col, insp_type, meta_type): """Return True if types are different, False if not. Return None to allow the default implementation to compare these types. :param ctxt: alembic MigrationContext instance :param insp_col: reflected column :param meta_col: column from model :param insp_type: reflected column type :param meta_type: column type from model """ # some backends (e.g. mysql) don't provide native boolean type BOOLEAN_METADATA = (types.BOOLEAN, types.Boolean) BOOLEAN_SQL = BOOLEAN_METADATA + (types.INTEGER, types.Integer) if isinstance(meta_type, BOOLEAN_METADATA): return not isinstance(insp_type, BOOLEAN_SQL) return None # tells alembic to use the default comparison method def _compare_server_default(ctxt, ins_col, meta_col, insp_def, meta_def, rendered_meta_def): """Compare default values between model and db table. Return True if the defaults are different, False if not, or None to allow the default implementation to compare these defaults. 
:param ctxt: alembic MigrationContext instance :param insp_col: reflected column :param meta_col: column from model :param insp_def: reflected column default value :param meta_def: column default value from model :param rendered_meta_def: rendered column default value (from model) """ if (ctxt.dialect.name == 'mysql' and isinstance(meta_col.type, sqlalchemy.Boolean)): if meta_def is None or insp_def is None: return meta_def != insp_def return not ( isinstance(meta_def.arg, expr.True_) and insp_def == "'1'" or isinstance(meta_def.arg, expr.False_) and insp_def == "'0'" ) return None # tells alembic to use the default comparison method def set_storage_engine(bind, engine): insp = sqlalchemy.engine.reflection.Inspector.from_engine(bind) if bind.dialect.name == 'mysql': for table in insp.get_table_names(): if insp.get_table_options(table)['mysql_engine'] != engine: op.execute("ALTER TABLE %s ENGINE=%s" % (table, engine))
apache-2.0
bradderz77/Six
six.py
320
27344
"""Utilities for writing code that runs on Python 2 and 3""" # Copyright (c) 2010-2014 Benjamin Peterson # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from __future__ import absolute_import import functools import operator import sys import types __author__ = "Benjamin Peterson <benjamin@python.org>" __version__ = "1.8.0" # Useful for very coarse version differentiation. PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 if PY3: string_types = str, integer_types = int, class_types = type, text_type = str binary_type = bytes MAXSIZE = sys.maxsize else: string_types = basestring, integer_types = (int, long) class_types = (type, types.ClassType) text_type = unicode binary_type = str if sys.platform.startswith("java"): # Jython always uses 32 bits. MAXSIZE = int((1 << 31) - 1) else: # It's possible to have sizeof(long) != sizeof(Py_ssize_t). 
class X(object): def __len__(self): return 1 << 31 try: len(X()) except OverflowError: # 32-bit MAXSIZE = int((1 << 31) - 1) else: # 64-bit MAXSIZE = int((1 << 63) - 1) del X def _add_doc(func, doc): """Add documentation to a function.""" func.__doc__ = doc def _import_module(name): """Import module, returning the module after the last dot.""" __import__(name) return sys.modules[name] class _LazyDescr(object): def __init__(self, name): self.name = name def __get__(self, obj, tp): result = self._resolve() setattr(obj, self.name, result) # Invokes __set__. # This is a bit ugly, but it avoids running this again. delattr(obj.__class__, self.name) return result class MovedModule(_LazyDescr): def __init__(self, name, old, new=None): super(MovedModule, self).__init__(name) if PY3: if new is None: new = name self.mod = new else: self.mod = old def _resolve(self): return _import_module(self.mod) def __getattr__(self, attr): _module = self._resolve() value = getattr(_module, attr) setattr(self, attr, value) return value class _LazyModule(types.ModuleType): def __init__(self, name): super(_LazyModule, self).__init__(name) self.__doc__ = self.__class__.__doc__ def __dir__(self): attrs = ["__doc__", "__name__"] attrs += [attr.name for attr in self._moved_attributes] return attrs # Subclasses should override this _moved_attributes = [] class MovedAttribute(_LazyDescr): def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): super(MovedAttribute, self).__init__(name) if PY3: if new_mod is None: new_mod = name self.mod = new_mod if new_attr is None: if old_attr is None: new_attr = name else: new_attr = old_attr self.attr = new_attr else: self.mod = old_mod if old_attr is None: old_attr = name self.attr = old_attr def _resolve(self): module = _import_module(self.mod) return getattr(module, self.attr) class _SixMetaPathImporter(object): """ A meta path importer to import six.moves and its submodules. This class implements a PEP302 finder and loader. 
It should be compatible with Python 2.5 and all existing versions of Python3 """ def __init__(self, six_module_name): self.name = six_module_name self.known_modules = {} def _add_module(self, mod, *fullnames): for fullname in fullnames: self.known_modules[self.name + "." + fullname] = mod def _get_module(self, fullname): return self.known_modules[self.name + "." + fullname] def find_module(self, fullname, path=None): if fullname in self.known_modules: return self return None def __get_module(self, fullname): try: return self.known_modules[fullname] except KeyError: raise ImportError("This loader does not know module " + fullname) def load_module(self, fullname): try: # in case of a reload return sys.modules[fullname] except KeyError: pass mod = self.__get_module(fullname) if isinstance(mod, MovedModule): mod = mod._resolve() else: mod.__loader__ = self sys.modules[fullname] = mod return mod def is_package(self, fullname): """ Return true, if the named module is a package. We need this method to get correct spec objects with Python 3.4 (see PEP451) """ return hasattr(self.__get_module(fullname), "__path__") def get_code(self, fullname): """Return None Required, if is_package is implemented""" self.__get_module(fullname) # eventually raises ImportError return None get_source = get_code # same as get_code _importer = _SixMetaPathImporter(__name__) class _MovedItems(_LazyModule): """Lazy loading of moved objects""" __path__ = [] # mark as package _moved_attributes = [ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), MovedAttribute("intern", "__builtin__", "sys"), MovedAttribute("map", "itertools", "builtins", "imap", "map"), MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("reload_module", 
"__builtin__", "imp", "reload"), MovedAttribute("reduce", "__builtin__", "functools"), MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), MovedAttribute("StringIO", "StringIO", "io"), MovedAttribute("UserDict", "UserDict", "collections"), MovedAttribute("UserList", "UserList", "collections"), MovedAttribute("UserString", "UserString", "collections"), MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), MovedModule("builtins", "__builtin__"), MovedModule("configparser", "ConfigParser"), MovedModule("copyreg", "copy_reg"), MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), MovedModule("http_cookies", "Cookie", "http.cookies"), MovedModule("html_entities", "htmlentitydefs", "html.entities"), MovedModule("html_parser", "HTMLParser", "html.parser"), MovedModule("http_client", "httplib", "http.client"), MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), MovedModule("cPickle", "cPickle", "pickle"), MovedModule("queue", "Queue"), MovedModule("reprlib", "repr"), MovedModule("socketserver", "SocketServer"), MovedModule("_thread", "thread", "_thread"), MovedModule("tkinter", "Tkinter"), MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), 
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), MovedModule("tkinter_tix", "Tix", "tkinter.tix"), MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), MovedModule("tkinter_colorchooser", "tkColorChooser", "tkinter.colorchooser"), MovedModule("tkinter_commondialog", "tkCommonDialog", "tkinter.commondialog"), MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), MovedModule("tkinter_font", "tkFont", "tkinter.font"), MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", "tkinter.simpledialog"), MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), MovedModule("winreg", "_winreg"), ] for attr in _moved_attributes: setattr(_MovedItems, attr.name, attr) if isinstance(attr, MovedModule): _importer._add_module(attr, "moves." 
+ attr.name) del attr _MovedItems._moved_attributes = _moved_attributes moves = _MovedItems(__name__ + ".moves") _importer._add_module(moves, "moves") class Module_six_moves_urllib_parse(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_parse""" _urllib_parse_moved_attributes = [ MovedAttribute("ParseResult", "urlparse", "urllib.parse"), MovedAttribute("SplitResult", "urlparse", "urllib.parse"), MovedAttribute("parse_qs", "urlparse", "urllib.parse"), MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), MovedAttribute("urldefrag", "urlparse", "urllib.parse"), MovedAttribute("urljoin", "urlparse", "urllib.parse"), MovedAttribute("urlparse", "urlparse", "urllib.parse"), MovedAttribute("urlsplit", "urlparse", "urllib.parse"), MovedAttribute("urlunparse", "urlparse", "urllib.parse"), MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), MovedAttribute("quote", "urllib", "urllib.parse"), MovedAttribute("quote_plus", "urllib", "urllib.parse"), MovedAttribute("unquote", "urllib", "urllib.parse"), MovedAttribute("unquote_plus", "urllib", "urllib.parse"), MovedAttribute("urlencode", "urllib", "urllib.parse"), MovedAttribute("splitquery", "urllib", "urllib.parse"), MovedAttribute("splittag", "urllib", "urllib.parse"), MovedAttribute("splituser", "urllib", "urllib.parse"), MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), MovedAttribute("uses_params", "urlparse", "urllib.parse"), MovedAttribute("uses_query", "urlparse", "urllib.parse"), MovedAttribute("uses_relative", "urlparse", "urllib.parse"), ] for attr in _urllib_parse_moved_attributes: setattr(Module_six_moves_urllib_parse, attr.name, attr) del attr Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes _importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), "moves.urllib_parse", "moves.urllib.parse") class Module_six_moves_urllib_error(_LazyModule): """Lazy loading of 
moved objects in six.moves.urllib_error""" _urllib_error_moved_attributes = [ MovedAttribute("URLError", "urllib2", "urllib.error"), MovedAttribute("HTTPError", "urllib2", "urllib.error"), MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), ] for attr in _urllib_error_moved_attributes: setattr(Module_six_moves_urllib_error, attr.name, attr) del attr Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes _importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), "moves.urllib_error", "moves.urllib.error") class Module_six_moves_urllib_request(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_request""" _urllib_request_moved_attributes = [ MovedAttribute("urlopen", "urllib2", "urllib.request"), MovedAttribute("install_opener", "urllib2", "urllib.request"), MovedAttribute("build_opener", "urllib2", "urllib.request"), MovedAttribute("pathname2url", "urllib", "urllib.request"), MovedAttribute("url2pathname", "urllib", "urllib.request"), MovedAttribute("getproxies", "urllib", "urllib.request"), MovedAttribute("Request", "urllib2", "urllib.request"), MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), MovedAttribute("BaseHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), 
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), MovedAttribute("FileHandler", "urllib2", "urllib.request"), MovedAttribute("FTPHandler", "urllib2", "urllib.request"), MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), MovedAttribute("urlretrieve", "urllib", "urllib.request"), MovedAttribute("urlcleanup", "urllib", "urllib.request"), MovedAttribute("URLopener", "urllib", "urllib.request"), MovedAttribute("FancyURLopener", "urllib", "urllib.request"), MovedAttribute("proxy_bypass", "urllib", "urllib.request"), ] for attr in _urllib_request_moved_attributes: setattr(Module_six_moves_urllib_request, attr.name, attr) del attr Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes _importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), "moves.urllib_request", "moves.urllib.request") class Module_six_moves_urllib_response(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_response""" _urllib_response_moved_attributes = [ MovedAttribute("addbase", "urllib", "urllib.response"), MovedAttribute("addclosehook", "urllib", "urllib.response"), MovedAttribute("addinfo", "urllib", "urllib.response"), MovedAttribute("addinfourl", "urllib", "urllib.response"), ] for attr in _urllib_response_moved_attributes: setattr(Module_six_moves_urllib_response, attr.name, attr) del attr Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes _importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), "moves.urllib_response", "moves.urllib.response") class 
Module_six_moves_urllib_robotparser(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_robotparser""" _urllib_robotparser_moved_attributes = [ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), ] for attr in _urllib_robotparser_moved_attributes: setattr(Module_six_moves_urllib_robotparser, attr.name, attr) del attr Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes _importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), "moves.urllib_robotparser", "moves.urllib.robotparser") class Module_six_moves_urllib(types.ModuleType): """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" __path__ = [] # mark as package parse = _importer._get_module("moves.urllib_parse") error = _importer._get_module("moves.urllib_error") request = _importer._get_module("moves.urllib_request") response = _importer._get_module("moves.urllib_response") robotparser = _importer._get_module("moves.urllib_robotparser") def __dir__(self): return ['parse', 'error', 'request', 'response', 'robotparser'] _importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), "moves.urllib") def add_move(move): """Add an item to six.moves.""" setattr(_MovedItems, move.name, move) def remove_move(name): """Remove item from six.moves.""" try: delattr(_MovedItems, name) except AttributeError: try: del moves.__dict__[name] except KeyError: raise AttributeError("no such move, %r" % (name,)) if PY3: _meth_func = "__func__" _meth_self = "__self__" _func_closure = "__closure__" _func_code = "__code__" _func_defaults = "__defaults__" _func_globals = "__globals__" else: _meth_func = "im_func" _meth_self = "im_self" _func_closure = "func_closure" _func_code = "func_code" _func_defaults = "func_defaults" _func_globals = "func_globals" try: advance_iterator = next except NameError: def advance_iterator(it): return it.next() next = advance_iterator try: callable = 
callable except NameError: def callable(obj): return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) if PY3: def get_unbound_function(unbound): return unbound create_bound_method = types.MethodType Iterator = object else: def get_unbound_function(unbound): return unbound.im_func def create_bound_method(func, obj): return types.MethodType(func, obj, obj.__class__) class Iterator(object): def next(self): return type(self).__next__(self) callable = callable _add_doc(get_unbound_function, """Get the function out of a possibly unbound function""") get_method_function = operator.attrgetter(_meth_func) get_method_self = operator.attrgetter(_meth_self) get_function_closure = operator.attrgetter(_func_closure) get_function_code = operator.attrgetter(_func_code) get_function_defaults = operator.attrgetter(_func_defaults) get_function_globals = operator.attrgetter(_func_globals) if PY3: def iterkeys(d, **kw): return iter(d.keys(**kw)) def itervalues(d, **kw): return iter(d.values(**kw)) def iteritems(d, **kw): return iter(d.items(**kw)) def iterlists(d, **kw): return iter(d.lists(**kw)) else: def iterkeys(d, **kw): return iter(d.iterkeys(**kw)) def itervalues(d, **kw): return iter(d.itervalues(**kw)) def iteritems(d, **kw): return iter(d.iteritems(**kw)) def iterlists(d, **kw): return iter(d.iterlists(**kw)) _add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") _add_doc(itervalues, "Return an iterator over the values of a dictionary.") _add_doc(iteritems, "Return an iterator over the (key, value) pairs of a dictionary.") _add_doc(iterlists, "Return an iterator over the (key, [values]) pairs of a dictionary.") if PY3: def b(s): return s.encode("latin-1") def u(s): return s unichr = chr if sys.version_info[1] <= 1: def int2byte(i): return bytes((i,)) else: # This is about 2x faster than the implementation above on 3.2+ int2byte = operator.methodcaller("to_bytes", 1, "big") byte2int = operator.itemgetter(0) indexbytes = operator.getitem 
iterbytes = iter import io StringIO = io.StringIO BytesIO = io.BytesIO else: def b(s): return s # Workaround for standalone backslash def u(s): return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") unichr = unichr int2byte = chr def byte2int(bs): return ord(bs[0]) def indexbytes(buf, i): return ord(buf[i]) def iterbytes(buf): return (ord(byte) for byte in buf) import StringIO StringIO = BytesIO = StringIO.StringIO _add_doc(b, """Byte literal""") _add_doc(u, """Text literal""") if PY3: exec_ = getattr(moves.builtins, "exec") def reraise(tp, value, tb=None): if value is None: value = tp() if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value else: def exec_(_code_, _globs_=None, _locs_=None): """Execute code in a namespace.""" if _globs_ is None: frame = sys._getframe(1) _globs_ = frame.f_globals if _locs_ is None: _locs_ = frame.f_locals del frame elif _locs_ is None: _locs_ = _globs_ exec("""exec _code_ in _globs_, _locs_""") exec_("""def reraise(tp, value, tb=None): raise tp, value, tb """) print_ = getattr(moves.builtins, "print", None) if print_ is None: def print_(*args, **kwargs): """The new-style print function for Python 2.4 and 2.5.""" fp = kwargs.pop("file", sys.stdout) if fp is None: return def write(data): if not isinstance(data, basestring): data = str(data) # If the file has an encoding, encode unicode with it. 
if (isinstance(fp, file) and isinstance(data, unicode) and fp.encoding is not None): errors = getattr(fp, "errors", None) if errors is None: errors = "strict" data = data.encode(fp.encoding, errors) fp.write(data) want_unicode = False sep = kwargs.pop("sep", None) if sep is not None: if isinstance(sep, unicode): want_unicode = True elif not isinstance(sep, str): raise TypeError("sep must be None or a string") end = kwargs.pop("end", None) if end is not None: if isinstance(end, unicode): want_unicode = True elif not isinstance(end, str): raise TypeError("end must be None or a string") if kwargs: raise TypeError("invalid keyword arguments to print()") if not want_unicode: for arg in args: if isinstance(arg, unicode): want_unicode = True break if want_unicode: newline = unicode("\n") space = unicode(" ") else: newline = "\n" space = " " if sep is None: sep = space if end is None: end = newline for i, arg in enumerate(args): if i: write(sep) write(arg) write(end) _add_doc(reraise, """Reraise an exception.""") if sys.version_info[0:2] < (3, 4): def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES): def wrapper(f): f = functools.wraps(wrapped)(f) f.__wrapped__ = wrapped return f return wrapper else: wraps = functools.wraps def with_metaclass(meta, *bases): """Create a base class with a metaclass.""" # This requires a bit of explanation: the basic idea is to make a dummy # metaclass for one level of class instantiation that replaces itself with # the actual metaclass. 
class metaclass(meta): def __new__(cls, name, this_bases, d): return meta(name, bases, d) return type.__new__(metaclass, 'temporary_class', (), {}) def add_metaclass(metaclass): """Class decorator for creating a class with a metaclass.""" def wrapper(cls): orig_vars = cls.__dict__.copy() slots = orig_vars.get('__slots__') if slots is not None: if isinstance(slots, str): slots = [slots] for slots_var in slots: orig_vars.pop(slots_var) orig_vars.pop('__dict__', None) orig_vars.pop('__weakref__', None) return metaclass(cls.__name__, cls.__bases__, orig_vars) return wrapper # Complete the moves implementation. # This code is at the end of this module to speed up module loading. # Turn this module into a package. __path__ = [] # required for PEP 302 and PEP 451 __package__ = __name__ # see PEP 366 @ReservedAssignment if globals().get("__spec__") is not None: __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable # Remove other six meta path importers, since they cause problems. This can # happen if six is removed from sys.modules and then reloaded. (Setuptools does # this for some reason.) if sys.meta_path: for i, importer in enumerate(sys.meta_path): # Here's some real nastiness: Another "instance" of the six module might # be floating around. Therefore, we can't use isinstance() to check for # the six meta path importer, since the other six instance will have # inserted an importer with different class. if (type(importer).__name__ == "_SixMetaPathImporter" and importer.name == __name__): del sys.meta_path[i] break del i, importer # Finally, add the importer to the meta path import hook. sys.meta_path.append(_importer)
mit
tedder/ansible
test/units/modules/system/test_known_hosts.py
72
4309
import os
import tempfile

from ansible.module_utils import basic
from units.compat import unittest
from ansible.module_utils._text import to_bytes
from ansible.module_utils.basic import AnsibleModule

from ansible.modules.system.known_hosts import compute_diff, sanity_check


class KnownHostsDiffTestCase(unittest.TestCase):
    """Exercise compute_diff() and sanity_check() from the known_hosts module."""

    def _create_file(self, content):
        """Write *content* to a throwaway known_hosts file and return its path.

        The file is deleted automatically once the test finishes.
        """
        handle = tempfile.NamedTemporaryFile(prefix='ansible-test-',
                                             suffix='-known_hosts',
                                             delete=False)
        handle.write(to_bytes(content))
        handle.close()
        self.addCleanup(os.unlink, handle.name)
        return handle.name

    def test_no_existing_file(self):
        # When the known_hosts file does not exist the diff is taken
        # against /dev/null.
        path = tempfile.mktemp(prefix='ansible-test-', suffix='-known_hosts')
        key = 'example.com ssh-rsa AAAAetc\n'

        diff = compute_diff(path, found_line=None, replace_or_add=False,
                            state='present', key=key)

        expected = {
            'before_header': '/dev/null',
            'after_header': path,
            'before': '',
            'after': 'example.com ssh-rsa AAAAetc\n',
        }
        self.assertEqual(diff, expected)

    def test_key_addition(self):
        # Adding a new host appends its key to the existing content.
        path = self._create_file(
            'two.example.com ssh-rsa BBBBetc\n'
        )
        key = 'one.example.com ssh-rsa AAAAetc\n'

        diff = compute_diff(path, found_line=None, replace_or_add=False,
                            state='present', key=key)

        expected = {
            'before_header': path,
            'after_header': path,
            'before': 'two.example.com ssh-rsa BBBBetc\n',
            'after': 'two.example.com ssh-rsa BBBBetc\none.example.com ssh-rsa AAAAetc\n',
        }
        self.assertEqual(diff, expected)

    def test_no_change(self):
        # A key that is already present leaves the file untouched.
        path = self._create_file(
            'one.example.com ssh-rsa AAAAetc\n'
            'two.example.com ssh-rsa BBBBetc\n'
        )
        key = 'one.example.com ssh-rsa AAAAetc\n'

        diff = compute_diff(path, found_line=1, replace_or_add=False,
                            state='present', key=key)

        expected = {
            'before_header': path,
            'after_header': path,
            'before': 'one.example.com ssh-rsa AAAAetc\ntwo.example.com ssh-rsa BBBBetc\n',
            'after': 'one.example.com ssh-rsa AAAAetc\ntwo.example.com ssh-rsa BBBBetc\n',
        }
        self.assertEqual(diff, expected)

    def test_key_change(self):
        # Replacing a stale key removes the old line and appends the new one.
        path = self._create_file(
            'one.example.com ssh-rsa AAAaetc\n'
            'two.example.com ssh-rsa BBBBetc\n'
        )
        key = 'one.example.com ssh-rsa AAAAetc\n'

        diff = compute_diff(path, found_line=1, replace_or_add=True,
                            state='present', key=key)

        expected = {
            'before_header': path,
            'after_header': path,
            'before': 'one.example.com ssh-rsa AAAaetc\ntwo.example.com ssh-rsa BBBBetc\n',
            'after': 'two.example.com ssh-rsa BBBBetc\none.example.com ssh-rsa AAAAetc\n',
        }
        self.assertEqual(diff, expected)

    def test_key_removal(self):
        # state=absent drops the matching line and keeps the rest.
        path = self._create_file(
            'one.example.com ssh-rsa AAAAetc\n'
            'two.example.com ssh-rsa BBBBetc\n'
        )
        key = 'one.example.com ssh-rsa AAAAetc\n'

        diff = compute_diff(path, found_line=1, replace_or_add=False,
                            state='absent', key=key)

        expected = {
            'before_header': path,
            'after_header': path,
            'before': 'one.example.com ssh-rsa AAAAetc\ntwo.example.com ssh-rsa BBBBetc\n',
            'after': 'two.example.com ssh-rsa BBBBetc\n',
        }
        self.assertEqual(diff, expected)

    def test_key_removal_no_change(self):
        # Removing a key that was never present is a no-op.
        path = self._create_file(
            'two.example.com ssh-rsa BBBBetc\n'
        )
        key = 'one.example.com ssh-rsa AAAAetc\n'

        diff = compute_diff(path, found_line=None, replace_or_add=False,
                            state='absent', key=key)

        expected = {
            'before_header': path,
            'after_header': path,
            'before': 'two.example.com ssh-rsa BBBBetc\n',
            'after': 'two.example.com ssh-rsa BBBBetc\n',
        }
        self.assertEqual(diff, expected)

    def test_sanity_check(self):
        basic._load_params = lambda: {}
        # Module used internally to execute ssh-keygen system executable
        mock_module = AnsibleModule(argument_spec={})
        host = '10.0.0.1'
        key = '{0} ssh-rsa ASDF foo@bar'.format(host)
        keygen = mock_module.get_bin_path('ssh-keygen')
        sanity_check(mock_module, host, key, keygen)
gpl-3.0
hyperized/ansible
lib/ansible/modules/cloud/memset/memset_zone_domain.py
31
9077
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}

DOCUMENTATION = '''
---
module: memset_zone_domain
author: "Simon Weald (@glitchcrab)"
version_added: "2.6"
short_description: Create and delete domains in Memset DNS zones.
notes:
  - Zone domains can be thought of as a collection of domains, all of which share the
    same DNS records (i.e. they point to the same IP). An API key generated via the
    Memset customer control panel is needed with the following minimum scope -
    I(dns.zone_domain_create), I(dns.zone_domain_delete), I(dns.zone_domain_list).
  - Currently this module can only create one domain at a time. Multiple domains should
    be created using C(with_items).
description:
    - Manage DNS zone domains in a Memset account.
options:
    state:
        default: present
        description:
            - Indicates desired state of resource.
        choices: [ absent, present ]
    api_key:
        required: true
        description:
            - The API key obtained from the Memset control panel.
    domain:
        required: true
        description:
            - The zone domain name. Ensure this value has at most 250 characters.
        aliases: ['name']
    zone:
        required: true
        description:
            - The zone to add the domain to (this must already exist).
'''

EXAMPLES = '''
# Create the zone domain 'test.com'
- name: create zone domain
  memset_zone_domain:
    domain: test.com
    zone: testzone
    state: present
    api_key: 5eb86c9196ab03919abcf03857163741
  delegate_to: localhost
'''

RETURN = '''
memset_api:
  description: Domain info from the Memset API
  returned: when changed or state == present
  type: complex
  contains:
    domain:
      description: Domain name
      returned: always
      type: str
      sample: "example.com"
    id:
      description: Domain ID
      returned: always
      type: str
      sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
'''

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.memset import get_zone_id
from ansible.module_utils.memset import check_zone_domain
from ansible.module_utils.memset import memset_api_call


def api_validation(args=None):
    '''
    Perform some validation which will be enforced by Memset's API (see:
    https://www.memset.com/apidocs/methods_dns.html#dns.zone_domain_create)
    '''
    # zone domain length must be less than 250 chars
    if len(args['domain']) > 250:
        stderr = 'Zone domain must be less than 250 characters in length.'
        module.fail_json(failed=True, msg=stderr)


def check(args=None):
    '''
    Support for running with check mode.
    '''
    retvals = dict()
    has_changed = False

    api_method = 'dns.zone_domain_list'
    has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)

    domain_exists = check_zone_domain(data=response, domain=args['domain'])

    # set changed to true if the operation would cause a change.
    has_changed = ((domain_exists and args['state'] == 'absent')
                   or (not domain_exists and args['state'] == 'present'))

    retvals['changed'] = has_changed
    retvals['failed'] = has_failed

    return retvals


def create_zone_domain(args=None, zone_exists=None, zone_id=None, payload=None):
    '''
    At this point we already know whether the containing zone exists,
    so we just need to create the domain (or exit if it already exists).
    '''
    has_changed, has_failed = False, False
    msg = None

    api_method = 'dns.zone_domain_list'
    _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)

    for zone_domain in response.json():
        if zone_domain['domain'] == args['domain']:
            # zone domain already exists, nothing to change.
            has_changed = False
            break
    else:
        # we need to create the domain
        api_method = 'dns.zone_domain_create'
        payload['domain'] = args['domain']
        payload['zone_id'] = zone_id
        has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
        if not has_failed:
            has_changed = True

    return has_failed, has_changed, msg


def delete_zone_domain(args=None, payload=None):
    '''
    Deletion is pretty simple, domains are always unique so we
    we don't need to do any sanity checking to avoid deleting the
    wrong thing.
    '''
    has_changed, has_failed = False, False
    msg, memset_api = None, None

    api_method = 'dns.zone_domain_list'
    _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)

    domain_exists = check_zone_domain(data=response, domain=args['domain'])

    if domain_exists:
        api_method = 'dns.zone_domain_delete'
        payload['domain'] = args['domain']
        has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
        if not has_failed:
            has_changed = True
            memset_api = response.json()
            # unset msg as we don't want to return unnecessary info to the user.
            msg = None

    return has_failed, has_changed, memset_api, msg


def create_or_delete_domain(args=None):
    '''
    We need to perform some initial sanity checking and also look
    up required info before handing it off to create or delete.
    '''
    retvals, payload = dict(), dict()
    has_changed, has_failed = False, False
    msg, stderr, memset_api = None, None, None

    # get the zones and check if the relevant zone exists.
    api_method = 'dns.zone_list'
    has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)

    if has_failed:
        # this is the first time the API is called; incorrect credentials will
        # manifest themselves at this point so we need to ensure the user is
        # informed of the reason.
        retvals['failed'] = has_failed
        retvals['msg'] = msg
        retvals['stderr'] = "API returned an error: {0}".format(response.status_code)
        return retvals

    zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json())

    if not zone_exists:
        # the zone needs to be unique - this isn't a requirement of Memset's API but it
        # makes sense in the context of this module.
        has_failed = True
        if counter == 0:
            stderr = "DNS zone '{0}' does not exist, cannot create domain.".format(args['zone'])
        elif counter > 1:
            stderr = "{0} matches multiple zones, cannot create domain.".format(args['zone'])

        retvals['failed'] = has_failed
        retvals['msg'] = stderr
        return retvals

    if args['state'] == 'present':
        has_failed, has_changed, msg = create_zone_domain(args=args, zone_exists=zone_exists, zone_id=zone_id, payload=payload)

    if args['state'] == 'absent':
        has_failed, has_changed, memset_api, msg = delete_zone_domain(args=args, payload=payload)

    retvals['changed'] = has_changed
    retvals['failed'] = has_failed

    # Only include keys that carry information. The previous implementation
    # looped over the *names* and tested `val is not None` (always true for a
    # string) before calling eval(val), which both leaked None values into the
    # module result and relied on eval() needlessly.
    for key, value in (('msg', msg), ('stderr', stderr), ('memset_api', memset_api)):
        if value is not None:
            retvals[key] = value

    return retvals


def main():
    global module
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            api_key=dict(required=True, type='str', no_log=True),
            domain=dict(required=True, aliases=['name'], type='str'),
            zone=dict(required=True, type='str')
        ),
        supports_check_mode=True
    )

    # populate the dict with the user-provided vars.
    args = dict()
    for key, arg in module.params.items():
        args[key] = arg
    args['check_mode'] = module.check_mode

    # validate some API-specific limitations.
    api_validation(args=args)

    if module.check_mode:
        retvals = check(args)
    else:
        retvals = create_or_delete_domain(args)

    # we would need to populate the return values with the API's response
    # in several places so it's easier to do it at the end instead.
    if not retvals['failed']:
        if args['state'] == 'present' and not module.check_mode:
            payload = dict()
            payload['domain'] = args['domain']
            api_method = 'dns.zone_domain_info'
            _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
            retvals['memset_api'] = response.json()

    if retvals['failed']:
        module.fail_json(**retvals)
    else:
        module.exit_json(**retvals)


if __name__ == '__main__':
    main()
gpl-3.0
amwelch/a10sdk-python
a10sdk/core/A10_file/file_auth_portal_image_oper.py
2
2130
from a10sdk.common.A10BaseClass import A10BaseClass


class FileList(A10BaseClass):

    """One entry of the auth-portal-image file list.

    This class does not support CRUD Operations please use parent.

    :param file: {"type": "string", "format": "string"}
    :param DeviceProxy: The device proxy for REST operations and session
        handling. Refer to `common/device_proxy.py`
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "file-list"
        self.DeviceProxy = ""
        self.A10WW_file = ""

        # Allow callers to override any attribute via keyword arguments.
        for key, value in kwargs.items():
            setattr(self, key, value)


class Oper(A10BaseClass):

    """Operational data wrapper holding the file list.

    This class does not support CRUD Operations please use parent.

    :param file_list: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"optional": true, "file": {"type": "string", "format": "string"}}}]}
    :param DeviceProxy: The device proxy for REST operations and session
        handling. Refer to `common/device_proxy.py`
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "oper"
        self.DeviceProxy = ""
        self.file_list = []

        # Allow callers to override any attribute via keyword arguments.
        for key, value in kwargs.items():
            setattr(self, key, value)


class AuthPortalImage(A10BaseClass):

    """Class Description::

    Operational Status for the object auth-portal-image.

    Class auth-portal-image supports CRUD Operations and inherits from
    `common/A10BaseClass`. This class is the `"PARENT"` class for this
    module.`

    :param DeviceProxy: The device proxy for REST operations and session
        handling. Refer to `common/device_proxy.py`

    URL for this object::
        `https://<Hostname|Ip address>//axapi/v3/file/auth-portal-image/oper`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = []
        self.b_key = "auth-portal-image"
        self.a10_url = "/axapi/v3/file/auth-portal-image/oper"
        self.DeviceProxy = ""
        self.oper = {}

        # Allow callers to override any attribute via keyword arguments.
        for key, value in kwargs.items():
            setattr(self, key, value)
apache-2.0
smallyear/linuxLearn
salt/salt/states/dellchassis.py
3
25828
# -*- coding: utf-8 -*- ''' Manage chassis via Salt Proxies. .. versionadded:: 2015.8.2 Below is an example state that sets basic parameters: .. code-block:: yaml my-dell-chassis: dellchassis.chassis: - chassis_name: my-dell-chassis - datacenter: dc-1-us - location: my-location - mode: 2 - idrac_launch: 1 - slot_names: - server-1: my-slot-name - server-2: my-other-slot-name - blade_power_states: - server-1: on - server-2: off - server-3: powercycle However, it is possible to place the entire set of chassis configuration data in pillar. Here's an example pillar structure: .. code-block:: yaml proxy: host: 10.27.20.18 admin_username: root fallback_admin_username: root passwords: - super-secret - old-secret proxytype: fx2 chassis: name: fx2-1 username: root password: saltstack1 datacenter: london location: rack-1-shelf-3 management_mode: 2 idrac_launch: 0 slot_names: - 'server-1': blade1 - 'server-2': blade2 servers: server-1: idrac_password: saltstack1 ipmi_over_lan: True ip: 172.17.17.132 netmask: 255.255.0.0 gateway: 172.17.17.1 server-2: idrac_password: saltstack1 ipmi_over_lan: True ip: 172.17.17.2 netmask: 255.255.0.0 gateway: 172.17.17.1 server-3: idrac_password: saltstack1 ipmi_over_lan: True ip: 172.17.17.20 netmask: 255.255.0.0 gateway: 172.17.17.1 server-4: idrac_password: saltstack1 ipmi_over_lan: True ip: 172.17.17.2 netmask: 255.255.0.0 gateway: 172.17.17.1 switches: switch-1: ip: 192.168.1.2 netmask: 255.255.255.0 gateway: 192.168.1.1 snmp: nonpublic password: saltstack1 switch-2: ip: 192.168.1.3 netmask: 255.255.255.0 gateway: 192.168.1.1 snmp: nonpublic password: saltstack1 And to go with it, here's an example state that pulls the data from the pillar stated above: .. 
code-block:: yaml {% set details = pillar.get('proxy:chassis', {}) %} standup-step1: dellchassis.chassis: - name: {{ details['name'] }} - location: {{ details['location'] }} - mode: {{ details['management_mode'] }} - idrac_launch: {{ details['idrac_launch'] }} - slot_names: {% for entry details['slot_names'] %} - {{ entry.keys()[0] }}: {{ entry[entry.keys()[0]] }} {% endfor %} blade_powercycle: dellchassis.chassis: - blade_power_states: - server-1: powercycle - server-2: powercycle - server-3: powercycle - server-4: powercycle # Set idrac_passwords for blades. racadm needs them to be called 'server-x' {% for k, v in details['servers'].iteritems() %} {{ k }}: dellchassis.blade_idrac: - idrac_password: {{ v['idrac_password'] }} {% endfor %} # Set management ip addresses, passwords, and snmp strings for switches {% for k, v in details['switches'].iteritems() %} {{ k }}-switch-setup: dellchassis.switch: - name: {{ k }} - ip: {{ v['ip'] }} - netmask: {{ v['netmask'] }} - gateway: {{ v['gateway'] }} - password: {{ v['password'] }} - snmp: {{ v['snmp'] }} {% endfor %} .. note:: This state module relies on the dracr.py execution module, which runs racadm commands on the chassis, blades, etc. The racadm command runs very slowly and, depending on your state, the proxy minion return might timeout before the racadm commands have completed. If you are repeatedly seeing minions timeout after state calls, please use the ``-t`` CLI argument to increase the timeout variable. For example: .. code-block:: bash salt '*' state.sls my-dell-chasis-state-name -t 60 .. note:: The Dell CMC units perform adequately but many iDRACs are **excruciatingly** slow. Some functions can take minutes to execute. 
''' # Import python libs from __future__ import absolute_import import logging import os log = logging.getLogger(__name__) from salt.exceptions import CommandExecutionError def __virtual__(): return 'chassis.cmd' in __salt__ def blade_idrac(name, idrac_password=None, idrac_ipmi=None, idrac_ip=None, idrac_netmask=None, idrac_gateway=None, idrac_dnsname=None, idrac_dhcp=None): ''' Set parameters for iDRAC in a blade. :param idrac_password: Password to use to connect to the iDRACs directly (idrac_ipmi and idrac_dnsname must be set directly on the iDRAC. They can't be set through the CMC. If this password is present, use it instead of the CMC password) :param idrac_ipmi: Enable/Disable IPMI over LAN :param idrac_ip: Set IP address for iDRAC :param idrac_netmask: Set netmask for iDRAC :param idrac_gateway: Set gateway for iDRAC :param idrac_dhcp: Turn on DHCP for iDRAC (True turns on, False does nothing becaause setting a static IP will disable DHCP). :return: A standard Salt changes dictionary NOTE: If any of the IP address settings is configured, all of ip, netmask, and gateway must be present ''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} if not idrac_password: (username, password) = __salt__['chassis.chassis_credentials']() else: password = idrac_password module_network = __salt__['chassis.cmd']('network_info', module=name) current_idrac_ip = module_network['Network']['IP Address'] if idrac_ipmi is not None: if idrac_ipmi is True or idrac_ipmi == 1: idrac_ipmi = '1' if idrac_ipmi is False or idrac_ipmi == 0: idrac_ipmi = '0' current_ipmi = __salt__['dracr.get_general']('cfgIpmiLan', 'cfgIpmiLanEnable', host=current_idrac_ip, admin_username='root', admin_password=password) if current_ipmi != idrac_ipmi: ch = {'Old': current_ipmi, 'New': idrac_ipmi} ret['changes']['IPMI'] = ch if idrac_dnsname is not None: dnsret = __salt__['dracr.get_dns_dracname'](host=current_idrac_ip, admin_username='root', admin_password=password) current_dnsname = 
dnsret['[Key=iDRAC.Embedded.1#NIC.1]']['DNSRacName'] if current_dnsname != idrac_dnsname: ch = {'Old': current_dnsname, 'New': idrac_dnsname} ret['changes']['DNSRacName'] = ch if idrac_dhcp is not None or idrac_ip or idrac_netmask or idrac_gateway: if idrac_dhcp is True or idrac_dhcp == 1: idrac_dhcp = 1 else: idrac_dhcp = 0 if str(module_network['Network']['DHCP Enabled']) == '0' and idrac_dhcp == 1: ch = {'Old': module_network['Network']['DHCP Enabled'], 'New': idrac_dhcp} ret['changes']['DRAC DHCP'] = ch if idrac_dhcp == 0 and all([idrac_ip, idrac_netmask, idrac_netmask]): current_network = __salt__['chassis.cmd']('network_info', module=name) old_ipv4 = {} new_ipv4 = {} if current_network['Network']['IP Address'] != idrac_ip: old_ipv4['ip'] = current_network['Network']['IP Address'] new_ipv4['ip'] = idrac_ip if current_network['Network']['Subnet Mask'] != idrac_netmask: old_ipv4['netmask'] = current_network['Network']['Subnet Mask'] new_ipv4['netmask'] = idrac_netmask if current_network['Network']['Gateway'] != idrac_gateway: old_ipv4['gateway'] = current_network['Network']['Gateway'] new_ipv4['gateway'] = idrac_gateway if new_ipv4 != {}: ret['changes']['Network'] = {} ret['changes']['Network']['Old'] = old_ipv4 ret['changes']['Network']['New'] = new_ipv4 if ret['changes'] == {}: ret['comment'] = 'iDRAC on blade is already in the desired state.' return ret if __opts__['test'] and ret['changes'] != {}: ret['result'] = None ret['comment'] = 'iDRAC on blade will change.' 
return ret if 'IPMI' in ret['changes']: ipmi_result = __salt__['dracr.set_general']('cfgIpmiLan', 'cfgIpmiLanEnable', idrac_ipmi, host=current_idrac_ip, admin_username='root', admin_password=password) if not ipmi_result: ret['result'] = False ret['changes']['IPMI']['success'] = False if 'DNSRacName' in ret['changes']: dnsracname_result = __salt__['dracr.set_dns_dracname'](idrac_dnsname, host=current_idrac_ip, admin_username='root', admin_password=password) if dnsracname_result['retcode'] == 0: ret['changes']['DNSRacName']['success'] = True else: ret['result'] = False ret['changes']['DNSRacName']['success'] = False ret['changes']['DNSRacName']['return'] = dnsracname_result if 'DRAC DHCP' in ret['changes']: dhcp_result = __salt__['chassis.cmd']('set_niccfg', dhcp=idrac_dhcp) if dhcp_result['retcode']: ret['changes']['DRAC DHCP']['success'] = True else: ret['result'] = False ret['changes']['DRAC DHCP']['success'] = False ret['changes']['DRAC DHCP']['return'] = dhcp_result if 'Network' in ret['changes']: network_result = __salt__['chassis.cmd']('set_niccfg', ip=idrac_ip, netmask=idrac_netmask, gateway=idrac_gateway, module=name) if network_result['retcode'] == 0: ret['changes']['Network']['success'] = True else: ret['result'] = False ret['changes']['Network']['success'] = False ret['changes']['Network']['return'] = network_result return ret def chassis(name, chassis_name=None, password=None, datacenter=None, location=None, mode=None, idrac_launch=None, slot_names=None, blade_power_states=None): ''' Manage a Dell Chassis. chassis_name The name of the chassis. datacenter The datacenter in which the chassis is located location The location of the chassis. password Password for the chassis. Note: If this password is set for the chassis, the current implementation of this state will set this password both on the chassis and the iDrac passwords on any configured blades. 
If the password for the blades should be distinct, they should be set separately with the blade_idrac function. mode The management mode of the chassis. Viable options are: - 0: None - 1: Monitor - 2: Manage and Monitor idrac_launch The iDRAC launch method of the chassis. Viable options are: - 0: Disabled (launch iDRAC using IP address) - 1: Enabled (launch iDRAC using DNS name) slot_names The names of the slots, provided as a list identified by their slot numbers. blade_power_states The power states of a blade server, provided as a list and identified by their server numbers. Viable options are: - on: Ensure the blade server is powered on. - off: Ensure the blade server is powered off. - powercycle: Power cycle the blade server. Example: .. code-block:: yaml my-dell-chassis: dellchassis.chassis: - chassis_name: my-dell-chassis - location: my-location - datacenter: london - mode: 2 - idrac_launch: 1 - slot_names: - 1: my-slot-name - 2: my-other-slot-name - blade_power_states: - server-1: on - server-2: off - server-3: powercycle ''' ret = {'name': chassis_name, 'chassis_name': chassis_name, 'result': True, 'changes': {}, 'comment': ''} chassis_cmd = 'chassis.cmd' cfg_tuning = 'cfgRacTuning' mode_cmd = 'cfgRacTuneChassisMgmtAtServer' launch_cmd = 'cfgRacTuneIdracDNSLaunchEnable' inventory = __salt__[chassis_cmd]('inventory') if idrac_launch: idrac_launch = str(idrac_launch) current_name = __salt__[chassis_cmd]('get_chassis_name') if chassis_name != current_name: ret['changes'].update({'Name': {'Old': current_name, 'New': chassis_name}}) current_dc = __salt__[chassis_cmd]('get_chassis_datacenter') if datacenter and datacenter != current_dc: ret['changes'].update({'Datacenter': {'Old': current_dc, 'New': datacenter}}) if password: ret['changes'].update({'Password': {'Old': '******', 'New': '******'}}) if location: current_location = __salt__[chassis_cmd]('get_chassis_location') if location != current_location: ret['changes'].update({'Location': {'Old': 
current_location, 'New': location}}) if mode: current_mode = __salt__[chassis_cmd]('get_general', cfg_tuning, mode_cmd) if mode != current_mode: ret['changes'].update({'Management Mode': {'Old': current_mode, 'New': mode}}) if idrac_launch: current_launch_method = __salt__[chassis_cmd]('get_general', cfg_tuning, launch_cmd) if idrac_launch != current_launch_method: ret['changes'].update({'iDrac Launch Method': {'Old': current_launch_method, 'New': idrac_launch}}) if slot_names: current_slot_names = __salt__[chassis_cmd]('list_slotnames') for s in slot_names: key = s.keys()[0] new_name = s[key] if key.startswith('slot-'): key = key[5:] current_slot_name = current_slot_names.get(key).get('slotname') if current_slot_name != new_name: old = {key: current_slot_name} new = {key: new_name} if ret['changes'].get('Slot Names') is None: ret['changes'].update({'Slot Names': {'Old': {}, 'New': {}}}) ret['changes']['Slot Names']['Old'].update(old) ret['changes']['Slot Names']['New'].update(new) current_power_states = {} target_power_states = {} if blade_power_states: for b in blade_power_states: key = b.keys()[0] status = __salt__[chassis_cmd]('server_powerstatus', module=key) current_power_states[key] = status.get('status', -1) if b[key] == 'powerdown': if current_power_states[key] != -1 and current_power_states[key]: target_power_states[key] = 'powerdown' if b[key] == 'powerup': if current_power_states[key] != -1 and not current_power_states[key]: target_power_states[key] = 'powerup' if b[key] == 'powercycle': if current_power_states[key] != -1 and not current_power_states[key]: target_power_states[key] = 'powerup' if current_power_states[key] != -1 and current_power_states[key]: target_power_states[key] = 'powercycle' for k, v in target_power_states.iteritems(): old = {k: current_power_states[k]} new = {k: v} if ret['changes'].get('Blade Power States') is None: ret['changes'].update({'Blade Power States': {'Old': {}, 'New': {}}}) ret['changes']['Blade Power 
States']['Old'].update(old) ret['changes']['Blade Power States']['New'].update(new) if ret['changes'] == {}: ret['comment'] = 'Dell chassis is already in the desired state.' return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Dell chassis configuration will change.' return ret # Finally, set the necessary configurations on the chassis. name = __salt__[chassis_cmd]('set_chassis_name', chassis_name) if location: location = __salt__[chassis_cmd]('set_chassis_location', location) pw_result = True if password: pw_single = True if __salt__[chassis_cmd]('change_password', username='root', uid=1, password=password): for blade in inventory['server'].keys(): pw_single = __salt__[chassis_cmd]('deploy_password', username='root', password=password, module=blade) if not pw_single: pw_result = False else: pw_result = False if datacenter: datacenter_result = __salt__[chassis_cmd]('set_chassis_datacenter', datacenter) if mode: mode = __salt__[chassis_cmd]('set_general', cfg_tuning, mode_cmd, mode) if idrac_launch: idrac_launch = __salt__[chassis_cmd]('set_general', cfg_tuning, launch_cmd, idrac_launch) if ret['changes'].get('Slot Names') is not None: slot_rets = [] for s in slot_names: key = s.keys()[0] new_name = s[key] if key.startswith('slot-'): key = key[5:] slot_rets.append(__salt__[chassis_cmd]('set_slotname', key, new_name)) if any(slot_rets) is False: slot_names = False else: slot_names = True powerchange_all_ok = True for k, v in target_power_states.iteritems(): powerchange_ok = __salt__[chassis_cmd]('server_power', v, module=k) if not powerchange_ok: powerchange_all_ok = False if any([name, location, mode, idrac_launch, slot_names, powerchange_all_ok]) is False: ret['result'] = False ret['comment'] = 'There was an error setting the Dell chassis.' ret['comment'] = 'Dell chassis was updated.' return ret def switch(name, ip=None, netmask=None, gateway=None, dhcp=None, password=None, snmp=None): ''' Manage switches in a Dell Chassis. 
name The switch designation (e.g. switch-1, switch-2) ip The Static IP Address of the switch netmask The netmask for the static IP gateway The gateway for the static IP dhcp True: Enable DHCP False: Do not change DHCP setup (disabling DHCP is automatic when a static IP is set) password The access (root) password for the switch snmp The SNMP community string for the switch Example: .. code-block:: yaml my-dell-chassis: dellchassis.switch: - switch: switch-1 - ip: 192.168.1.1 - netmask: 255.255.255.0 - gateway: 192.168.1.254 - dhcp: True - password: secret - snmp: public ''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} current_nic = __salt__['chassis.cmd']('network_info', module=name) try: if current_nic.get('retcode', 0) != 0: ret['result'] = False ret['comment'] = current_nic['stdout'] return ret if ip or netmask or gateway: if not ip: ip = current_nic['Network']['IP Address'] if not netmask: ip = current_nic['Network']['Subnet Mask'] if not gateway: ip = current_nic['Network']['Gateway'] if current_nic['Network']['DHCP Enabled'] == '0' and dhcp: ret['changes'].update({'DHCP': {'Old': {'DHCP Enabled': current_nic['Network']['DHCP Enabled']}, 'New': {'DHCP Enabled': dhcp}}}) if ((ip or netmask or gateway) and not dhcp and (ip != current_nic['Network']['IP Address'] or netmask != current_nic['Network']['Subnet Mask'] or gateway != current_nic['Network']['Gateway'])): ret['changes'].update({'IP': {'Old': current_nic['Network'], 'New': {'IP Address': ip, 'Subnet Mask': netmask, 'Gateway': gateway}}}) if password: if 'New' not in ret['changes']: ret['changes']['New'] = {} ret['changes']['New'].update({'Password': '*****'}) if snmp: if 'New' not in ret['changes']: ret['changes']['New'] = {} ret['changes']['New'].update({'SNMP': '*****'}) if ret['changes'] == {}: ret['comment'] = 'Switch ' + name + ' is already in desired state' return ret except AttributeError: ret['changes'] = {} ret['comment'] = 'Something went wrong retrieving the switch details' 
return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'Switch ' + name + ' configuration will change' return ret # Finally, set the necessary configurations on the chassis. dhcp_ret = net_ret = password_ret = snmp_ret = True if dhcp: dhcp_ret = __salt__['chassis.cmd']('set_niccfg', module=name, dhcp=dhcp) if ip or netmask or gateway: net_ret = __salt__['chassis.cmd']('set_niccfg', ip, netmask, gateway, module=name) if password: password_ret = __salt__['chassis.cmd']('deploy_password', 'root', password, module=name) if snmp: snmp_ret = __salt__['chassis.cmd']('deploy_snmp', snmp, module=name) if any([password_ret, snmp_ret, net_ret, dhcp_ret]) is False: ret['result'] = False ret['comment'] = 'There was an error setting the switch {0}.'.format(name) ret['comment'] = 'Dell chassis switch {0} was updated.'.format(name) return ret def _firmware_update(firmwarefile='', host='', directory=''): ''' Update firmware for a single host ''' dest = os.path.join(directory, firmwarefile[7:]) __salt__['cp.get_file'](firmwarefile, dest) username = __pillar__['proxy']['admin_user'] password = __pillar__['proxy']['admin_password'] __salt__['dracr.update_firmware'](dest, host=host, admin_username=username, admin_password=password) def firmware_update(hosts=None, directory=''): ''' State to update the firmware on host using the ``racadm`` command firmwarefile filename (string) starting with ``salt://`` host string representing the hostname supplied to the ``racadm`` command directory Directory name where firmwarefile will be downloaded .. 
code-block:: yaml dell-chassis-firmware-update: dellchassis.firmware_update: hosts: cmc: salt://firmware_cmc.exe server-1: salt://firmware.exe directory: /opt/firmwares ''' ret = {} ret.changes = {} success = True for host, firmwarefile in hosts: try: _firmware_update(firmwarefile, host, directory) ret['changes'].update({ 'host': { 'comment': 'Firmware update submitted for {0}'.format(host), 'success': True, } }) except CommandExecutionError as err: success = False ret['changes'].update({ 'host': { 'comment': 'FAILED to update firmware for {0}'.format(host), 'success': False, 'reason': str(err), } }) ret['result'] = success return ret
apache-2.0
KerkhoffTechnologies/django-connectwise
djconnectwise/migrations/0087_auto_20190709_0836.py
1
1591
# Generated by Django 2.1 on 2019-07-09 08:36 from django.db import migrations, models import django.db.models.deletion import django_extensions.db.fields class Migration(migrations.Migration): dependencies = [ ('djconnectwise', '0086_merge_20190507_1402'), ] operations = [ migrations.CreateModel( name='WorkType', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')), ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')), ('name', models.CharField(max_length=50)), ('inactive_flag', models.BooleanField(default=False)), ], options={ 'ordering': ('-modified', '-created'), 'get_latest_by': 'modified', 'abstract': False, }, ), migrations.AddField( model_name='ticket', name='work_type', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='work_type_tickets', to='djconnectwise.WorkType'), ), migrations.AddField( model_name='timeentry', name='work_type', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='djconnectwise.WorkType'), ), ]
mit
alibaba/FlexGW
website/vpn/sts/views.py
2
4111
# -*- coding: utf-8 -*-
"""
    website.vpn.sts.views
    ~~~~~~~~~~~~~~~~~~~~~

    vpn sts views:
        /vpn/sts/add
        /vpn/sts/<int:id>
        /vpn/sts/<int:id>/settings
"""

from flask import Blueprint, render_template
from flask import url_for, redirect
from flask import flash

from website.vpn.sts.forms import AddForm
from website.vpn.sts.forms import ConsoleForm, UpDownForm
from website.vpn.sts.services import vpn_settings, vpn_del
from website.vpn.sts.services import get_tunnels, VpnServer
from website.vpn.sts.models import Tunnels

from flask.ext.login import login_required


sts = Blueprint('sts', __name__, url_prefix='/vpn/sts',
                template_folder='templates')


@sts.route('/')
@login_required
def index():
    """Tunnel overview page; warns when no tunnel is configured yet."""
    form = UpDownForm()
    tunnels = get_tunnels(status=True)
    if not tunnels:
        flash(u'目前没有任何VPN 配置,如有需要请添加。', 'info')
    return render_template('sts/index.html', tunnels=tunnels, form=form)


@sts.route('/add', methods=['GET', 'POST'])
@login_required
def add():
    """Create a new site-to-site tunnel, rejecting duplicate names."""
    form = AddForm()
    if form.validate_on_submit():
        if not Tunnels.query.filter_by(name=form.tunnel_name.data).first():
            if vpn_settings(form):
                message = u'添加Site-to-Site 隧道成功!'
                flash(message, 'success')
                return redirect(url_for('sts.index'))
        else:
            message = u'该隧道已经存在:%s' % form.tunnel_name.data
            flash(message, 'alert')
    return render_template('sts/add.html', form=form)


@sts.route('/<int:id>/settings', methods=['GET', 'POST'])
@login_required
def settings(id):
    """Show / modify / delete the configuration of one tunnel."""
    form = AddForm()
    tunnel = get_tunnels(id)
    if form.validate_on_submit():
        if form.delete.data:
            if vpn_del(id):
                message = u'删除隧道%s :成功!' % tunnel[0]['name']
                flash(message, 'success')
                return redirect(url_for('sts.index'))
        if form.save.data:
            if vpn_settings(form, id):
                flash(u'修改隧道配置成功!', 'success')
                return redirect(url_for('sts.settings', id=id))
    # Pre-populate the form from the stored tunnel rules.
    form.local_subnet.data = tunnel[0]['rules']['leftsubnet']
    form.remote_subnet.data = tunnel[0]['rules']['rightsubnet']
    form.start_type.data = tunnel[0]['rules']['auto']
    # Backward compatible v1.1.0: older records may omit the ESP DH group
    # and the whole IKE proposal.
    esp_settings = tunnel[0]['rules']['esp'].split('-')
    form.esp_encryption_algorithm.data = esp_settings[0]
    form.esp_integrity_algorithm.data = esp_settings[1]
    form.esp_dh_algorithm.data = esp_settings[2] if len(esp_settings) == 3 else 'null'
    ike_settings = tunnel[0]['rules'].get('ike', 'aes128-sha1-modp2048').split('-')
    form.ike_encryption_algorithm.data = ike_settings[0]
    form.ike_integrity_algorithm.data = ike_settings[1]
    form.ike_dh_algorithm.data = ike_settings[2]
    return render_template('sts/view.html', tunnel=tunnel[0], form=form)


@sts.route('/<int:id>/flow')
@login_required
def flow(id):
    """Traffic/flow page for a single tunnel."""
    tunnel = get_tunnels(id, status=True)
    return render_template('sts/flow.html', tunnel=tunnel[0])


@sts.route('/console', methods=['GET', 'POST'])
@login_required
def console():
    """Start / stop / reload the VPN service."""
    form = ConsoleForm()
    vpn = VpnServer()
    if form.validate_on_submit():
        if form.stop.data and vpn.stop:
            flash(u'VPN 服务停止成功!', 'success')
        if form.start.data and vpn.start:
            flash(u'VPN 服务启动成功!', 'success')
        if form.re_load.data and vpn.reload:
            flash(u'VPN 服务配置生效完成!', 'success')
    return render_template('sts/console.html', status=vpn.status, form=form)


@sts.route('/updown', methods=['POST'])
@login_required
def updown():
    """Bring a single tunnel up or down, then return to the overview."""
    form = UpDownForm()
    vpn = VpnServer()
    if form.validate_on_submit():
        if form.up.data and vpn.tunnel_up(form.tunnel_name.data):
            flash(u'隧道连接成功!', 'success')
        if form.down.data and vpn.tunnel_down(form.tunnel_name.data):
            flash(u'隧道断开成功!', 'success')
    return redirect(url_for('sts.index'))
bsd-3-clause
pdxwebdev/yadapy
yadapy/tests.py
1
1145
import unittest from node import Identity, NewNode, NewRelation, NewRelationship class TestMongoNode(unittest.TestCase): def test_relationship_identifier(self): pass def test_shared_secret(self): pass def test_identity(self): assert Identity('foo', 'http://imgur.com') def test_new_node(self): identity = Identity('foo', 'http://imgur.com') assert NewNode(identity) def test_relation(self): identity = Identity('foo', 'http://imgur.com') assert NewRelation(identity) def test_relationship(self): identity = Identity('foo', 'http://imgur.com') relation1 = NewRelation(identity) identity = Identity('bar', 'http://imgur.com') relation2 = NewRelation(identity) assert NewRelationship(relation1, relation2) def test_add_relation(self): identity = Identity('foo', 'http://imgur.com') node = NewNode(identity) identity = Identity('bar', 'http://imgur.com') node = NewNode(identity) relation = NewRelation(node) node.addRelation(relation) assert node.relations
gpl-3.0
wxue/xiakelite
libs/jinja2/sandbox.py
284
9423
# -*- coding: utf-8 -*-
"""
    jinja2.sandbox
    ~~~~~~~~~~~~~~

    Adds a sandbox layer to Jinja as it was the default behavior in the old
    Jinja 1 releases.  This sandbox is slightly different from Jinja 1 as the
    default behavior is easier to use.

    The behavior can be changed by subclassing the environment.

    :copyright: (c) 2010 by the Jinja Team.
    :license: BSD.
"""
import operator
from jinja2.runtime import Undefined
from jinja2.environment import Environment
from jinja2.exceptions import SecurityError
from jinja2.utils import FunctionType, MethodType, TracebackType, CodeType, \
     FrameType, GeneratorType


#: maximum number of items a range may produce
MAX_RANGE = 100000

#: attributes of function objects that are considered unsafe.
UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict',
                                  'func_defaults', 'func_globals'])

#: unsafe method attributes.  function attributes are unsafe for methods too
UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self'])


import warnings

# make sure we don't warn in python 2.6 about stuff we don't care about
warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning,
                        module='jinja2.sandbox')

from collections import deque

_mutable_set_types = (set,)
_mutable_mapping_types = (dict,)
_mutable_sequence_types = (list,)


# on python 2.x we can register the user collection types
try:
    from UserDict import UserDict, DictMixin
    from UserList import UserList
    _mutable_mapping_types += (UserDict, DictMixin)
    # NOTE(review): UserList is a sequence; registering it among the *set*
    # types looks suspicious — confirm against upstream.  Kept as-is.
    _mutable_set_types += (UserList,)
except ImportError:
    pass

# if sets is still available, register the mutable set from there as well
try:
    from sets import Set
    _mutable_set_types += (Set,)
except ImportError:
    pass

#: register Python 2.6 abstract base classes
try:
    from collections import MutableSet, MutableMapping, MutableSequence
    _mutable_set_types += (MutableSet,)
    _mutable_mapping_types += (MutableMapping,)
    _mutable_sequence_types += (MutableSequence,)
except ImportError:
    pass

#: (types, frozenset-of-mutating-method-names) pairs consulted by
#: :func:`modifies_known_mutable`.
_mutable_spec = (
    (_mutable_set_types, frozenset([
        'add', 'clear', 'difference_update', 'discard', 'pop', 'remove',
        'symmetric_difference_update', 'update'
    ])),
    (_mutable_mapping_types, frozenset([
        'clear', 'pop', 'popitem', 'setdefault', 'update'
    ])),
    (_mutable_sequence_types, frozenset([
        'append', 'reverse', 'insert', 'sort', 'extend', 'remove'
    ])),
    (deque, frozenset([
        'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop',
        'popleft', 'remove', 'rotate'
    ]))
)


def safe_range(*args):
    """A range that can't generate ranges with a length of more than
    MAX_RANGE items.
    """
    rng = xrange(*args)
    if len(rng) > MAX_RANGE:
        raise OverflowError('range too big, maximum size for range is %d' %
                            MAX_RANGE)
    return rng


def unsafe(f):
    """Mark a function or method as unsafe::

        @unsafe
        def delete(self):
            pass
    """
    f.unsafe_callable = True
    return f


def is_internal_attribute(obj, attr):
    """Test if the attribute given is an internal python attribute.  For
    example this function returns `True` for the `func_code` attribute of
    python objects.  This is useful if the environment method
    :meth:`~SandboxedEnvironment.is_safe_attribute` is overriden.

    >>> from jinja2.sandbox import is_internal_attribute
    >>> is_internal_attribute(lambda: None, "func_code")
    True
    >>> is_internal_attribute((lambda x:x).func_code, 'co_code')
    True
    >>> is_internal_attribute(str, "upper")
    False
    """
    if isinstance(obj, FunctionType):
        if attr in UNSAFE_FUNCTION_ATTRIBUTES:
            return True
    elif isinstance(obj, MethodType):
        if attr in UNSAFE_FUNCTION_ATTRIBUTES or \
           attr in UNSAFE_METHOD_ATTRIBUTES:
            return True
    elif isinstance(obj, type):
        if attr == 'mro':
            return True
    elif isinstance(obj, (CodeType, TracebackType, FrameType)):
        return True
    elif isinstance(obj, GeneratorType):
        if attr == 'gi_frame':
            return True
    return attr.startswith('__')


def modifies_known_mutable(obj, attr):
    """This function checks if an attribute on a builtin mutable object
    (list, dict, set or deque) would modify it if called.  It also supports
    the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and
    with Python 2.6 onwards the abstract base classes `MutableSet`,
    `MutableMapping`, and `MutableSequence`.

    >>> modifies_known_mutable({}, "clear")
    True
    >>> modifies_known_mutable({}, "keys")
    False
    >>> modifies_known_mutable([], "append")
    True
    >>> modifies_known_mutable([], "index")
    False

    If called with an unsupported object (such as unicode) `False` is
    returned.

    >>> modifies_known_mutable("foo", "upper")
    False
    """
    for typespec, unsafe in _mutable_spec:
        if isinstance(obj, typespec):
            return attr in unsafe
    return False


class SandboxedEnvironment(Environment):
    """The sandboxed environment.  It works like the regular environment but
    tells the compiler to generate sandboxed code.  Additionally subclasses of
    this environment may override the methods that tell the runtime what
    attributes or functions are safe to access.

    If the template tries to access insecure code a :exc:`SecurityError` is
    raised.  However also other exceptions may occour during the rendering so
    the caller has to ensure that all exceptions are catched.
    """
    sandboxed = True

    def __init__(self, *args, **kwargs):
        Environment.__init__(self, *args, **kwargs)
        self.globals['range'] = safe_range

    def is_safe_attribute(self, obj, attr, value):
        """The sandboxed environment will call this method to check if the
        attribute of an object is safe to access.  Per default all attributes
        starting with an underscore are considered private as well as the
        special attributes of internal python objects as returned by the
        :func:`is_internal_attribute` function.
        """
        return not (attr.startswith('_') or is_internal_attribute(obj, attr))

    def is_safe_callable(self, obj):
        """Check if an object is safely callable.  Per default a function is
        considered safe unless the `unsafe_callable` attribute exists and is
        True.  Override this method to alter the behavior, but this won't
        affect the `unsafe` decorator from this module.
        """
        return not (getattr(obj, 'unsafe_callable', False) or
                    getattr(obj, 'alters_data', False))

    def getitem(self, obj, argument):
        """Subscribe an object from sandboxed code."""
        try:
            return obj[argument]
        except (TypeError, LookupError):
            # Fall back to attribute access for string subscripts so that
            # ``foo['bar']`` also finds the ``bar`` attribute.
            if isinstance(argument, basestring):
                try:
                    attr = str(argument)
                except:
                    pass
                else:
                    try:
                        value = getattr(obj, attr)
                    except AttributeError:
                        pass
                    else:
                        if self.is_safe_attribute(obj, argument, value):
                            return value
                        return self.unsafe_undefined(obj, argument)
        return self.undefined(obj=obj, name=argument)

    def getattr(self, obj, attribute):
        """Subscribe an object from sandboxed code and prefer the
        attribute.  The attribute passed *must* be a bytestring.
        """
        try:
            value = getattr(obj, attribute)
        except AttributeError:
            # Fall back to item access when the attribute does not exist.
            try:
                return obj[attribute]
            except (TypeError, LookupError):
                pass
        else:
            if self.is_safe_attribute(obj, attribute, value):
                return value
            return self.unsafe_undefined(obj, attribute)
        return self.undefined(obj=obj, name=attribute)

    def unsafe_undefined(self, obj, attribute):
        """Return an undefined object for unsafe attributes."""
        return self.undefined('access to attribute %r of %r '
                              'object is unsafe.' % (
            attribute,
            obj.__class__.__name__
        ), name=attribute, obj=obj, exc=SecurityError)

    def call(__self, __context, __obj, *args, **kwargs):
        """Call an object from sandboxed code."""
        # the double prefixes are to avoid double keyword argument
        # errors when proxying the call.
        if not __self.is_safe_callable(__obj):
            raise SecurityError('%r is not safely callable' % (__obj,))
        return __context.call(__obj, *args, **kwargs)


class ImmutableSandboxedEnvironment(SandboxedEnvironment):
    """Works exactly like the regular `SandboxedEnvironment` but does not
    permit modifications on the builtin mutable objects `list`, `set`, and
    `dict` by using the :func:`modifies_known_mutable` function.
    """

    def is_safe_attribute(self, obj, attr, value):
        if not SandboxedEnvironment.is_safe_attribute(self, obj, attr, value):
            return False
        return not modifies_known_mutable(obj, attr)
mit
akaihola/hardlinkpy
tests.py
1
5602
#!/usr/bin/env python
"""Functional tests for hardlink.py: build a small tree of duplicate
files, run the tool with various flags, and check which files ended up
sharing an inode."""

import os
import sys
import tempfile
import time
import unittest

import hardlink

# Two payloads that share a long prefix but differ at the tail.
testdata1 = "1234" * 1024 + "abc"
testdata2 = "1234" * 1024 + "xyz"


def get_inode(filename):
    """Return the inode number of *filename* (without following symlinks)."""
    return os.lstat(filename).st_ino


class TestHappy(unittest.TestCase):

    def setUp(self):
        self.root = tempfile.mkdtemp()
        os.chdir(self.root)
        self.testfs = {
            "dir1/name1.ext": testdata1,
            "dir1/name2.ext": testdata1,
            "dir1/name3.ext": testdata2,
            "dir2/name1.ext": testdata1,
            "dir3/name1.ext": testdata2,
            "dir3/name1.noext": testdata1,
            "dir4/name1.ext": testdata1,
        }
        for dirname in ("dir1", "dir2", "dir3", "dir4", "dir5"):
            os.mkdir(dirname)
        for filename, contents in self.testfs.items():
            with open(filename, "w") as f:
                f.write(contents)
        # Everything shares one mtime except dir4/name1.ext, which is older.
        now = time.time()
        other = now - 2
        for filename in ("dir1/name1.ext",
                         "dir1/name2.ext",
                         "dir1/name3.ext",
                         "dir2/name1.ext",
                         "dir3/name1.ext",
                         "dir3/name1.noext"):
            os.utime(filename, (now, now))
        os.utime("dir4/name1.ext", (other, other))
        # os.chown("dir5/name1.ext", os.getuid(), ...)
        # -c, --content-only    Only file contents have to match
        os.link("dir1/name1.ext", "dir1/link")
        self.verify_file_contents()

    def verify_file_contents(self):
        for filename, contents in self.testfs.items():
            with open(filename, "r") as f:
                actual = f.read()
            self.assertEqual(actual, contents)
            # Bug?  Should hardlink to the file with most existing links?
            # self.assertEqual(get_inode("dir1/name1.ext"),
            #                  get_inode("dir1/link"))

    def test_hardlink_tree_dryrun(self):
        sys.argv = ["hardlink.py", "-v", "0", "--no-stats", "--dry-run",
                    self.root]
        hardlink.main()
        self.verify_file_contents()
        # Dry run: only the pre-existing link counts as linked.
        self.assertEqual(os.lstat("dir1/name1.ext").st_nlink, 2)
        self.assertEqual(os.lstat("dir1/name2.ext").st_nlink, 1)
        self.assertEqual(os.lstat("dir1/name3.ext").st_nlink, 1)
        self.assertEqual(os.lstat("dir2/name1.ext").st_nlink, 1)
        self.assertEqual(os.lstat("dir3/name1.ext").st_nlink, 1)
        self.assertEqual(os.lstat("dir3/name1.noext").st_nlink, 1)
        self.assertEqual(os.lstat("dir4/name1.ext").st_nlink, 1)

    def test_hardlink_tree(self):
        sys.argv = ["hardlink.py", "-v", "0", "--no-stats", self.root]
        hardlink.main()
        self.verify_file_contents()
        self.assertEqual(get_inode("dir1/name1.ext"),
                         get_inode("dir1/name2.ext"))
        self.assertEqual(get_inode("dir1/name1.ext"),
                         get_inode("dir2/name1.ext"))
        self.assertEqual(get_inode("dir1/name1.ext"),
                         get_inode("dir3/name1.noext"))
        self.assertEqual(get_inode("dir1/name3.ext"),
                         get_inode("dir3/name1.ext"))
        # Different mtime keeps dir4 unlinked.
        self.assertNotEqual(get_inode("dir1/name1.ext"),
                            get_inode("dir4/name1.ext"))

    def test_hardlink_tree_filenames_equal(self):
        sys.argv = ["hardlink.py", "-v", "0", "--no-stats",
                    "--filenames-equal", self.root]
        hardlink.main()
        self.verify_file_contents()
        self.assertNotEqual(get_inode("dir1/name1.ext"),
                            get_inode("dir1/name2.ext"))
        self.assertEqual(get_inode("dir1/name1.ext"),
                         get_inode("dir2/name1.ext"))
        self.assertNotEqual(get_inode("dir1/name1.ext"),
                            get_inode("dir3/name1.noext"))
        self.assertNotEqual(get_inode("dir1/name3.ext"),
                            get_inode("dir3/name1.ext"))
        self.assertNotEqual(get_inode("dir1/name1.ext"),
                            get_inode("dir4/name1.ext"))

    def test_hardlink_tree_exclude(self):
        sys.argv = ["hardlink.py", "-v", "0", "--no-stats",
                    "--exclude", ".*noext$", self.root]
        hardlink.main()
        self.verify_file_contents()
        self.assertEqual(get_inode("dir1/name1.ext"),
                         get_inode("dir1/name2.ext"))
        self.assertEqual(get_inode("dir1/name1.ext"),
                         get_inode("dir2/name1.ext"))
        self.assertNotEqual(get_inode("dir1/name1.ext"),
                            get_inode("dir3/name1.noext"))
        self.assertEqual(get_inode("dir1/name3.ext"),
                         get_inode("dir3/name1.ext"))
        self.assertNotEqual(get_inode("dir1/name1.ext"),
                            get_inode("dir4/name1.ext"))

    def test_hardlink_tree_timestamp_ignore(self):
        sys.argv = ["hardlink.py", "-v", "0", "--no-stats",
                    "--timestamp-ignore", self.root]
        hardlink.main()
        self.verify_file_contents()
        self.assertEqual(get_inode("dir1/name1.ext"),
                         get_inode("dir1/name2.ext"))
        self.assertEqual(get_inode("dir1/name1.ext"),
                         get_inode("dir2/name1.ext"))
        self.assertEqual(get_inode("dir1/name1.ext"),
                         get_inode("dir3/name1.noext"))
        self.assertEqual(get_inode("dir1/name3.ext"),
                         get_inode("dir3/name1.ext"))
        self.assertEqual(get_inode("dir1/name1.ext"),
                         get_inode("dir4/name1.ext"))

    def test_hardlink_tree_match(self):
        sys.argv = ["hardlink.py", "-v", "0", "--no-stats",
                    "--match", "*.ext", self.root]
        hardlink.main()
        self.verify_file_contents()
        self.assertEqual(get_inode("dir1/name1.ext"),
                         get_inode("dir1/name2.ext"))
        self.assertEqual(get_inode("dir1/name1.ext"),
                         get_inode("dir2/name1.ext"))
        self.assertNotEqual(get_inode("dir1/name1.ext"),
                            get_inode("dir3/name1.noext"))
        self.assertEqual(get_inode("dir1/name3.ext"),
                         get_inode("dir3/name1.ext"))
        self.assertNotEqual(get_inode("dir1/name1.ext"),
                            get_inode("dir4/name1.ext"))


if __name__ == '__main__':
    unittest.main()
gpl-2.0
stbka/ansible
lib/ansible/plugins/cache/memcached.py
193
6097
# (c) 2014, Brian Coca, Josh Drake, et al # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import collections import os import sys import time import threading from itertools import chain from ansible import constants as C from ansible.plugins.cache.base import BaseCacheModule try: import memcache except ImportError: print('python-memcached is required for the memcached fact cache') sys.exit(1) class ProxyClientPool(object): """ Memcached connection pooling for thread/fork safety. Inspired by py-redis connection pool. Available connections are maintained in a deque and released in a FIFO manner. 
""" def __init__(self, *args, **kwargs): self.max_connections = kwargs.pop('max_connections', 1024) self.connection_args = args self.connection_kwargs = kwargs self.reset() def reset(self): self.pid = os.getpid() self._num_connections = 0 self._available_connections = collections.deque(maxlen=self.max_connections) self._locked_connections = set() self._lock = threading.Lock() def _check_safe(self): if self.pid != os.getpid(): with self._lock: if self.pid == os.getpid(): # bail out - another thread already acquired the lock return self.disconnect_all() self.reset() def get_connection(self): self._check_safe() try: connection = self._available_connections.popleft() except IndexError: connection = self.create_connection() self._locked_connections.add(connection) return connection def create_connection(self): if self._num_connections >= self.max_connections: raise RuntimeError("Too many memcached connections") self._num_connections += 1 return memcache.Client(*self.connection_args, **self.connection_kwargs) def release_connection(self, connection): self._check_safe() self._locked_connections.remove(connection) self._available_connections.append(connection) def disconnect_all(self): for conn in chain(self._available_connections, self._locked_connections): conn.disconnect_all() def __getattr__(self, name): def wrapped(*args, **kwargs): return self._proxy_client(name, *args, **kwargs) return wrapped def _proxy_client(self, name, *args, **kwargs): conn = self.get_connection() try: return getattr(conn, name)(*args, **kwargs) finally: self.release_connection(conn) class CacheModuleKeys(collections.MutableSet): """ A set subclass that keeps track of insertion time and persists the set in memcached. 
""" PREFIX = 'ansible_cache_keys' def __init__(self, cache, *args, **kwargs): self._cache = cache self._keyset = dict(*args, **kwargs) def __contains__(self, key): return key in self._keyset def __iter__(self): return iter(self._keyset) def __len__(self): return len(self._keyset) def add(self, key): self._keyset[key] = time.time() self._cache.set(self.PREFIX, self._keyset) def discard(self, key): del self._keyset[key] self._cache.set(self.PREFIX, self._keyset) def remove_by_timerange(self, s_min, s_max): for k in self._keyset.keys(): t = self._keyset[k] if s_min < t < s_max: del self._keyset[k] self._cache.set(self.PREFIX, self._keyset) class CacheModule(BaseCacheModule): def __init__(self, *args, **kwargs): if C.CACHE_PLUGIN_CONNECTION: connection = C.CACHE_PLUGIN_CONNECTION.split(',') else: connection = ['127.0.0.1:11211'] self._timeout = C.CACHE_PLUGIN_TIMEOUT self._prefix = C.CACHE_PLUGIN_PREFIX self._cache = ProxyClientPool(connection, debug=0) self._keys = CacheModuleKeys(self._cache, self._cache.get(CacheModuleKeys.PREFIX) or []) def _make_key(self, key): return "{0}{1}".format(self._prefix, key) def _expire_keys(self): if self._timeout > 0: expiry_age = time.time() - self._timeout self._keys.remove_by_timerange(0, expiry_age) def get(self, key): value = self._cache.get(self._make_key(key)) # guard against the key not being removed from the keyset; # this could happen in cases where the timeout value is changed # between invocations if value is None: self.delete(key) raise KeyError return value def set(self, key, value): self._cache.set(self._make_key(key), value, time=self._timeout, min_compress_len=1) self._keys.add(key) def keys(self): self._expire_keys() return list(iter(self._keys)) def contains(self, key): self._expire_keys() return key in self._keys def delete(self, key): self._cache.delete(self._make_key(key)) self._keys.discard(key) def flush(self): for key in self.keys(): self.delete(key) def copy(self): return self._keys.copy() def 
__getstate__(self): return dict() def __setstate__(self, data): self.__init__()
gpl-3.0
johnnykv/mnemosyne
webapi/mnemowebapi.py
2
5888
# Copyright (C) 2012 Johnny Vestergaard <jkv@unixcluster.dk> # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. import bottle import shared_state import os import uuid import shared_state as shared import logging import types from bottle import run, install, mount, request from bottle.ext import mongo from beaker.middleware import SessionMiddleware from datetime import datetime from kumo.loggly import Loggly from cork import Cork logger = logging.getLogger(__name__) class MnemoWebAPI(): """Exposes raw and normalized data from hpfeeds through a RESTful api""" def __init__(self, datebase_name, static_file_path=None, data_dir='./data', loggly_token=None): cork_dir = os.path.join(data_dir, 'cork') beaker_dir = os.path.join(data_dir, 'beaker') bottle.TEMPLATE_PATH.insert(0,'webapi/views/') #vars which must be visible across all webapi modules shared.static_dir = static_file_path shared.plug = bottle.ext.mongo.MongoPlugin(uri="localhost", db=datebase_name, json_mongo=True) #install mongo plugin for root app install(shared_state.plug) #check if cork files exists cork_files = ['users.json', 'roles.json', 'register.json'] if not set(cork_files).issubset(set(os.listdir(cork_dir))): #if not, create them logger.info('Cork authentication files not found, creating new files.') shared.auth = self.populate_conf_directory(cork_dir) else: 
shared.auth = Cork(cork_dir) #admin depends on shared.auth import admin #import and mount api version 1 (stable) from webapi.api.v1 import app as api_v1 mount('/api/v1/', api_v1.app) #import and mount development version (unstable) from webapi.api.d import app as api_d mount('/api/d/', api_d.app) #must be imported AFTER mounts. if shared.static_dir is not None: import default_routes #wrap root app in beaker middleware session_opts = { 'session.type': 'file', 'session.cookie_expires': False, 'session.data_dir': beaker_dir, 'session.auto': True, #set secure attribute on cookie 'session.secure': True } self.app = bottle.app() if loggly_token: self.app = Loggly(bottle.app(), loggly_token) self.app = SessionMiddleware(self.app, session_opts) root_app = bottle.app() #setup logging hooks @root_app.hook('before_request') @api_d.app.hook('before_request') @api_v1.app.hook('before_request') def log_request(): user_agent = "" if 'HTTP_USER_AGENT' in bottle.request.environ: user_agent = bottle.request.environ['HTTP_USER_AGENT'] if 'REMOTE_ADDR' in bottle.request.environ: remote_addr = bottle.request.environ['REMOTE_ADDR'] else: remote_addr = "" if 'beaker.session' in bottle.request.environ: session = bottle.request.environ.get('beaker.session') username = session.get('username', None) else: username = "None" logger.info("[{0}/{1}] {2} {3} ({4})".format(remote_addr, username, request.method, request.fullpath, user_agent)) def return_text(self, e): return e.status #make sure error pages for API are pure text api_d.app.default_error_handler = types.MethodType(return_text, self) api_v1.app.default_error_handler = types.MethodType(return_text, self) def start_listening(self, host, port): logger.info('Starting web api, listening on {0}:{1}'.format(host, port)) run(app=self.app, host=host, port=port, debug=False, server='gevent', log="wsgi", quiet=True, keyfile='server.key', certfile='server.crt') #defaults def populate_conf_directory(self, auth_dir): """ Creation of basic auth 
files. """ logger.info("Creating new authentication files, check STDOUT for the generated admin password.") cork = Cork(auth_dir, initialize=True) cork._store.roles['admin'] = 100 cork._store.roles['access_all'] = 70 cork._store.roles['access_normalized'] = 60 cork._store.roles['public'] = 10 cork._store.save_roles() tstamp = str(datetime.utcnow()) #default admin combo: admin/admin username = 'admin' password = str(uuid.uuid4()) cork._store.users[username] = { 'role': 'admin', 'hash': cork._hash(username, password), 'email_addr': username + '@localhost.local', 'desc': 'Default administrative account', 'creation_date': tstamp } cork._store.save_users() #for security reasons we fdo not want this in the log files. print "A 'admin' account has been created with the password '{0}'".format(password) return cork #for debugging if __name__ == '__main__': m = MnemoWebAPI('mnemosyne') m.start_listening(host='localhost', port='8181')
gpl-3.0
giggsey/SickRage
lib/github/StatsContributor.py
74
4471
# -*- coding: utf-8 -*-
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>
#
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/
#
# PyGithub is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>.

import github.GithubObject
import github.NamedUser


class StatsContributor(github.GithubObject.NonCompletableGithubObject):
    """
    This class represents statistics of a contibutor.
    The reference can be found here
    http://developer.github.com/v3/repos/statistics/#get-contributors-list-with-additions-deletions-and-commit-counts
    """

    class Week(github.GithubObject.NonCompletableGithubObject):
        """
        This class represents weekly statistics of a contibutor.
        """

        @property
        def w(self):
            """
            :type: datetime.datetime
            """
            return self._w.value

        @property
        def a(self):
            """
            :type: int
            """
            return self._a.value

        @property
        def d(self):
            """
            :type: int
            """
            return self._d.value

        @property
        def c(self):
            """
            :type: int
            """
            return self._c.value

        def _initAttributes(self):
            self._w = github.GithubObject.NotSet
            self._a = github.GithubObject.NotSet
            self._d = github.GithubObject.NotSet
            self._c = github.GithubObject.NotSet

        def _useAttributes(self, attributes):
            if "w" in attributes:  # pragma no branch
                self._w = self._makeTimestampAttribute(attributes["w"])
            if "a" in attributes:  # pragma no branch
                self._a = self._makeIntAttribute(attributes["a"])
            if "d" in attributes:  # pragma no branch
                self._d = self._makeIntAttribute(attributes["d"])
            if "c" in attributes:  # pragma no branch
                self._c = self._makeIntAttribute(attributes["c"])

    @property
    def author(self):
        """
        :type: :class:`github.NamedUser.NamedUser`
        """
        return self._author.value

    @property
    def total(self):
        """
        :type: int
        """
        return self._total.value

    @property
    def weeks(self):
        """
        :type: list of :class:`.Week`
        """
        return self._weeks.value

    def _initAttributes(self):
        self._author = github.GithubObject.NotSet
        self._total = github.GithubObject.NotSet
        self._weeks = github.GithubObject.NotSet

    def _useAttributes(self, attributes):
        if "author" in attributes:  # pragma no branch
            self._author = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["author"])
        if "total" in attributes:  # pragma no branch
            self._total = self._makeIntAttribute(attributes["total"])
        if "weeks" in attributes:  # pragma no branch
            self._weeks = self._makeListOfClassesAttribute(self.Week, attributes["weeks"])
gpl-3.0
MostlyOpen/odoo_addons_jcafb
myo_address_cst/wizard/address_summary_wizard.py
1
2688
# -*- coding: utf-8 -*-
###############################################################################
#
#    Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################

from openerp import api, fields, models

import logging

_logger = logging.getLogger(__name__)


class AddressSummaryWizard(models.TransientModel):
    """Wizard that creates one myo.summary record per selected address."""
    _name = 'myo.address.summary.wizard'

    address_ids = fields.Many2many('myo.address', string='Addresses')
    category_id = fields.Many2one('myo.summary.category', string='Category')

    @api.multi
    def do_address_update(self):
        """Create a summary record for every address selected on the wizard.

        When a category is selected, it is linked to each newly created
        summary record. Always returns True.
        """
        self.ensure_one()
        summary_model = self.env['myo.summary']
        for address_reg in self.address_ids:
            # An empty many2one is a falsy recordset, not the literal False,
            # so test truthiness instead of identity against False. This also
            # avoids referencing an unassigned user_id when the field is empty.
            user_id = address_reg.user_id.id if address_reg.user_id else False
            values = {
                'name': address_reg.name,
                'user_id': user_id,
                'address_id': address_reg.id,
                'is_address_summary': True,
            }
            new_summary = summary_model.create(values)
            # Use the module logger instead of a stray debug print statement.
            _logger.debug('Summary category id: %s', self.category_id.id)
            if self.category_id:
                # (4, id) links the existing category without unlinking others.
                new_summary.write({'category_ids': [(4, self.category_id.id)]})
        return True

    @api.multi
    def do_reopen_form(self):
        """Return an action that reopens this wizard on the same record."""
        self.ensure_one()
        return {
            'type': 'ir.actions.act_window',
            'res_model': self._name,  # this model
            'res_id': self.id,  # the current wizard record
            'view_type': 'form',
            'view_mode': 'form, tree',
            'target': 'new',
        }

    @api.multi
    def do_populate_marked_addresses(self):
        """Load the addresses marked in the originating list view."""
        self.ensure_one()
        self.address_ids = self._context.get('active_ids')
        # reopen wizard form on same wizard record
        return self.do_reopen_form()
agpl-3.0
SRabbelier/Melange
app/soc/modules/seeder/logic/providers/phone_number.py
1
1423
#!/usr/bin/python2.5
#
# Copyright 2010 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Module containing data providers for PhoneNumberProperty.
"""

from soc.modules.seeder.logic.providers.provider import BaseDataProvider
from soc.modules.seeder.logic.providers.provider import FixedValueProvider

import random


__authors__ = [
    '"Felix Kerekes" <sttwister@gmail.com>',
    ]


# pylint: disable=W0223
class PhoneNumberProvider(BaseDataProvider):
  """Base class for all data providers that return a phone number.
  """

  pass


# pylint: disable=W0223
class FixedPhoneNumberProvider(PhoneNumberProvider, FixedValueProvider):
  """Data provider that returns a fixed phone number.
  """

  pass


class RandomPhoneNumberProvider(PhoneNumberProvider):
  """Data provider that returns a random phone number.
  """

  def getValue(self):
    """Builds a ten-character string of uniformly random decimal digits."""
    digits = []
    for _ in range(10):
      digits.append(str(random.randint(0, 9)))
    return ''.join(digits)
apache-2.0
rven/odoo
addons/purchase/tests/test_purchase_order_report.py
2
2744
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

from odoo.addons.account.tests.common import AccountTestInvoicingCommon
from odoo.tests import Form, tagged
from datetime import datetime


@tagged('post_install', '-at_install')
class TestPurchaseOrderReport(AccountTestInvoicingCommon):
    """Checks UoM and currency conversions performed by the purchase.report view."""

    def test_00_purchase_order_report(self):
        uom_dozen = self.env.ref('uom.product_uom_dozen')

        # Order both products by the dozen, in a foreign currency.
        # NOTE(review): the assertions below assume currency_data's rate is 2
        # and that product_b's base UoM is already a dozen -- confirm against
        # AccountTestInvoicingCommon's setup.
        po = self.env['purchase.order'].create({
            'partner_id': self.partner_a.id,
            'currency_id': self.currency_data['currency'].id,
            'order_line': [
                (0, 0, {
                    'name': self.product_a.name,
                    'product_id': self.product_a.id,
                    'product_qty': 1.0,
                    'product_uom': uom_dozen.id,
                    'price_unit': 100.0,
                    'date_planned': datetime.today(),
                    'taxes_id': False,
                }),
                (0, 0, {
                    'name': self.product_b.name,
                    'product_id': self.product_b.id,
                    'product_qty': 1.0,
                    'product_uom': uom_dozen.id,
                    'price_unit': 200.0,
                    'date_planned': datetime.today(),
                    'taxes_id': False,
                }),
            ],
        })
        po.button_confirm()

        # Bill the confirmed order through the vendor-bill form and post it.
        f = Form(self.env['account.move'].with_context(default_move_type='in_invoice'))
        f.invoice_date = f.date
        f.partner_id = po.partner_id
        f.purchase_id = po
        invoice = f.save()
        invoice.action_post()
        po.flush()

        res_product1 = self.env['purchase.report'].search([
            ('order_id', '=', po.id),
            ('product_id', '=', self.product_a.id),
            ('company_id', '=', self.company_data['company'].id),
        ])
        # check that report will convert dozen to unit or not
        self.assertEqual(res_product1.qty_ordered, 12.0, 'UoM conversion is not working')
        # report should show in company currency (amount/rate) = (100/2)
        self.assertEqual(res_product1.price_total, 50.0, 'Currency conversion is not working')

        res_product2 = self.env['purchase.report'].search([
            ('order_id', '=', po.id),
            ('product_id', '=', self.product_b.id),
            ('company_id', '=', self.company_data['company'].id),
        ])
        self.assertEqual(res_product2.qty_ordered, 1.0, 'No conversion needed since product_b is already a dozen')
        # report should show in company currency (amount/rate) = (200/2)
        self.assertEqual(res_product2.price_total, 100.0, 'Currency conversion is not working')
agpl-3.0
TheWardoctor/Wardoctors-repo
plugin.video.master.reborn/resources/lib/modules/favourites.py
4
6125
# -*- coding: utf-8 -*-

'''
    Specto Add-on
    Copyright (C) 2015 lambda

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
'''

# Watchlist ("favourites") and resume-progress persistence, backed by two
# SQLite databases in the add-on profile directory.
#
# NOTE(review): table names are interpolated straight into SQL text and the
# stored items are round-tripped through repr()/eval(). That is only safe
# while `content` and the stored rows come exclusively from this add-on
# itself -- do not feed either from untrusted input.

try:
    from sqlite3 import dbapi2 as database
except:
    from pysqlite2 import dbapi2 as database

import json,os,xbmc,xbmcaddon

from resources.lib.modules import control

addonInfo = xbmcaddon.Addon().getAddonInfo
dataPath = xbmc.translatePath(addonInfo('profile')).decode('utf-8')

favouritesFile = os.path.join(dataPath, 'favourites.db')
progressFile = os.path.join(dataPath, 'progress.db')


def getFavourites(content):
    # Returns [(id, item_dict), ...] rows from the favourites table named
    # `content`; empty list when the table or database does not exist yet.
    try:
        dbcon = database.connect(favouritesFile)
        dbcur = dbcon.cursor()
        dbcur.execute("SELECT * FROM %s" % content)
        items = dbcur.fetchall()
        # Rows were stored as repr(dict); eval() restores the dict.
        items = [(i[0].encode('utf-8'), eval(i[1].encode('utf-8'))) for i in items]
    except:
        items = []
    return items


def getProgress(content):
    # Same as getFavourites, but against the resume-progress database.
    try:
        dbcon = database.connect(progressFile)
        dbcur = dbcon.cursor()
        dbcur.execute("SELECT * FROM %s" % content)
        items = dbcur.fetchall()
        items = [(i[0].encode('utf-8'), eval(i[1].encode('utf-8'))) for i in items]
    except:
        items = []
    return items


def addFavourite(meta, content):
    # Upserts one movie/show entry (JSON metadata string) into the `content`
    # table, keyed by imdb id with tvdb id as fallback.
    try:
        item = dict()
        meta = json.loads(meta)
        # print "META DUMP FAVOURITES %s" % meta
        try: id = meta['imdb']
        except: id = meta['tvdb']
        # Copy only the whitelisted metadata keys into the stored item.
        if 'title' in meta: title = item['title'] = meta['title']
        if 'tvshowtitle' in meta: title = item['title'] = meta['tvshowtitle']
        if 'year' in meta: item['year'] = meta['year']
        if 'poster' in meta: item['poster'] = meta['poster']
        if 'fanart' in meta: item['fanart'] = meta['fanart']
        if 'imdb' in meta: item['imdb'] = meta['imdb']
        if 'tmdb' in meta: item['tmdb'] = meta['tmdb']
        if 'tvdb' in meta: item['tvdb'] = meta['tvdb']
        if 'tvrage' in meta: item['tvrage'] = meta['tvrage']

        control.makeFile(dataPath)
        dbcon = database.connect(favouritesFile)
        dbcur = dbcon.cursor()
        # DELETE + INSERT implements an upsert on the UNIQUE(id) constraint.
        dbcur.execute("CREATE TABLE IF NOT EXISTS %s (""id TEXT, ""items TEXT, ""UNIQUE(id)"");" % content)
        dbcur.execute("DELETE FROM %s WHERE id = '%s'" % (content, id))
        dbcur.execute("INSERT INTO %s Values (?, ?)" % content, (id, repr(item)))
        dbcon.commit()
        control.refresh()
        control.infoDialog('Added to Watchlist', heading=title, icon=item['poster'])
    except:
        return


def addEpisodes(meta, content):
    # Like addFavourite, but always targets the 'episode' table and stores
    # the extra episode/season fields.
    try:
        item = dict()
        meta = json.loads(meta)
        content = "episode"
        try: id = meta['imdb']
        except: id = meta['tvdb']
        if 'title' in meta: title = item['title'] = meta['title']
        if 'tvshowtitle' in meta: title = item['tvshowtitle'] = meta['tvshowtitle']
        if 'year' in meta: item['year'] = meta['year']
        if 'poster' in meta: item['poster'] = meta['poster']
        if 'fanart' in meta: item['fanart'] = meta['fanart']
        if 'imdb' in meta: item['imdb'] = meta['imdb']
        if 'tmdb' in meta: item['tmdb'] = meta['tmdb']
        if 'tvdb' in meta: item['tvdb'] = meta['tvdb']
        if 'tvrage' in meta: item['tvrage'] = meta['tvrage']
        if 'episode' in meta: item['episode'] = meta['episode']
        if 'season' in meta: item['season'] = meta['season']
        if 'premiered' in meta: item['premiered'] = meta['premiered']
        if 'original_year' in meta: item['original_year'] = meta['original_year']

        control.makeFile(dataPath)
        dbcon = database.connect(favouritesFile)
        dbcur = dbcon.cursor()
        dbcur.execute("CREATE TABLE IF NOT EXISTS %s (""id TEXT, ""items TEXT, ""UNIQUE(id)"");" % content)
        dbcur.execute("DELETE FROM %s WHERE id = '%s'" % (content, id))
        dbcur.execute("INSERT INTO %s Values (?, ?)" % content, (id, repr(item)))
        dbcon.commit()
        control.refresh()
        control.infoDialog('Added to Watchlist', heading=title)
    except:
        return


def deleteFavourite(meta, content):
    # Removes an entry by trying every known id key; each DELETE is
    # best-effort because the metadata may carry only a subset of the ids.
    try:
        meta = json.loads(meta)
        if 'title' in meta: title = meta['title']
        if 'tvshowtitle' in meta: title = meta['tvshowtitle']
        try:
            dbcon = database.connect(favouritesFile)
            dbcur = dbcon.cursor()
            try: dbcur.execute("DELETE FROM %s WHERE id = '%s'" % (content, meta['imdb']))
            except: pass
            try: dbcur.execute("DELETE FROM %s WHERE id = '%s'" % (content, meta['tvdb']))
            except: pass
            try: dbcur.execute("DELETE FROM %s WHERE id = '%s'" % (content, meta['tmdb']))
            except: pass
            dbcon.commit()
        except: pass
        control.refresh()
        control.infoDialog('Removed From Watchlist', heading=title)
    except:
        return


def deleteProgress(meta, content):
    # Same id-key fallback strategy as deleteFavourite, against the
    # progress database; no user notification is shown.
    try:
        meta = json.loads(meta)
        try:
            dbcon = database.connect(progressFile)
            dbcur = dbcon.cursor()
            try: dbcur.execute("DELETE FROM %s WHERE id = '%s'" % (content, meta['imdb']))
            except: pass
            try: dbcur.execute("DELETE FROM %s WHERE id = '%s'" % (content, meta['tvdb']))
            except: pass
            try: dbcur.execute("DELETE FROM %s WHERE id = '%s'" % (content, meta['tmdb']))
            except: pass
            dbcon.commit()
        except: pass
        control.refresh()
    except:
        return
apache-2.0
aveminus/sumatrapdf
tools/efi/efiparse.py
17
13204
#!/usr/bin/env python

"""
Parses the output of efi.exe.

TODO:
 - do a per .obj file string size changes
"""

import bz2, bisect

g_file_name = "efi.txt"

(SECTION_CODE, SECTION_DATA, SECTION_BSS, SECTION_UNKNOWN) = ("C", "D", "B", "U")


# maps a numeric string idx to string. We take advantage of the fact that
# strings in efi.exe output are stored with consequitive indexes
class Strings(object):
    def __init__(self):
        self.strings = []

    def add(self, idx, s):
        # Indexes must arrive consecutively, starting at 0.
        assert idx == len(self.strings)
        self.strings.append(s)

    def idx_to_str(self, idx):
        return self.strings[idx]


# type | sectionNo | length | offset | objFileId
# C|1|35|0|C:\Users\kkowalczyk\src\sumatrapdf\obj-dbg\sumatrapdf\SumatraPDF.obj
class Section(object):
    """One section record; name is resolved through the strings table."""

    def __init__(self, l, strings):
        parts = l.split("|")
        assert len(parts) == 5
        self.type = parts[0]
        self.section_no = int(parts[1])
        self.size = int(parts[2])
        self.offset = int(parts[3])
        idx = int(parts[4])
        self.name = strings.idx_to_str(idx)


def print_i_off_sec(i, off, section):
    # Debug helper used when one of the sanity asserts below fires.
    print("""i: %d
off: %d
section.offset: %d
""" % (i, off, section.offset))


class SectionsSorted(object):
    """Sections of one section number, kept sorted by offset for bisecting."""

    def __init__(self):
        self.offsets = []
        self.sections = []

    def add(self, section):
        prev_sec_idx = len(self.offsets) - 1
        self.offsets.append(section.offset)
        self.sections.append(section)
        # Validate input ordering whenever a previous element exists.
        # (Was `> 1`, which skipped the check for the first two inserts.)
        if prev_sec_idx >= 0:
            prev_off = self.offsets[prev_sec_idx]
            assert prev_off <= section.offset

    def objname_by_offset(self, off):
        """Returns the .obj file name of the section containing offset `off`."""
        i = bisect.bisect_left(self.offsets, off)
        if i >= len(self.sections):
            i = len(self.sections) - 1
        section = self.sections[i]
        if off < section.offset:
            # bisect_left landed one past the containing section; step back.
            try:
                assert i > 0
            except:
                print_i_off_sec(i, off, section)
                raise
            i -= 1
            section = self.sections[i]
        try:
            assert off >= section.offset
        except:
            print_i_off_sec(i, off, section)
            raise
        # Sanity check: off must fall before the next section's start.
        # (The original condition `len(self.sections) < i + 1` was inverted
        # and therefore dead code; assumes section offsets are strictly
        # increasing within one section number.)
        if i + 1 < len(self.sections):
            next_section = self.sections[i + 1]
            assert off < next_section.offset
        return section.name


class SectionToObjFile(object):
    """Maps (section number, offset) of a symbol to its .obj file name."""

    def __init__(self, sections, strings):
        self.strings = strings
        sec_no_to_sec = {}
        curr_sec_no = -1
        curr_sec_sorted = None
        for s in sections:
            if s.section_no != curr_sec_no:
                # Section numbers arrive grouped and strictly increasing.
                assert s.section_no not in sec_no_to_sec
                assert s.section_no > curr_sec_no
                curr_sec_no = s.section_no
                curr_sec_sorted = SectionsSorted()
                sec_no_to_sec[curr_sec_no] = curr_sec_sorted
            curr_sec_sorted.add(s)
        self.sec_no_to_sec = sec_no_to_sec

    def get_objname_by_sec_no_off(self, sec_no, sec_off):
        # Note: it does happen that we have symbols in sections
        # that are not in sections list, like:
        # P|6|553|0|0|__except_list
        # D|6|0|553|0|__safe_se_handler_count|
        if sec_no not in self.sec_no_to_sec:
            return ""
        sec_sorted = self.sec_no_to_sec[sec_no]
        return sec_sorted.objname_by_offset(sec_off)

    def get_objname_by_symbol(self, sym):
        return self.get_objname_by_sec_no_off(sym.section, sym.offset)


(SYM_NULL, SYM_EXE, SYM_COMPILAND, SYM_COMPILAND_DETAILS) = ("N", "Exe", "C", "CD")
(SYM_COMPILAND_ENV, SYM_FUNCTION, SYM_BLOCK, SYM_DATA) = ("CE", "F", "B", "D")
(SYM_ANNOTATION, SYM_LABEL, SYM_PUBLIC, SYM_UDT, SYM_ENUM) = ("A", "L", "P", "U", "E")
(SYM_FUNC_TYPE, SYM_POINTER_TYPE, SYM_ARRAY_TYPE) = ("FT", "PT", "AT")
(SYM_BASE_TYPE, SYM_TYPEDEF, SYM_BASE_CLASS, SYM_FRIEND) = ("BT", "T", "BC", "Friend")
(SYM_FUNC_ARG_TYPE, SYM_FUNC_DEBUG_START, SYM_FUNC_DEBUG_END) = ("FAT", "FDS", "FDE")
(SYM_USING_NAMESPACE, SYM_VTABLE_SHAPE, SYM_VTABLE, SYM_CUSTOM) = ("UN", "VTS", "VT", "Custom")
(SYM_THUNK, SYM_CUSTOM_TYPE, SYM_MANAGED_TYPE, SYM_DIMENSION) = ("Thunk", "CT", "MT", "Dim")


# type | section | length | offset | rva | name
# F|1|35|0|4096|AllocArray<wchar_t>|wchar_t*__cdeclAllocArray<wchar_t>(unsignedint)
class Symbol(object):
    """One symbol record; objname is filled in later by calc_symbols_objname."""

    def __init__(self, l):
        parts = l.split("|")
        assert len(parts) in (6, 7), "len(parts) is %d\n'%s'" % (len(parts), l)
        self.type = parts[0]
        self.section = int(parts[1])
        self.size = int(parts[2])
        self.offset = int(parts[3])
        self.rva = int(parts[4])
        self.name = parts[5]
        # Thunk and data symbols carry one extra, type-specific field.
        if self.type == SYM_THUNK:
            self.thunk_type = parts[6]
        elif self.type == SYM_DATA:
            self.data_type_name = parts[6]
        self.objname = None

    def full_name(self):
        return self.name + "@" + self.objname


class Type(object):
    def __init__(self, l):
        # TODO: parse the line
        self.line = l


def print_sym(sym):
    print(sym)
    print("name : %s" % sym.name)
    print("off : %d" % sym.offset)
    print("size : %d" % sym.size)


class ParseState(object):
    """Accumulates strings, types, sections and symbols during parsing."""

    def __init__(self, fo, obj_file_splitters):
        self.fo = fo
        self.obj_file_splitters = obj_file_splitters
        self.strings = Strings()
        self.types = []
        self.symbols = []
        self.sections = []
        # functions, strings etc. are laid out rounded so e.g. a function 11 bytes
        # in size really takes 16 bytes in the binary, due to rounding of the symbol
        # after it. Those values allow us to calculate how much is wasted due
        # to rounding
        self.symbols_unrounded_size = 0
        self.symbols_rounding_waste = 0

    def add_symbol(self, sym):
        self.symbols.append(sym)
        self.symbols_unrounded_size += sym.size
        prev_sym_idx = len(self.symbols) - 2
        if prev_sym_idx < 0:
            return
        prev_sym = self.symbols[prev_sym_idx]
        prev_sym_rounded_size = sym.offset - prev_sym.offset
        # prev_sym_rounded_size / prev_sym_wasted can be < 0 in rare cases because
        # symbols can be inter-leaved e.g. a data symbol can be inside function
        # symbol, which breaks the simplistic logic of calculating rounded size
        # as curr.offset - prev.offset. It can also happen when we cross section
        # boundaries. We just ignore those cases because approximate data is
        # better than no data
        if prev_sym_rounded_size < 0:
            return
        prev_sym_wasted = prev_sym_rounded_size - prev_sym.size
        # Note: I don't understand why but efi dump shows some very large gaps
        # between e.g. 2 functions. I filter everything above 16 bytes, since
        # wastage shouldn't be bigger than that
        if prev_sym_wasted > 16:
            return
        if prev_sym_wasted > 0:
            self.symbols_rounding_waste += prev_sym_wasted

    def readline(self):
        # Returns the next line stripped of trailing whitespace, or None at EOF.
        l = self.fo.readline()
        if not l:
            return None
        l = l.rstrip()
        return l


# The parser is a small state machine: each parse_* function consumes lines
# and returns the next state function (or None when done).

def parse_start(state):
    l = state.readline()
    if l is None or len(l) == 0:
        return None
    assert l == "Format: 1", "unexpected line: '%s'" % l
    return parse_next_section


def parse_next_section(state):
    l = state.readline()
    if l is None:
        return None
    if l == "":
        return parse_next_section
    if l == "Strings:":
        return parse_strings
    if l == "Types:":
        return parse_types
    if l == "Sections:":
        return parse_sections
    if l == "Symbols:":
        return parse_symbols
    print("Unknown section: '%s'" % l)
    return None


def parse_strings(state):
    while True:
        l = state.readline()
        if l is None:
            return None
        if l == "":
            return parse_next_section
        parts = l.split("|", 2)
        idx = int(parts[0])
        s = parts[1]
        # Strip a known path prefix so .obj names are stable across machines.
        for splitter in state.obj_file_splitters:
            pos = s.find(splitter)
            if -1 != pos:
                s = s[pos + len(splitter):]
                break
        state.strings.add(idx, s)


def parse_sections(state):
    while True:
        l = state.readline()
        if l is None:
            return None
        if l == "":
            return parse_next_section
        state.sections.append(Section(l, state.strings))


def parse_symbols(state):
    while True:
        l = state.readline()
        if l is None:
            return None
        if l == "":
            return parse_next_section
        state.add_symbol(Symbol(l))


def parse_types(state):
    while True:
        l = state.readline()
        if l is None:
            return None
        if l == "":
            return parse_next_section
        # TODO: should parse structs, not just count them
        if l.startswith("struct"):
            state.types.append(Type(l))


def calc_symbols_objname(state):
    """Resolves every symbol's .obj file name from the section map."""
    sec_to_objfile = SectionToObjFile(state.sections, state.strings)
    for sym in state.symbols:
        sym.objname = sec_to_objfile.get_objname_by_symbol(sym)


def parse_file_object(fo, obj_file_splitters):
    """Parses an efi.exe dump from file object `fo`; returns a ParseState."""
    state = ParseState(fo, obj_file_splitters)
    curr = parse_start
    while curr:
        curr = curr(state)
    calc_symbols_objname(state)
    return state


def parse_file(file_name, obj_file_splitters=()):
    # Immutable default instead of a shared mutable list.
    print("parse_file: %s" % file_name)
    if file_name.endswith(".bz2"):
        # The `buffering` argument to BZ2File was removed in Python 3.9.
        with bz2.BZ2File(file_name, "r") as fo:
            return parse_file_object(fo, obj_file_splitters)
    with open(file_name, "r") as fo:
        return parse_file_object(fo, obj_file_splitters)


def n_as_str(n):
    """Formats n with an explicit '+' sign for positive values."""
    if n > 0:
        return "+" + str(n)
    return str(n)


class Diff(object):
    """Result of comparing two ParseState objects (see diff())."""

    def __init__(self):
        self.added = []
        self.removed = []
        self.changed = []
        self.str_sizes1 = 0
        self.str_sizes2 = 0
        self.n_symbols1 = 0
        self.symbols_unrounded_size1 = 0
        self.symbols_rounding_waste1 = 0
        self.n_symbols2 = 0
        self.symbols_unrounded_size2 = 0
        self.symbols_rounding_waste2 = 0

    def __repr__(self):
        str_sizes1 = self.str_sizes1
        str_sizes2 = self.str_sizes2
        str_sizes_diff = n_as_str(str_sizes2 - str_sizes1)
        n_symbols1 = self.n_symbols1
        n_symbols2 = self.n_symbols2
        symbols_diff = n_as_str(n_symbols2 - n_symbols1)
        sym_size1 = self.symbols_unrounded_size1
        sym_size2 = self.symbols_unrounded_size2
        sym_size_diff = n_as_str(sym_size2 - sym_size1)
        wasted1 = self.symbols_rounding_waste1
        wasted2 = self.symbols_rounding_waste2
        wasted_diff = n_as_str(wasted2 - wasted1)
        n_added = len(self.added)
        n_removed = len(self.removed)
        n_changed = len(self.changed)
        s = """symbols        : %(symbols_diff)-6s (%(n_symbols1)d => %(n_symbols2)d)
added          : %(n_added)d
removed        : %(n_removed)d
changed        : %(n_changed)d
symbol sizes   : %(sym_size_diff)-6s (%(sym_size1)d => %(sym_size2)d)
wasted rounding: %(wasted_diff)-6s (%(wasted1)d => %(wasted2)d)
string sizes   : %(str_sizes_diff)-6s (%(str_sizes1)d => %(str_sizes2)d)""" % locals()
        return s


def same_sym_sizes(syms):
    """True if any two symbols in the list share the same size."""
    sizes = []
    for sym in syms:
        if sym.size in sizes:
            return True
        sizes.append(sym.size)
    return False


def syms_len(syms):
    # name_to_sym values are either one Symbol or a list of Symbols.
    if isinstance(syms, list):
        return len(syms)
    return 1


class ChangedSymbol(object):
    """A symbol present in both dumps whose size changed."""

    def __init__(self, sym1, sym2):
        assert sym1.name == sym2.name
        self.name = sym1.name
        self.size_diff = sym2.size - sym1.size
        self._full_name = sym1.full_name()

    def full_name(self):
        return self._full_name


class SymbolStats(object):
    """Indexes symbols by name; anonymous '*str' entries only count size."""

    def __init__(self):
        self.name_to_sym = {}
        self.str_sizes = 0

    def process_symbols(self, symbols):
        for sym in symbols:
            name = sym.name
            # for anonymous strings, we just count their total size
            # since we don't have a way to tell one string from another
            if name == "*str":
                self.str_sizes += sym.size
                continue
            if name not in self.name_to_sym:
                self.name_to_sym[name] = sym
                continue
            # Second and later occurrences promote the value to a list.
            v = self.name_to_sym[name]
            if isinstance(v, list):
                v.append(sym)
            else:
                v = [v, sym]
            self.name_to_sym[name] = v

    def syms_len(self, name):
        if name not in self.name_to_sym:
            return 0
        return syms_len(self.name_to_sym[name])


def find_added(name, diff_syms1, diff_syms2):
    """Symbols under `name` present in diff_syms2 but absent from diff_syms1."""
    if name not in diff_syms1.name_to_sym:
        syms = diff_syms2.name_to_sym[name]
        if isinstance(syms, list):
            return syms
        return [syms]
    return []


def diff(parse1, parse2):
    """Compares two parsed dumps; returns a Diff (parse1 -> parse2)."""
    assert isinstance(parse1, ParseState)
    assert isinstance(parse2, ParseState)
    diff_syms1 = SymbolStats()
    diff_syms1.process_symbols(parse1.symbols)
    diff_syms2 = SymbolStats()
    diff_syms2.process_symbols(parse2.symbols)
    added = []
    changed = []
    for name in diff_syms2.name_to_sym.keys():
        len1 = diff_syms1.syms_len(name)
        len2 = diff_syms2.syms_len(name)
        if len2 > len1:
            added += find_added(name, diff_syms1, diff_syms2)
        else:
            # Size changes are only tracked for uniquely-named symbols.
            if len1 == 1 and len2 == 1:
                sym1 = diff_syms1.name_to_sym[name]
                sym2 = diff_syms2.name_to_sym[name]
                if sym1.size != sym2.size:
                    changed += [ChangedSymbol(sym1, sym2)]
    removed = []
    for name in diff_syms1.name_to_sym.keys():
        len1 = diff_syms2.syms_len(name)
        len2 = diff_syms1.syms_len(name)
        if len2 > len1:
            removed += find_added(name, diff_syms2, diff_syms1)
    diff = Diff()
    diff.syms1 = diff_syms1
    diff.syms2 = diff_syms2
    diff.str_sizes1 = diff_syms1.str_sizes
    diff.str_sizes2 = diff_syms2.str_sizes
    diff.added = added
    diff.removed = removed
    diff.changed = changed
    diff.n_symbols1 = len(parse1.symbols)
    diff.symbols_unrounded_size1 = parse1.symbols_unrounded_size
    diff.symbols_rounding_waste1 = parse1.symbols_rounding_waste
    diff.n_symbols2 = len(parse2.symbols)
    diff.symbols_unrounded_size2 = parse2.symbols_unrounded_size
    diff.symbols_rounding_waste2 = parse2.symbols_rounding_waste
    return diff


def main():
    state = parse_file(g_file_name)
    print("%d types, %d sections, %d symbols" % (len(state.types), len(state.sections), len(state.symbols)))


if __name__ == "__main__":
    main()
gpl-3.0
manazhao/tf_recsys
tensorflow/contrib/keras/python/keras/utils/data_utils_test.py
16
4898
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for data_utils."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from itertools import cycle
import threading

import numpy as np

from tensorflow.contrib.keras.python import keras
from tensorflow.python.platform import test


class ThreadsafeIter(object):
  """Wraps an iterator so that concurrent next() calls are serialized."""

  def __init__(self, it):
    self.it = it
    self.lock = threading.Lock()

  def __iter__(self):
    return self

  def __next__(self):
    # Python 3 iterator protocol; delegates to the locked next().
    return self.next()

  def next(self):
    with self.lock:
      return next(self.it)


def threadsafe_generator(f):
  """Decorator that makes the generator returned by `f` thread-safe."""

  def g(*a, **kw):
    return ThreadsafeIter(f(*a, **kw))

  return g


class TestSequence(keras.utils.data_utils.Sequence):
  """Sequence of 100 items; item i is a uint8 array of `shape` filled with i."""

  def __init__(self, shape):
    self.shape = shape

  def __getitem__(self, item):
    return np.ones(self.shape, dtype=np.uint8) * item

  def __len__(self):
    return 100


class FaultSequence(keras.utils.data_utils.Sequence):
  """Sequence whose every item access raises, to exercise failure paths."""

  def __getitem__(self, item):
    raise IndexError(item, 'item is not present')

  def __len__(self):
    return 100


@threadsafe_generator
def create_generator_from_sequence_threads(ds):
  # Endless generator over the sequence, safe to share between threads.
  for i in cycle(range(len(ds))):
    yield ds[i]


def create_generator_from_sequence_pcs(ds):
  # Endless generator over the sequence, for multiprocessing enqueuers.
  for i in cycle(range(len(ds))):
    yield ds[i]


class TestEnqueuers(test.TestCase):
  """Exercises GeneratorEnqueuer and OrderedEnqueuer in thread and process modes."""

  def test_generator_enqueuer_threads(self):
    enqueuer = keras.utils.data_utils.GeneratorEnqueuer(
        create_generator_from_sequence_threads(TestSequence([3, 200, 200, 3])),
        use_multiprocessing=False)
    enqueuer.start(3, 10)
    gen_output = enqueuer.get()
    acc = []
    for _ in range(100):
      acc.append(int(next(gen_output)[0, 0, 0, 0]))
    # Threaded consumption may reorder items; only membership is checked.
    self.assertEqual(len(set(acc) - set(range(100))), 0)
    enqueuer.stop()

  def test_generator_enqueuer_processes(self):
    enqueuer = keras.utils.data_utils.GeneratorEnqueuer(
        create_generator_from_sequence_pcs(TestSequence([3, 200, 200, 3])),
        use_multiprocessing=True)
    enqueuer.start(3, 10)
    gen_output = enqueuer.get()
    acc = []
    for _ in range(100):
      acc.append(int(next(gen_output)[0, 0, 0, 0]))
    # Multiple processes consume out of order, so the output should not be
    # strictly sequential.
    self.assertNotEqual(acc, list(range(100)))
    enqueuer.stop()

  def test_generator_enqueuer_fail_threads(self):
    enqueuer = keras.utils.data_utils.GeneratorEnqueuer(
        create_generator_from_sequence_threads(FaultSequence()),
        use_multiprocessing=False)
    enqueuer.start(3, 10)
    gen_output = enqueuer.get()
    # A failing underlying sequence surfaces as StopIteration to the consumer.
    with self.assertRaises(StopIteration):
      next(gen_output)

  def test_generator_enqueuer_fail_processes(self):
    enqueuer = keras.utils.data_utils.GeneratorEnqueuer(
        create_generator_from_sequence_pcs(FaultSequence()),
        use_multiprocessing=True)
    enqueuer.start(3, 10)
    gen_output = enqueuer.get()
    with self.assertRaises(StopIteration):
      next(gen_output)

  def test_ordered_enqueuer_threads(self):
    enqueuer = keras.utils.data_utils.OrderedEnqueuer(
        TestSequence([3, 200, 200, 3]), use_multiprocessing=False)
    enqueuer.start(3, 10)
    gen_output = enqueuer.get()
    acc = []
    for _ in range(100):
      acc.append(next(gen_output)[0, 0, 0, 0])
    # OrderedEnqueuer must preserve the sequence order exactly.
    self.assertEqual(acc, list(range(100)))
    enqueuer.stop()

  def test_ordered_enqueuer_processes(self):
    enqueuer = keras.utils.data_utils.OrderedEnqueuer(
        TestSequence([3, 200, 200, 3]), use_multiprocessing=True)
    enqueuer.start(3, 10)
    gen_output = enqueuer.get()
    acc = []
    for _ in range(100):
      acc.append(next(gen_output)[0, 0, 0, 0])
    self.assertEqual(acc, list(range(100)))
    enqueuer.stop()

  def test_ordered_enqueuer_fail_threads(self):
    enqueuer = keras.utils.data_utils.OrderedEnqueuer(
        FaultSequence(), use_multiprocessing=False)
    enqueuer.start(3, 10)
    gen_output = enqueuer.get()
    with self.assertRaises(StopIteration):
      next(gen_output)

  def test_ordered_enqueuer_fail_processes(self):
    enqueuer = keras.utils.data_utils.OrderedEnqueuer(
        FaultSequence(), use_multiprocessing=True)
    enqueuer.start(3, 10)
    gen_output = enqueuer.get()
    with self.assertRaises(StopIteration):
      next(gen_output)


if __name__ == '__main__':
  test.main()
apache-2.0
DelazJ/QGIS
python/plugins/processing/preconfigured/PreconfiguredAlgorithm.py
30
2392
# -*- coding: utf-8 -*- """ *************************************************************************** PreconfiguredAlgorithm.py --------------------- Date : April 2016 Copyright : (C) 2016 by Victor Olaya Email : volayaf at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Victor Olaya' __date__ = 'April 2016' __copyright__ = '(C) 2016, Victor Olaya' import os from qgis.core import (QgsProcessingAlgorithm, QgsApplication) from processing.core.GeoAlgorithm import GeoAlgorithm from copy import deepcopy import json class PreconfiguredAlgorithm(GeoAlgorithm): def __init__(self, descriptionFile): self.descriptionFile = descriptionFile with open(self.descriptionFile) as f: self.description = json.load(f) GeoAlgorithm.__init__(self) self._name = self.description["name"] self._group = self.description["group"] def group(self): return self._group def displayName(self): return self._name def name(self): return os.path.splitext(os.path.basename(self.descriptionFile))[0].lower() def flags(self): return QgsProcessingAlgorithm.FlagHideFromModeler def execute(self, parameters, context=None, feedback=None, model=None): new_parameters = deepcopy(parameters) self.alg = QgsApplication.processingRegistry().createAlgorithmById(self.description["algname"]) for name, value in list(self.description["parameters"].items()): new_parameters[name] = value for name, value in list(self.description["outputs"].items()): self.alg.setOutputValue(name, value) self.alg.execute(new_parameters, feedback) self.outputs = self.alg.outputs
gpl-2.0
jindongh/boto
tests/integration/swf/test_layer1_workflow_execution.py
114
6866
"""
Tests for Layer1 of Simple Workflow

"""
import time
import uuid
import json
import traceback

from boto.swf.layer1_decisions import Layer1Decisions

from tests.integration.swf.test_layer1 import SimpleWorkflowLayer1TestBase


class SwfL1WorkflowExecutionTest(SimpleWorkflowLayer1TestBase):
    """
    test a simple workflow execution
    """
    # Marker flag used to select SWF integration tests.
    swf = True

    def run_decider(self):
        """
        run one iteration of a simple decision engine
        """
        # Poll for a decision task.
        tries = 0
        while True:
            dtask = self.conn.poll_for_decision_task(self._domain,
                self._task_list, reverse_order=True)
            if dtask.get('taskToken') is not None:
                # This means a real decision task has arrived.
                break
            time.sleep(2)
            tries += 1
            if tries > 10:
                # Give up if it's taking too long.  Probably
                # means something is broken somewhere else.
                assert False, 'no decision task occurred'

        # Get the most recent interesting event.
        ignorable = (
            'DecisionTaskScheduled',
            'DecisionTaskStarted',
            'DecisionTaskTimedOut',
        )
        event = None
        for tevent in dtask['events']:
            if tevent['eventType'] not in ignorable:
                event = tevent
                break

        # Construct the decision response: start the activity on workflow
        # start, close the workflow when the activity completes or fails.
        decisions = Layer1Decisions()
        if event['eventType'] == 'WorkflowExecutionStarted':
            activity_id = str(uuid.uuid1())
            decisions.schedule_activity_task(activity_id,
                self._activity_type_name, self._activity_type_version,
                task_list=self._task_list,
                input=event['workflowExecutionStartedEventAttributes']['input'])
        elif event['eventType'] == 'ActivityTaskCompleted':
            decisions.complete_workflow_execution(
                result=event['activityTaskCompletedEventAttributes']['result'])
        elif event['eventType'] == 'ActivityTaskFailed':
            decisions.fail_workflow_execution(
                reason=event['activityTaskFailedEventAttributes']['reason'],
                details=event['activityTaskFailedEventAttributes']['details'])
        else:
            decisions.fail_workflow_execution(
                reason='unhandled decision task type; %r' % (event['eventType'],))

        # Send the decision response.
        r = self.conn.respond_decision_task_completed(dtask['taskToken'],
            decisions=decisions._data,
            execution_context=None)
        assert r is None

    def run_worker(self):
        """
        run one iteration of a simple worker engine
        """
        # Poll for an activity task.
        tries = 0
        while True:
            atask = self.conn.poll_for_activity_task(self._domain,
                self._task_list, identity='test worker')
            if atask.get('activityId') is not None:
                # This means a real activity task has arrived.
                break
            time.sleep(2)
            tries += 1
            if tries > 10:
                # Give up if it's taking too long.  Probably
                # means something is broken somewhere else.
                assert False, 'no activity task occurred'

        # Do the work or catch a "work exception."
        # The "work" is summing the JSON-encoded list passed as input.
        reason = None
        try:
            result = json.dumps(sum(json.loads(atask['input'])))
        except:
            reason = 'an exception was raised'
            details = traceback.format_exc()

        if reason is None:
            r = self.conn.respond_activity_task_completed(
                atask['taskToken'], result)
        else:
            r = self.conn.respond_activity_task_failed(
                atask['taskToken'], reason=reason, details=details)
        assert r is None

    def test_workflow_execution(self):
        # Start a workflow execution whose activity task will succeed.
        workflow_id = 'wfid-%.2f' % (time.time(),)
        r = self.conn.start_workflow_execution(self._domain,
            workflow_id,
            self._workflow_type_name,
            self._workflow_type_version,
            execution_start_to_close_timeout='20',
            input='[600, 15]')
        # Need the run_id to lookup the execution history later.
        run_id = r['runId']

        # Move the workflow execution forward by having the
        # decider schedule an activity task.
        self.run_decider()

        # Run the worker to handle the scheduled activity task.
        self.run_worker()

        # Complete the workflow execution by having the
        # decider close it down.
        self.run_decider()

        # Check that the result was stored in the execution history.
        r = self.conn.get_workflow_execution_history(self._domain,
            run_id, workflow_id,
            reverse_order=True)['events'][0]
        result = r['workflowExecutionCompletedEventAttributes']['result']
        assert json.loads(result) == 615

    def test_failed_workflow_execution(self):
        # Start a workflow execution whose activity task will fail
        # (summing an int and a string raises inside the worker).
        workflow_id = 'wfid-%.2f' % (time.time(),)
        r = self.conn.start_workflow_execution(self._domain,
            workflow_id,
            self._workflow_type_name,
            self._workflow_type_version,
            execution_start_to_close_timeout='20',
            input='[600, "s"]')
        # Need the run_id to lookup the execution history later.
        run_id = r['runId']

        # Move the workflow execution forward by having the
        # decider schedule an activity task.
        self.run_decider()

        # Run the worker to handle the scheduled activity task.
        self.run_worker()

        # Complete the workflow execution by having the
        # decider close it down.
        self.run_decider()

        # Check that the failure was stored in the execution history.
        r = self.conn.get_workflow_execution_history(self._domain,
            run_id, workflow_id,
            reverse_order=True)['events'][0]
        reason = r['workflowExecutionFailedEventAttributes']['reason']
        assert reason == 'an exception was raised'
mit
KaiWeiChang/vowpal_wabbit
utl/vw-hyperopt.py
7
15612
#!/usr/bin/env python
# coding: utf-8

"""
Github version of hyperparameter optimization for Vowpal Wabbit via hyperopt
"""

__author__ = 'kurtosis'

from hyperopt import hp, fmin, tpe, rand, Trials, STATUS_OK
from sklearn.metrics import roc_curve, auc, log_loss, precision_recall_curve
import numpy as np
from datetime import datetime as dt
import subprocess, shlex
from math import exp, log
import argparse
import re
import logging
import json
import matplotlib
from matplotlib import pyplot as plt
try:
    import seaborn as sns
except ImportError:
    print ("Warning: seaborn is not installed. "
           "Without seaborn, standard matplotlib plots will not look very charming. "
           "It's recommended to install it via pip install seaborn")


def read_arguments():
    """Parse and return the command-line arguments for the optimizer."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--searcher', type=str, default='tpe', choices=['tpe', 'rand'])
    parser.add_argument('--max_evals', type=int, default=100)
    parser.add_argument('--train', type=str, required=True, help="training set")
    parser.add_argument('--holdout', type=str, required=True, help="holdout set")
    parser.add_argument('--vw_space', type=str, required=True,
                        help="hyperparameter search space (must be 'quoted')")
    parser.add_argument('--outer_loss_function', default='logistic',
                        choices=['logistic', 'roc-auc'])  # TODO: implement squared, hinge, quantile, PR-auc
    parser.add_argument('--regression', action='store_true', default=False,
                        help="""regression (continuous class labels)
                        or classification (-1 or 1, default value).""")
    parser.add_argument('--plot', action='store_true', default=False,
                        help=("Plot the results in the end. "
                              "Requires matplotlib and "
                              "(optionally) seaborn to be installed."))
    args = parser.parse_args()
    return args


class HyperoptSpaceConstructor(object):
    """
    Takes command-line input and transforms it into hyperopt search space

    An example of command-line input:

    --algorithms=ftrl,sgd --l2=1e-8..1e-4~LO -l=0.01..10~L --ftrl_beta=0.01..1 --passes=1..10~I -q=SE+SZ+DR,SE~O
    """

    def __init__(self, command):
        self.command = command
        self.space = None
        # Per-algorithm VW flag and flags that must not be combined with it.
        self.algorithm_metadata = {
            'ftrl': {'arg': '--ftrl', 'prohibited_flags': set()},
            'sgd': {'arg': '', 'prohibited_flags': {'--ftrl_alpha', '--ftrl_beta'}}
        }

        # "value" part before '~' (range or value list).
        self.range_pattern = re.compile("[^~]+")
        # Distribution options after '~': I(nteger), O(mit-zero), L(og).
        self.distr_pattern = re.compile("(?<=~)[IOL]*")
        # Same, but only the options that require a continuous range.
        self.only_continuous = re.compile("(?<=~)[IL]*")

    def _process_vw_argument(self, arg, value, algorithm):
        """Turn one ``--flag=value-spec`` pair into a hyperopt distribution.

        Raises ValueError when the spec mixes discrete lists with
        continuous-only distribution options, or uses a range for ``-q``.
        """
        try:
            distr_part = self.distr_pattern.findall(value)[0]
        except IndexError:
            distr_part = ''
        range_part = self.range_pattern.findall(value)[0]
        is_continuous = '..' in range_part

        ocd = self.only_continuous.findall(value)
        if not is_continuous and len(ocd) > 0 and ocd[0] != '':
            raise ValueError(("Need a range instead of a list of discrete values to define "
                              "uniform or log-uniform distribution. "
                              "Please, use [min..max]%s form") % (distr_part))

        if is_continuous and arg == '-q':
            raise ValueError(("You must directly specify namespaces for quadratic features "
                              "as a list of values, not as a parametric distribution"))

        hp_choice_name = "_".join([algorithm, arg.replace('-', '')])

        try_omit_zero = 'O' in distr_part
        distr_part = distr_part.replace('O', '')

        if is_continuous:
            vmin, vmax = [float(i) for i in range_part.split('..')]
            if distr_part == 'L':
                distrib = hp.loguniform(hp_choice_name, log(vmin), log(vmax))
            elif distr_part == '':
                distrib = hp.uniform(hp_choice_name, vmin, vmax)
            elif distr_part == 'I':
                distrib = hp.quniform(hp_choice_name, vmin, vmax, 1)
            elif distr_part in {'LI', 'IL'}:
                distrib = hp.qloguniform(hp_choice_name, log(vmin), log(vmax), 1)
            else:
                raise ValueError("Cannot recognize distribution: %s" % (distr_part))
        else:
            possible_values = range_part.split(',')
            if arg == '-q':
                # 'SE+SZ' means two -q namespace pairs: '-q SE -q SZ'.
                possible_values = [v.replace('+', ' -q ') for v in possible_values]
            distrib = hp.choice(hp_choice_name, possible_values)

        if try_omit_zero:
            # 'O' wraps the distribution so the flag may be dropped entirely.
            hp_choice_name_outer = hp_choice_name + '_outer'
            distrib = hp.choice(hp_choice_name_outer, ['omit', distrib])

        return distrib

    def string_to_pyll(self):
        """Build ``self.space`` (a hyperopt pyll graph) from the command."""
        line = shlex.split(self.command)

        algorithms = ['sgd']
        for arg in line:
            arg, value = arg.split('=')
            if arg == '--algorithms':
                algorithms = set(self.range_pattern.findall(value)[0].split(','))
                if tuple(self.distr_pattern.findall(value)) not in {(), ('O',)}:
                    raise ValueError(("Distribution options are prohibited for --algorithms flag. "
                                      "Simply list the algorithms instead (like --algorithms=ftrl,sgd)"))
                elif self.distr_pattern.findall(value) == ['O']:
                    algorithms.add('sgd')

                for algo in algorithms:
                    if algo not in self.algorithm_metadata:
                        raise NotImplementedError(("%s algorithm is not found. "
                                                   "Supported algorithms by now are %s")
                                                  % (algo, str(self.algorithm_metadata.keys())))
                break

        self.space = {algo: {'type': algo, 'argument': self.algorithm_metadata[algo]['arg']}
                      for algo in algorithms}
        for algo in algorithms:
            for arg in line:
                arg, value = arg.split('=')
                if arg == '--algorithms':
                    continue
                if arg not in self.algorithm_metadata[algo]['prohibited_flags']:
                    distrib = self._process_vw_argument(arg, value, algo)
                    self.space[algo][arg] = distrib
                else:
                    pass
        # list() fix: hp.choice needs a sequence; dict.values() is a view on
        # Python 3 and would break here.
        self.space = hp.choice('algorithm', list(self.space.values()))


class HyperOptimizer(object):
    """Drives the train/validate/score loop that hyperopt minimizes."""

    def __init__(self, train_set, holdout_set, command, max_evals=100,
                 outer_loss_function='logistic',
                 searcher='tpe', is_regression=False):
        self.train_set = train_set
        self.holdout_set = holdout_set

        self.train_model = './current.model'
        self.holdout_pred = './holdout.pred'
        self.trials_output = './trials.json'
        self.hyperopt_progress_plot = './hyperopt_progress.png'
        self.log = './log.log'

        self.logger = self._configure_logger()

        # hyperopt parameter sample, converted into a string with flags
        self.param_suffix = None
        self.train_command = None
        self.validate_command = None

        self.y_true_train = []
        self.y_true_holdout = []

        self.outer_loss_function = outer_loss_function
        self.space = self._get_space(command)
        self.max_evals = max_evals
        self.searcher = searcher
        self.is_regression = is_regression

        self.trials = Trials()
        self.current_trial = 0

    def _get_space(self, command):
        """Parse the search-space command into a hyperopt space object."""
        hs = HyperoptSpaceConstructor(command)
        hs.string_to_pyll()
        return hs.space

    def _configure_logger(self):
        """Set up console + file logging and return the root logger."""
        LOGGER_FORMAT = "%(asctime)s,%(msecs)03d %(levelname)-8s [%(name)s/%(module)s:%(lineno)d]: %(message)s"
        LOGGER_DATEFMT = "%Y-%m-%d %H:%M:%S"
        LOGFILE = self.log

        logging.basicConfig(format=LOGGER_FORMAT,
                            datefmt=LOGGER_DATEFMT,
                            level=logging.DEBUG)
        formatter = logging.Formatter(LOGGER_FORMAT, datefmt=LOGGER_DATEFMT)

        file_handler = logging.FileHandler(LOGFILE)
        file_handler.setFormatter(formatter)

        logger = logging.getLogger()
        logger.addHandler(file_handler)
        return logger

    def get_hyperparam_string(self, **kwargs):
        """Render one hyperopt sample as a VW command-line suffix."""
        for arg in ['--passes']:  # , '--rank', '--lrq']:
            if arg in kwargs:
                kwargs[arg] = int(kwargs[arg])

        # Flags sampled as 'omit' (the ~O option) are dropped entirely.
        flags = [key for key in kwargs if key.startswith('-')]
        for flag in flags:
            if kwargs[flag] == 'omit':
                del kwargs[flag]

        self.param_suffix = ' '.join(['%s %s' % (key, kwargs[key])
                                      for key in kwargs if key.startswith('-')])
        self.param_suffix += ' %s' % (kwargs['argument'])

    def compose_vw_train_command(self):
        data_part = ('vw -d %s -f %s --holdout_off -c '
                     % (self.train_set, self.train_model))
        self.train_command = ' '.join([data_part, self.param_suffix])

    def compose_vw_validate_command(self):
        data_part = 'vw -t -d %s -i %s -p %s --holdout_off -c' \
                    % (self.holdout_set, self.train_model, self.holdout_pred)
        self.validate_command = data_part

    def fit_vw(self):
        """Train a VW model with the current hyperparameter sample."""
        self.compose_vw_train_command()
        self.logger.info("executing the following command (training): %s" % self.train_command)
        subprocess.call(shlex.split(self.train_command))

    def validate_vw(self):
        """Score the holdout set with the freshly trained model."""
        self.compose_vw_validate_command()
        self.logger.info("executing the following command (validation): %s" % self.validate_command)
        subprocess.call(shlex.split(self.validate_command))

    def get_y_true_train(self):
        """Read the true labels from the train file (first token per line)."""
        self.logger.info("loading true train class labels...")
        # Fix: use a context manager so the file handle is always closed.
        with open(self.train_set, 'r') as yh:
            self.y_true_train = [int(line.strip()[0:2]) for line in yh]
        if not self.is_regression:
            # Map {-1, 1} labels to {0, 1} probabilities.
            self.y_true_train = [(i + 1.) / 2 for i in self.y_true_train]
        self.logger.info("train length: %d" % len(self.y_true_train))

    def get_y_true_holdout(self):
        """Read the true labels from the holdout file."""
        self.logger.info("loading true holdout class labels...")
        with open(self.holdout_set, 'r') as yh:
            self.y_true_holdout = [int(line.strip()[0:2]) for line in yh]
        if not self.is_regression:
            self.y_true_holdout = [(i + 1.) / 2 for i in self.y_true_holdout]
        self.logger.info("holdout length: %d" % len(self.y_true_holdout))

    def validation_metric_vw(self):
        """Compute the outer loss on VW's holdout predictions."""
        with open(self.holdout_pred, 'r') as v:
            y_pred_holdout = [float(line.strip()) for line in v]

        if self.outer_loss_function == 'logistic':
            y_pred_holdout_proba = [1. / (1 + exp(-i)) for i in y_pred_holdout]
            loss = log_loss(self.y_true_holdout, y_pred_holdout_proba)

        elif self.outer_loss_function == 'roc-auc':
            y_pred_holdout_proba = [1. / (1 + exp(-i)) for i in y_pred_holdout]
            fpr, tpr, _ = roc_curve(self.y_true_holdout, y_pred_holdout_proba)
            loss = -auc(fpr, tpr)

        else:
            # Fix: the original fell through with `loss` unbound for
            # 'squared' / 'hinge' (raising NameError later); fail loudly.
            # TODO: implement squared and hinge outer losses.
            raise NotImplementedError("outer loss function %r is not implemented"
                                      % self.outer_loss_function)

        self.logger.info('parameter suffix: %s' % self.param_suffix)
        self.logger.info('loss value: %.6f' % loss)

        return loss

    def hyperopt_search(self, parallel=False):  # TODO: implement parallel search with MongoTrials
        """Run hyperopt over the space; return (best command, best loss)."""

        def objective(kwargs):
            start = dt.now()

            self.current_trial += 1
            self.logger.info('\n\nStarting trial no.%d' % self.current_trial)
            self.get_hyperparam_string(**kwargs)
            self.fit_vw()
            self.validate_vw()
            loss = self.validation_metric_vw()

            finish = dt.now()
            elapsed = finish - start
            self.logger.info("evaluation time for this step: %s" % str(elapsed))

            # clean up
            subprocess.call(shlex.split('rm %s %s' % (self.train_model, self.holdout_pred)))

            to_return = {'status': STATUS_OK,
                         'loss': loss,  # TODO: include also train loss tracking in order to prevent overfitting
                         'eval_time': elapsed.seconds,
                         'train_command': self.train_command,
                         'current_trial': self.current_trial
                         }
            return to_return

        self.trials = Trials()
        if self.searcher == 'tpe':
            algo = tpe.suggest
        elif self.searcher == 'rand':
            algo = rand.suggest

        logging.debug("starting hypersearch...")
        best_params = fmin(objective, space=self.space, trials=self.trials,
                           algo=algo, max_evals=self.max_evals)
        self.logger.debug("the best hyperopt parameters: %s" % str(best_params))

        json.dump(self.trials.results, open(self.trials_output, 'w'))
        self.logger.info('All the trials results are saved at %s' % self.trials_output)

        best_configuration = self.trials.results[np.argmin(self.trials.losses())]['train_command']
        best_loss = self.trials.results[np.argmin(self.trials.losses())]['loss']
        self.logger.info("\n\nA full training command with the best hyperparameters: \n%s\n\n" % best_configuration)
        self.logger.info("\n\nThe best holdout loss value: \n%s\n\n" % best_loss)

        return best_configuration, best_loss

    def plot_progress(self):
        """Save a two-panel plot of per-iteration and cumulative-min loss."""
        try:
            sns.set_palette('Set2')
            sns.set_style("darkgrid", {"axes.facecolor": ".95"})
        except Exception:
            # seaborn may be missing (the import above is optional); was a
            # bare `except:` which would also hide real interrupts.
            pass

        self.logger.debug('plotting...')
        plt.figure(figsize=(15, 10))
        plt.subplot(211)
        plt.plot(self.trials.losses(), '.', markersize=12)
        plt.title('Per-Iteration Outer Loss', fontsize=16)
        plt.ylabel('Outer loss function value')
        # Fix: the original compared against 'logloss', which is never a
        # legal value (choices are 'logistic'/'roc-auc'), so the log scale
        # was dead code.
        if self.outer_loss_function in ['logistic']:
            plt.yscale('log')
        xticks = [int(i) for i in np.linspace(plt.xlim()[0], plt.xlim()[1],
                                              min(len(self.trials.losses()), 11))]
        plt.xticks(xticks, xticks)

        plt.subplot(212)
        plt.plot(np.minimum.accumulate(self.trials.losses()), '.', markersize=12)
        plt.title('Cumulative Minimum Outer Loss', fontsize=16)
        plt.xlabel('Iteration number')
        plt.ylabel('Outer loss function value')
        xticks = [int(i) for i in np.linspace(plt.xlim()[0], plt.xlim()[1],
                                              min(len(self.trials.losses()), 11))]
        plt.xticks(xticks, xticks)

        plt.tight_layout()
        plt.savefig(self.hyperopt_progress_plot)
        self.logger.info('The diagnostic hyperopt progress plot is saved: %s'
                         % self.hyperopt_progress_plot)


def main():
    args = read_arguments()
    h = HyperOptimizer(train_set=args.train, holdout_set=args.holdout, command=args.vw_space,
                       max_evals=args.max_evals,
                       outer_loss_function=args.outer_loss_function,
                       searcher=args.searcher, is_regression=args.regression)
    h.get_y_true_holdout()
    h.hyperopt_search()
    if args.plot:
        h.plot_progress()


if __name__ == '__main__':
    main()
bsd-3-clause
FreeAgent/djangoappengine-starter
django/contrib/auth/tests/permissions.py
231
1654
# Python 2 compatibility: prefer the C-accelerated StringIO when available.
try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO

from django.contrib.auth.management import create_permissions
from django.contrib.auth import models as auth_models
from django.contrib.contenttypes import models as contenttypes_models
from django.core.management import call_command
from django.test import TestCase


class TestAuthPermissions(TestCase):
    """Regression tests for auth permission creation ordering."""

    def tearDown(self):
        # These tests mess with content types, but content type lookups
        # are cached, so we need to make sure the effects of this test
        # are cleaned up.
        contenttypes_models.ContentType.objects.clear_cache()

    def test_permission_register_order(self):
        """Test that the order of registered permissions doesn't break"""
        # Changeset 14413 introduced a regression in the ordering of
        # newly created permissions for objects. When loading a fixture
        # after the initial creation (such as during unit tests), the
        # expected IDs for the permissions may not match up, leading to
        # SQL errors. This is ticket 14731

        # Start with a clean slate and build the permissions as we
        # expect to see them in the fixtures.
        auth_models.Permission.objects.all().delete()
        contenttypes_models.ContentType.objects.all().delete()
        create_permissions(auth_models, [], verbosity=0)
        create_permissions(contenttypes_models, [], verbosity=0)

        # Loading the fixture must not produce any output on stderr;
        # any SQL/ID mismatch would surface there.
        stderr = StringIO()
        call_command('loaddata', 'test_permissions.json',
                     verbosity=0, commit=False, stderr=stderr)
        self.assertEqual(stderr.getvalue(), '')
bsd-3-clause
liaorubei/depot_tools
third_party/boto/ses/__init__.py
72
2091
# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011 Harry Marr http://hmarr.com/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

from connection import SESConnection
from boto.regioninfo import RegionInfo


def regions():
    """
    Get all available regions for the SES service.

    :rtype: list
    :return: A list of :class:`boto.regioninfo.RegionInfo` instances
    """
    # SES is only exposed in us-east-1 here; extend this list as new
    # endpoints become available.
    us_east_1 = RegionInfo(name='us-east-1',
                           endpoint='email.us-east-1.amazonaws.com',
                           connection_cls=SESConnection)
    return [us_east_1]


def connect_to_region(region_name, **kw_params):
    """
    Given a valid region name, return a
    :class:`boto.ses.connection.SESConnection`.

    :type: str
    :param region_name: The name of the region to connect to.

    :rtype: :class:`boto.ses.connection.SESConnection` or ``None``
    :return: A connection to the given region, or None if an invalid region
             name is given
    """
    # Look up the first region with a matching name; fall back to None
    # for unknown region names, mirroring the documented contract.
    matching = (info for info in regions() if info.name == region_name)
    region = next(matching, None)
    if region is None:
        return None
    return region.connect(**kw_params)
bsd-3-clause
openstack/osprofiler
osprofiler/drivers/messaging.py
2
7425
# Copyright 2016 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools
import signal
import time

from oslo_utils import importutils

from osprofiler.drivers import base


# OSProfiler driver that publishes trace notifications on the "profiler"
# oslo.messaging topic and can read them back to assemble a report.
class Messaging(base.Driver):
    def __init__(self, connection_str, project=None, service=None, host=None,
                 context=None, conf=None, transport_url=None, idle_timeout=1,
                 **kwargs):
        """Driver that uses messaging as transport for notifications

        :param connection_str: OSProfiler driver connection string,
               equals to messaging://
        :param project: project name that will be included into notification
        :param service: service name that will be included into notification
        :param host: host name that will be included into notification
        :param context: oslo.messaging context
        :param conf: oslo.config CONF object
        :param transport_url: oslo.messaging transport, e.g.
               rabbit://rabbit:password@devstack:5672/
        :param idle_timeout: how long to wait for new notifications after
               the last one seen in the trace; this parameter is useful to
               collect full trace of asynchronous commands, e.g. when user
               runs `osprofiler` right after `openstack server create`
        :param kwargs: black hole for any other parameters
        """
        # oslo.messaging is an optional dependency; imported lazily so the
        # rest of osprofiler works without it.
        self.oslo_messaging = importutils.try_import("oslo_messaging")
        if not self.oslo_messaging:
            raise ValueError("Oslo.messaging library is required for "
                             "messaging driver")

        super(Messaging, self).__init__(connection_str, project=project,
                                        service=service, host=host)

        self.context = context

        if not conf:
            # Fall back to the global oslo.config CONF when none is given.
            oslo_config = importutils.try_import("oslo_config")
            if not oslo_config:
                raise ValueError("Oslo.config library is required for "
                                 "messaging driver")
            conf = oslo_config.cfg.CONF

        transport_kwargs = {}
        if transport_url:
            transport_kwargs["url"] = transport_url

        self.transport = self.oslo_messaging.get_notification_transport(
            conf, **transport_kwargs)
        # retry=0: drop notifications rather than block the traced service.
        self.client = self.oslo_messaging.Notifier(
            self.transport, publisher_id=self.host, driver="messaging",
            topics=["profiler"], retry=0)
        self.idle_timeout = idle_timeout

    @classmethod
    def get_name(cls):
        # Name used in the "messaging://" connection string scheme.
        return "messaging"

    def notify(self, info, context=None):
        """Send notifications to backend via oslo.messaging notifier API.

        :param info: Contains information about trace element.
                     In payload dict there are always 3 ids:
                     "base_id" - uuid that is common for all notifications
                                 related to one trace.
                     "parent_id" - uuid of parent element in trace
                     "trace_id" - uuid of current element in trace
                     With parent_id and trace_id it's quite simple to build
                     tree of trace elements, which simplify analyze of trace.
        :param context: request context that is mostly used to specify
                        current active user and tenant.
        """
        info["project"] = self.project
        info["service"] = self.service
        self.client.info(context or self.context,
                         "profiler.%s" % info["service"],
                         info)

    def get_report(self, base_id):
        """Collect all events for trace ``base_id`` and build the report.

        Listens on the "profiler" topic until no new matching notification
        arrives for ``idle_timeout`` seconds, or until SIGINT/SIGTERM.
        """
        notification_endpoint = NotifyEndpoint(self.oslo_messaging, base_id)
        endpoints = [notification_endpoint]
        targets = [self.oslo_messaging.Target(topic="profiler")]
        server = self.oslo_messaging.notify.get_notification_listener(
            self.transport, targets, endpoints, executor="threading")

        # Mutable flag shared with the signal handler below.
        state = dict(running=False)
        sfn = functools.partial(signal_handler, state=state)

        # modify signal handlers to handle interruption gracefully
        old_sigterm_handler = signal.signal(signal.SIGTERM, sfn)
        old_sigint_handler = signal.signal(signal.SIGINT, sfn)

        try:
            server.start()
        except self.oslo_messaging.server.ServerListenError:
            # failed to start the server
            raise
        except SignalExit:
            print("Execution interrupted while trying to connect to "
                  "messaging server. No data was collected.")
            return {}

        # connected to server, now read the data
        try:
            # run until the trace is complete
            state["running"] = True
            while state["running"]:
                # Sleep until idle_timeout has elapsed since the last
                # received event, then consider the trace finished.
                last_read_time = notification_endpoint.get_last_read_time()
                wait = self.idle_timeout - (time.time() - last_read_time)
                if wait < 0:
                    state["running"] = False
                else:
                    time.sleep(wait)
        except SignalExit:
            print("Execution interrupted. Terminating")
        finally:
            server.stop()
            server.wait()

        # restore original signal handlers
        signal.signal(signal.SIGTERM, old_sigterm_handler)
        signal.signal(signal.SIGINT, old_sigint_handler)

        events = notification_endpoint.get_messages()

        if not events:
            print("No events are collected for Trace UUID %s. Please note "
                  "that osprofiler has read ALL events from profiler topic, "
                  "but has not found any for specified Trace UUID."
                  % base_id)

        for n in events:
            trace_id = n["trace_id"]
            parent_id = n["parent_id"]
            name = n["name"]
            project = n["project"]
            service = n["service"]
            host = n["info"]["host"]
            timestamp = n["timestamp"]

            # _append_results/_parse_results are inherited from base.Driver.
            self._append_results(trace_id, parent_id, name, project, service,
                                 host, timestamp, n)

        return self._parse_results()


# Notification listener endpoint that accumulates payloads for one trace.
class NotifyEndpoint(object):

    def __init__(self, oslo_messaging, base_id):
        self.received_messages = []
        self.last_read_time = time.time()
        # NOTE(review): filter_rule is built for the given base_id but is
        # not referenced elsewhere in this file — presumably oslo.messaging's
        # dispatcher applies it by attribute name; confirm before removing.
        self.filter_rule = oslo_messaging.NotificationFilter(
            payload={"base_id": base_id})

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        # Called by oslo.messaging for each INFO-level notification.
        self.received_messages.append(payload)
        self.last_read_time = time.time()

    def get_messages(self):
        return self.received_messages

    def get_last_read_time(self):
        return self.last_read_time  # time when the latest event was received


# Derives from BaseException so ordinary `except Exception` blocks in user
# code do not swallow the interruption.
class SignalExit(BaseException):
    pass


def signal_handler(signum, frame, state):
    # Flip the shared flag and abort whatever the main thread was doing.
    state["running"] = False
    raise SignalExit()
apache-2.0
tiagofrepereira2012/tensorflow
tensorflow/contrib/learn/python/learn/dataframe/series.py
27
6950
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A Series represents a deferred Tensor computation in a DataFrame.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from abc import ABCMeta from tensorflow.python.util.deprecation import deprecated class Series(object): """A single output series. Represents the deferred construction of a graph that computes the series values. Note every `Series` should be a `TransformedSeries`, except when mocked. """ __metaclass__ = ABCMeta @classmethod def register_unary_op(cls, series_method_name): """A decorator that registers `Transform`s as `Series` member functions. For example: ''' @series.Series.register_unary_op("log") class Logarithm(Transform): ... ''' The registered member function takes `args` and `kwargs`. These values will be passed to the `__init__` function for the decorated `Transform`. Args: series_method_name: the name under which to register the function. Returns: Decorator function. Raises: ValueError: another `Transform` is already registered under `series_method_name`. 
""" def register(transform_cls): if hasattr(cls, series_method_name): raise ValueError("Series already has a function registered as %s.", series_method_name) def _member_func(slf, *args, **kwargs): return transform_cls(*args, **kwargs)([slf])[0] setattr(cls, series_method_name, _member_func) return transform_cls return register @classmethod def register_binary_op(cls, series_method_name): """A decorator that registers `Transform`s as `Series` member functions. For example: ''' @series.Series.register_binary_op("__add___") class Sum(Transform): ... ''' The registered member function takes `args` and `kwargs`. These values will be passed to the `__init__` function for the decorated `Transform`. Args: series_method_name: the name under which to register the function. Returns: Decorator function. Raises: ValueError: another `Transform` is already registered under `series_method_name`. """ def register(transform_cls): if hasattr(cls, series_method_name): raise ValueError("Series already has a function registered as %s.", series_method_name) def _member_func(slf, b, *args, **kwargs): return transform_cls(*args, **kwargs)([slf, b])[0] setattr(cls, series_method_name, _member_func) return transform_cls return register def build(self, cache, **kwargs): """Returns a Tensor.""" raise NotImplementedError() class PredefinedSeries(Series): """A `Series` that requires the cache to already map a given name.""" @deprecated("2017-06-15", "contrib/learn/dataframe/** is deprecated.") def __init__(self, name, feature_spec): super(PredefinedSeries, self).__init__() self._name = name self._feature_spec = feature_spec @property def name(self): return self._name @property def feature_spec(self): return self._feature_spec def required_base_features(self): return {self.name: self.feature_spec} def build(self, cache, **kwargs): try: return cache[self.name] except KeyError: raise KeyError("Expected base feature not found: %s" % self._name) def __repr__(self): return "Predefined: %s" % 
self.name def __eq__(self, other): if isinstance(other, self.__class__): return self.__dict__ == other.__dict__ else: return False def __ne__(self, other): return not self.__eq__(other) class TransformedSeries(Series): """A `Series` that results from applying a `Transform` to a list of inputs.""" @deprecated("2017-06-15", "contrib/learn/dataframe/** is deprecated.") def __init__(self, input_series, transform, output_name): super(TransformedSeries, self).__init__() self._input_series = input_series self._transform = transform self._output_name = output_name if output_name is None: raise ValueError("output_name must be provided") if len(input_series) != transform.input_valency: raise ValueError("Expected %s input Series but received %s." % (transform.input_valency, len(input_series))) self._repr = TransformedSeries.make_repr( self._input_series, self._transform, self._output_name) def required_base_features(self): # Note the only items in the result are those that can be traced to a # PredefinedSeries. result = {} for s in self._input_series: # It's OK to overwrite keys since we only want one copy of each anyway. # We assume (but don't bother checking) that the spec is the same in each # case. result.update(s.required_base_features) return result def build(self, cache=None, **kwargs): if cache is None: cache = {} all_outputs = self._transform.build_transitive( self._input_series, cache, **kwargs) return getattr(all_outputs, self._output_name) def __repr__(self): return self._repr # Note we need to generate series reprs from Transform, without needing the # series themselves. So we just make this public. Alternatively we could # create throwaway series just in order to call repr() on them. @staticmethod def make_repr(input_series, transform, output_name): """Generate a key for caching Tensors produced for a TransformedSeries. Generally we a need a deterministic unique key representing which transform was applied to which inputs, and which output was selected. 
Args: input_series: an iterable of input `Series` for the `Transform` transform: the `Transform` being applied output_name: the name of the specific output from the `Transform` that is to be cached Returns: A string suitable for use as a cache key for Tensors produced via a TransformedSeries """ input_series_keys = [repr(series) for series in input_series] input_series_keys_joined = ", ".join(input_series_keys) return "%s(%s)[%s]" % ( repr(transform), input_series_keys_joined, output_name)
apache-2.0
lumig242/Hue-Integration-with-CDAP
desktop/core/ext-py/pycrypto-2.6.1/lib/Crypto/SelfTest/Cipher/test_AES.py
115
79975
# -*- coding: utf-8 -*- # # SelfTest/Cipher/AES.py: Self-test for the AES cipher # # Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net> # # =================================================================== # The contents of this file are dedicated to the public domain. To # the extent that dedication to the public domain is not available, # everyone is granted a worldwide, perpetual, royalty-free, # non-exclusive license to exercise all rights associated with the # contents of this file for any purpose whatsoever. # No rights are reserved. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # =================================================================== """Self-test suite for Crypto.Cipher.AES""" __revision__ = "$Id$" from common import dict # For compatibility with Python 2.1 and 2.2 from Crypto.Util.py3compat import * from binascii import hexlify # This is a list of (plaintext, ciphertext, key[, description[, params]]) tuples. 
test_data = [ # FIPS PUB 197 test vectors # http://csrc.nist.gov/publications/fips/fips197/fips-197.pdf ('00112233445566778899aabbccddeeff', '69c4e0d86a7b0430d8cdb78070b4c55a', '000102030405060708090a0b0c0d0e0f', 'FIPS 197 C.1 (AES-128)'), ('00112233445566778899aabbccddeeff', 'dda97ca4864cdfe06eaf70a0ec0d7191', '000102030405060708090a0b0c0d0e0f1011121314151617', 'FIPS 197 C.2 (AES-192)'), ('00112233445566778899aabbccddeeff', '8ea2b7ca516745bfeafc49904b496089', '000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f', 'FIPS 197 C.3 (AES-256)'), # Rijndael128 test vectors # Downloaded 2008-09-13 from # http://www.iaik.tugraz.at/Research/krypto/AES/old/~rijmen/rijndael/testvalues.tar.gz # ecb_tbl.txt, KEYSIZE=128 ('506812a45f08c889b97f5980038b8359', 'd8f532538289ef7d06b506a4fd5be9c9', '00010203050607080a0b0c0d0f101112', 'ecb-tbl-128: I=1'), ('5c6d71ca30de8b8b00549984d2ec7d4b', '59ab30f4d4ee6e4ff9907ef65b1fb68c', '14151617191a1b1c1e1f202123242526', 'ecb-tbl-128: I=2'), ('53f3f4c64f8616e4e7c56199f48f21f6', 'bf1ed2fcb2af3fd41443b56d85025cb1', '28292a2b2d2e2f30323334353738393a', 'ecb-tbl-128: I=3'), ('a1eb65a3487165fb0f1c27ff9959f703', '7316632d5c32233edcb0780560eae8b2', '3c3d3e3f41424344464748494b4c4d4e', 'ecb-tbl-128: I=4'), ('3553ecf0b1739558b08e350a98a39bfa', '408c073e3e2538072b72625e68b8364b', '50515253555657585a5b5c5d5f606162', 'ecb-tbl-128: I=5'), ('67429969490b9711ae2b01dc497afde8', 'e1f94dfa776597beaca262f2f6366fea', '64656667696a6b6c6e6f707173747576', 'ecb-tbl-128: I=6'), ('93385c1f2aec8bed192f5a8e161dd508', 'f29e986c6a1c27d7b29ffd7ee92b75f1', '78797a7b7d7e7f80828384858788898a', 'ecb-tbl-128: I=7'), ('b5bf946be19beb8db3983b5f4c6e8ddb', '131c886a57f8c2e713aba6955e2b55b5', '8c8d8e8f91929394969798999b9c9d9e', 'ecb-tbl-128: I=8'), ('41321ee10e21bd907227c4450ff42324', 'd2ab7662df9b8c740210e5eeb61c199d', 'a0a1a2a3a5a6a7a8aaabacadafb0b1b2', 'ecb-tbl-128: I=9'), ('00a82f59c91c8486d12c0a80124f6089', '14c10554b2859c484cab5869bbe7c470', 
'b4b5b6b7b9babbbcbebfc0c1c3c4c5c6', 'ecb-tbl-128: I=10'), ('7ce0fd076754691b4bbd9faf8a1372fe', 'db4d498f0a49cf55445d502c1f9ab3b5', 'c8c9cacbcdcecfd0d2d3d4d5d7d8d9da', 'ecb-tbl-128: I=11'), ('23605a8243d07764541bc5ad355b3129', '6d96fef7d66590a77a77bb2056667f7f', 'dcdddedfe1e2e3e4e6e7e8e9ebecedee', 'ecb-tbl-128: I=12'), ('12a8cfa23ea764fd876232b4e842bc44', '316fb68edba736c53e78477bf913725c', 'f0f1f2f3f5f6f7f8fafbfcfdfe010002', 'ecb-tbl-128: I=13'), ('bcaf32415e8308b3723e5fdd853ccc80', '6936f2b93af8397fd3a771fc011c8c37', '04050607090a0b0c0e0f101113141516', 'ecb-tbl-128: I=14'), ('89afae685d801ad747ace91fc49adde0', 'f3f92f7a9c59179c1fcc2c2ba0b082cd', '2c2d2e2f31323334363738393b3c3d3e', 'ecb-tbl-128: I=15'), ('f521d07b484357c4a69e76124a634216', '6a95ea659ee3889158e7a9152ff04ebc', '40414243454647484a4b4c4d4f505152', 'ecb-tbl-128: I=16'), ('3e23b3bc065bcc152407e23896d77783', '1959338344e945670678a5d432c90b93', '54555657595a5b5c5e5f606163646566', 'ecb-tbl-128: I=17'), ('79f0fba002be1744670e7e99290d8f52', 'e49bddd2369b83ee66e6c75a1161b394', '68696a6b6d6e6f70727374757778797a', 'ecb-tbl-128: I=18'), ('da23fe9d5bd63e1d72e3dafbe21a6c2a', 'd3388f19057ff704b70784164a74867d', '7c7d7e7f81828384868788898b8c8d8e', 'ecb-tbl-128: I=19'), ('e3f5698ba90b6a022efd7db2c7e6c823', '23aa03e2d5e4cd24f3217e596480d1e1', 'a4a5a6a7a9aaabacaeafb0b1b3b4b5b6', 'ecb-tbl-128: I=20'), ('bdc2691d4f1b73d2700679c3bcbf9c6e', 'c84113d68b666ab2a50a8bdb222e91b9', 'e0e1e2e3e5e6e7e8eaebecedeff0f1f2', 'ecb-tbl-128: I=21'), ('ba74e02093217ee1ba1b42bd5624349a', 'ac02403981cd4340b507963db65cb7b6', '08090a0b0d0e0f10121314151718191a', 'ecb-tbl-128: I=22'), ('b5c593b5851c57fbf8b3f57715e8f680', '8d1299236223359474011f6bf5088414', '6c6d6e6f71727374767778797b7c7d7e', 'ecb-tbl-128: I=23'), ('3da9bd9cec072381788f9387c3bbf4ee', '5a1d6ab8605505f7977e55b9a54d9b90', '80818283858687888a8b8c8d8f909192', 'ecb-tbl-128: I=24'), ('4197f3051121702ab65d316b3c637374', '72e9c2d519cf555e4208805aabe3b258', 
'94959697999a9b9c9e9fa0a1a3a4a5a6', 'ecb-tbl-128: I=25'), ('9f46c62ec4f6ee3f6e8c62554bc48ab7', 'a8f3e81c4a23a39ef4d745dffe026e80', 'a8a9aaabadaeafb0b2b3b4b5b7b8b9ba', 'ecb-tbl-128: I=26'), ('0220673fe9e699a4ebc8e0dbeb6979c8', '546f646449d31458f9eb4ef5483aee6c', 'bcbdbebfc1c2c3c4c6c7c8c9cbcccdce', 'ecb-tbl-128: I=27'), ('b2b99171337ded9bc8c2c23ff6f18867', '4dbe4bc84ac797c0ee4efb7f1a07401c', 'd0d1d2d3d5d6d7d8dadbdcdddfe0e1e2', 'ecb-tbl-128: I=28'), ('a7facf4e301e984e5efeefd645b23505', '25e10bfb411bbd4d625ac8795c8ca3b3', 'e4e5e6e7e9eaebeceeeff0f1f3f4f5f6', 'ecb-tbl-128: I=29'), ('f7c762e4a9819160fd7acfb6c4eedcdd', '315637405054ec803614e43def177579', 'f8f9fafbfdfefe00020304050708090a', 'ecb-tbl-128: I=30'), ('9b64fc21ea08709f4915436faa70f1be', '60c5bc8a1410247295c6386c59e572a8', '0c0d0e0f11121314161718191b1c1d1e', 'ecb-tbl-128: I=31'), ('52af2c3de07ee6777f55a4abfc100b3f', '01366fc8ca52dfe055d6a00a76471ba6', '20212223252627282a2b2c2d2f303132', 'ecb-tbl-128: I=32'), ('2fca001224386c57aa3f968cbe2c816f', 'ecc46595516ec612449c3f581e7d42ff', '34353637393a3b3c3e3f404143444546', 'ecb-tbl-128: I=33'), ('4149c73658a4a9c564342755ee2c132f', '6b7ffe4c602a154b06ee9c7dab5331c9', '48494a4b4d4e4f50525354555758595a', 'ecb-tbl-128: I=34'), ('af60005a00a1772f7c07a48a923c23d2', '7da234c14039a240dd02dd0fbf84eb67', '5c5d5e5f61626364666768696b6c6d6e', 'ecb-tbl-128: I=35'), ('6fccbc28363759914b6f0280afaf20c6', 'c7dc217d9e3604ffe7e91f080ecd5a3a', '70717273757677787a7b7c7d7f808182', 'ecb-tbl-128: I=36'), ('7d82a43ddf4fefa2fc5947499884d386', '37785901863f5c81260ea41e7580cda5', '84858687898a8b8c8e8f909193949596', 'ecb-tbl-128: I=37'), ('5d5a990eaab9093afe4ce254dfa49ef9', 'a07b9338e92ed105e6ad720fccce9fe4', '98999a9b9d9e9fa0a2a3a4a5a7a8a9aa', 'ecb-tbl-128: I=38'), ('4cd1e2fd3f4434b553aae453f0ed1a02', 'ae0fb9722418cc21a7da816bbc61322c', 'acadaeafb1b2b3b4b6b7b8b9bbbcbdbe', 'ecb-tbl-128: I=39'), ('5a2c9a9641d4299125fa1b9363104b5e', 'c826a193080ff91ffb21f71d3373c877', 
'c0c1c2c3c5c6c7c8cacbcccdcfd0d1d2', 'ecb-tbl-128: I=40'), ('b517fe34c0fa217d341740bfd4fe8dd4', '1181b11b0e494e8d8b0aa6b1d5ac2c48', 'd4d5d6d7d9dadbdcdedfe0e1e3e4e5e6', 'ecb-tbl-128: I=41'), ('014baf2278a69d331d5180103643e99a', '6743c3d1519ab4f2cd9a78ab09a511bd', 'e8e9eaebedeeeff0f2f3f4f5f7f8f9fa', 'ecb-tbl-128: I=42'), ('b529bd8164f20d0aa443d4932116841c', 'dc55c076d52bacdf2eefd952946a439d', 'fcfdfeff01020304060708090b0c0d0e', 'ecb-tbl-128: I=43'), ('2e596dcbb2f33d4216a1176d5bd1e456', '711b17b590ffc72b5c8e342b601e8003', '10111213151617181a1b1c1d1f202122', 'ecb-tbl-128: I=44'), ('7274a1ea2b7ee2424e9a0e4673689143', '19983bb0950783a537e1339f4aa21c75', '24252627292a2b2c2e2f303133343536', 'ecb-tbl-128: I=45'), ('ae20020bd4f13e9d90140bee3b5d26af', '3ba7762e15554169c0f4fa39164c410c', '38393a3b3d3e3f40424344454748494a', 'ecb-tbl-128: I=46'), ('baac065da7ac26e855e79c8849d75a02', 'a0564c41245afca7af8aa2e0e588ea89', '4c4d4e4f51525354565758595b5c5d5e', 'ecb-tbl-128: I=47'), ('7c917d8d1d45fab9e2540e28832540cc', '5e36a42a2e099f54ae85ecd92e2381ed', '60616263656667686a6b6c6d6f707172', 'ecb-tbl-128: I=48'), ('bde6f89e16daadb0e847a2a614566a91', '770036f878cd0f6ca2268172f106f2fe', '74757677797a7b7c7e7f808183848586', 'ecb-tbl-128: I=49'), ('c9de163725f1f5be44ebb1db51d07fbc', '7e4e03908b716116443ccf7c94e7c259', '88898a8b8d8e8f90929394959798999a', 'ecb-tbl-128: I=50'), ('3af57a58f0c07dffa669572b521e2b92', '482735a48c30613a242dd494c7f9185d', '9c9d9e9fa1a2a3a4a6a7a8a9abacadae', 'ecb-tbl-128: I=51'), ('3d5ebac306dde4604f1b4fbbbfcdae55', 'b4c0f6c9d4d7079addf9369fc081061d', 'b0b1b2b3b5b6b7b8babbbcbdbfc0c1c2', 'ecb-tbl-128: I=52'), ('c2dfa91bceb76a1183c995020ac0b556', 'd5810fe0509ac53edcd74f89962e6270', 'c4c5c6c7c9cacbcccecfd0d1d3d4d5d6', 'ecb-tbl-128: I=53'), ('c70f54305885e9a0746d01ec56c8596b', '03f17a16b3f91848269ecdd38ebb2165', 'd8d9dadbdddedfe0e2e3e4e5e7e8e9ea', 'ecb-tbl-128: I=54'), ('c4f81b610e98012ce000182050c0c2b2', 'da1248c3180348bad4a93b4d9856c9df', 
'ecedeeeff1f2f3f4f6f7f8f9fbfcfdfe', 'ecb-tbl-128: I=55'), ('eaab86b1d02a95d7404eff67489f97d4', '3d10d7b63f3452c06cdf6cce18be0c2c', '00010203050607080a0b0c0d0f101112', 'ecb-tbl-128: I=56'), ('7c55bdb40b88870b52bec3738de82886', '4ab823e7477dfddc0e6789018fcb6258', '14151617191a1b1c1e1f202123242526', 'ecb-tbl-128: I=57'), ('ba6eaa88371ff0a3bd875e3f2a975ce0', 'e6478ba56a77e70cfdaa5c843abde30e', '28292a2b2d2e2f30323334353738393a', 'ecb-tbl-128: I=58'), ('08059130c4c24bd30cf0575e4e0373dc', '1673064895fbeaf7f09c5429ff75772d', '3c3d3e3f41424344464748494b4c4d4e', 'ecb-tbl-128: I=59'), ('9a8eab004ef53093dfcf96f57e7eda82', '4488033ae9f2efd0ca9383bfca1a94e9', '50515253555657585a5b5c5d5f606162', 'ecb-tbl-128: I=60'), ('0745b589e2400c25f117b1d796c28129', '978f3b8c8f9d6f46626cac3c0bcb9217', '64656667696a6b6c6e6f707173747576', 'ecb-tbl-128: I=61'), ('2f1777781216cec3f044f134b1b92bbe', 'e08c8a7e582e15e5527f1d9e2eecb236', '78797a7b7d7e7f80828384858788898a', 'ecb-tbl-128: I=62'), ('353a779ffc541b3a3805d90ce17580fc', 'cec155b76ac5ffda4cf4f9ca91e49a7a', '8c8d8e8f91929394969798999b9c9d9e', 'ecb-tbl-128: I=63'), ('1a1eae4415cefcf08c4ac1c8f68bea8f', 'd5ac7165763225dd2a38cdc6862c29ad', 'a0a1a2a3a5a6a7a8aaabacadafb0b1b2', 'ecb-tbl-128: I=64'), ('e6e7e4e5b0b3b2b5d4d5aaab16111013', '03680fe19f7ce7275452020be70e8204', 'b4b5b6b7b9babbbcbebfc0c1c3c4c5c6', 'ecb-tbl-128: I=65'), ('f8f9fafbfbf8f9e677767170efe0e1e2', '461df740c9781c388e94bb861ceb54f6', 'c8c9cacbcdcecfd0d2d3d4d5d7d8d9da', 'ecb-tbl-128: I=66'), ('63626160a1a2a3a445444b4a75727370', '451bd60367f96483042742219786a074', 'dcdddedfe1e2e3e4e6e7e8e9ebecedee', 'ecb-tbl-128: I=67'), ('717073720605040b2d2c2b2a05fafbf9', 'e4dfa42671a02e57ef173b85c0ea9f2b', 'f0f1f2f3f5f6f7f8fafbfcfdfe010002', 'ecb-tbl-128: I=68'), ('78797a7beae9e8ef3736292891969794', 'ed11b89e76274282227d854700a78b9e', '04050607090a0b0c0e0f101113141516', 'ecb-tbl-128: I=69'), ('838281803231300fdddcdbdaa0afaead', '433946eaa51ea47af33895f2b90b3b75', 
'18191a1b1d1e1f20222324252728292a', 'ecb-tbl-128: I=70'), ('18191a1bbfbcbdba75747b7a7f78797a', '6bc6d616a5d7d0284a5910ab35022528', '2c2d2e2f31323334363738393b3c3d3e', 'ecb-tbl-128: I=71'), ('848586879b989996a3a2a5a4849b9a99', 'd2a920ecfe919d354b5f49eae9719c98', '40414243454647484a4b4c4d4f505152', 'ecb-tbl-128: I=72'), ('0001020322212027cacbf4f551565754', '3a061b17f6a92885efbd0676985b373d', '54555657595a5b5c5e5f606163646566', 'ecb-tbl-128: I=73'), ('cecfcccdafacadb2515057564a454447', 'fadeec16e33ea2f4688499d157e20d8f', '68696a6b6d6e6f70727374757778797a', 'ecb-tbl-128: I=74'), ('92939091cdcecfc813121d1c80878685', '5cdefede59601aa3c3cda36fa6b1fa13', '7c7d7e7f81828384868788898b8c8d8e', 'ecb-tbl-128: I=75'), ('d2d3d0d16f6c6d6259585f5ed1eeefec', '9574b00039844d92ebba7ee8719265f8', '90919293959697989a9b9c9d9fa0a1a2', 'ecb-tbl-128: I=76'), ('acadaeaf878485820f0e1110d5d2d3d0', '9a9cf33758671787e5006928188643fa', 'a4a5a6a7a9aaabacaeafb0b1b3b4b5b6', 'ecb-tbl-128: I=77'), ('9091929364676619e6e7e0e1757a7b78', '2cddd634c846ba66bb46cbfea4a674f9', 'b8b9babbbdbebfc0c2c3c4c5c7c8c9ca', 'ecb-tbl-128: I=78'), ('babbb8b98a89888f74757a7b92959497', 'd28bae029393c3e7e26e9fafbbb4b98f', 'cccdcecfd1d2d3d4d6d7d8d9dbdcddde', 'ecb-tbl-128: I=79'), ('8d8c8f8e6e6d6c633b3a3d3ccad5d4d7', 'ec27529b1bee0a9ab6a0d73ebc82e9b7', 'e0e1e2e3e5e6e7e8eaebecedeff0f1f2', 'ecb-tbl-128: I=80'), ('86878485010203040808f7f767606162', '3cb25c09472aff6ee7e2b47ccd7ccb17', 'f4f5f6f7f9fafbfcfefe010103040506', 'ecb-tbl-128: I=81'), ('8e8f8c8d656667788a8b8c8d010e0f0c', 'dee33103a7283370d725e44ca38f8fe5', '08090a0b0d0e0f10121314151718191a', 'ecb-tbl-128: I=82'), ('c8c9cacb858687807a7b7475e7e0e1e2', '27f9bcd1aac64bffc11e7815702c1a69', '1c1d1e1f21222324262728292b2c2d2e', 'ecb-tbl-128: I=83'), ('6d6c6f6e5053525d8c8d8a8badd2d3d0', '5df534ffad4ed0749a9988e9849d0021', '30313233353637383a3b3c3d3f404142', 'ecb-tbl-128: I=84'), ('28292a2b393a3b3c0607181903040506', 'a48bee75db04fb60ca2b80f752a8421b', 
'44454647494a4b4c4e4f505153545556', 'ecb-tbl-128: I=85'), ('a5a4a7a6b0b3b28ddbdadddcbdb2b3b0', '024c8cf70bc86ee5ce03678cb7af45f9', '58595a5b5d5e5f60626364656768696a', 'ecb-tbl-128: I=86'), ('323330316467666130313e3f2c2b2a29', '3c19ac0f8a3a3862ce577831301e166b', '6c6d6e6f71727374767778797b7c7d7e', 'ecb-tbl-128: I=87'), ('27262524080b0a05171611100b141516', 'c5e355b796a57421d59ca6be82e73bca', '80818283858687888a8b8c8d8f909192', 'ecb-tbl-128: I=88'), ('040506074142434435340b0aa3a4a5a6', 'd94033276417abfb05a69d15b6e386e2', '94959697999a9b9c9e9fa0a1a3a4a5a6', 'ecb-tbl-128: I=89'), ('242526271112130c61606766bdb2b3b0', '24b36559ea3a9b9b958fe6da3e5b8d85', 'a8a9aaabadaeafb0b2b3b4b5b7b8b9ba', 'ecb-tbl-128: I=90'), ('4b4a4948252627209e9f9091cec9c8cb', '20fd4feaa0e8bf0cce7861d74ef4cb72', 'bcbdbebfc1c2c3c4c6c7c8c9cbcccdce', 'ecb-tbl-128: I=91'), ('68696a6b6665646b9f9e9998d9e6e7e4', '350e20d5174277b9ec314c501570a11d', 'd0d1d2d3d5d6d7d8dadbdcdddfe0e1e2', 'ecb-tbl-128: I=92'), ('34353637c5c6c7c0f0f1eeef7c7b7a79', '87a29d61b7c604d238fe73045a7efd57', 'e4e5e6e7e9eaebeceeeff0f1f3f4f5f6', 'ecb-tbl-128: I=93'), ('32333031c2c1c13f0d0c0b0a050a0b08', '2c3164c1cc7d0064816bdc0faa362c52', 'f8f9fafbfdfefe00020304050708090a', 'ecb-tbl-128: I=94'), ('cdcccfcebebdbcbbabaaa5a4181f1e1d', '195fe5e8a05a2ed594f6e4400eee10b3', '0c0d0e0f11121314161718191b1c1d1e', 'ecb-tbl-128: I=95'), ('212023223635343ba0a1a6a7445b5a59', 'e4663df19b9a21a5a284c2bd7f905025', '20212223252627282a2b2c2d2f303132', 'ecb-tbl-128: I=96'), ('0e0f0c0da8abaaad2f2e515002050407', '21b88714cfb4e2a933bd281a2c4743fd', '34353637393a3b3c3e3f404143444546', 'ecb-tbl-128: I=97'), ('070605042a2928378e8f8889bdb2b3b0', 'cbfc3980d704fd0fc54378ab84e17870', '48494a4b4d4e4f50525354555758595a', 'ecb-tbl-128: I=98'), ('cbcac9c893909196a9a8a7a6a5a2a3a0', 'bc5144baa48bdeb8b63e22e03da418ef', '5c5d5e5f61626364666768696b6c6d6e', 'ecb-tbl-128: I=99'), ('80818283c1c2c3cc9c9d9a9b0cf3f2f1', '5a1dbaef1ee2984b8395da3bdffa3ccc', 
'70717273757677787a7b7c7d7f808182', 'ecb-tbl-128: I=100'), ('1213101125262720fafbe4e5b1b6b7b4', 'f0b11cd0729dfcc80cec903d97159574', '84858687898a8b8c8e8f909193949596', 'ecb-tbl-128: I=101'), ('7f7e7d7c3033320d97969190222d2c2f', '9f95314acfddc6d1914b7f19a9cc8209', '98999a9b9d9e9fa0a2a3a4a5a7a8a9aa', 'ecb-tbl-128: I=102'), ('4e4f4c4d484b4a4d81808f8e53545556', '595736f6f0f70914a94e9e007f022519', 'acadaeafb1b2b3b4b6b7b8b9bbbcbdbe', 'ecb-tbl-128: I=103'), ('dcdddedfb0b3b2bd15141312a1bebfbc', '1f19f57892cae586fcdfb4c694deb183', 'c0c1c2c3c5c6c7c8cacbcccdcfd0d1d2', 'ecb-tbl-128: I=104'), ('93929190282b2a2dc4c5fafb92959497', '540700ee1f6f3dab0b3eddf6caee1ef5', 'd4d5d6d7d9dadbdcdedfe0e1e3e4e5e6', 'ecb-tbl-128: I=105'), ('f5f4f7f6c4c7c6d9373631307e717073', '14a342a91019a331687a2254e6626ca2', 'e8e9eaebedeeeff0f2f3f4f5f7f8f9fa', 'ecb-tbl-128: I=106'), ('93929190b6b5b4b364656a6b05020300', '7b25f3c3b2eea18d743ef283140f29ff', 'fcfdfeff01020304060708090b0c0d0e', 'ecb-tbl-128: I=107'), ('babbb8b90d0e0f00a4a5a2a3043b3a39', '46c2587d66e5e6fa7f7ca6411ad28047', '10111213151617181a1b1c1d1f202122', 'ecb-tbl-128: I=108'), ('d8d9dadb7f7c7d7a10110e0f787f7e7d', '09470e72229d954ed5ee73886dfeeba9', '24252627292a2b2c2e2f303133343536', 'ecb-tbl-128: I=109'), ('fefffcfdefeced923b3a3d3c6768696a', 'd77c03de92d4d0d79ef8d4824ef365eb', '38393a3b3d3e3f40424344454748494a', 'ecb-tbl-128: I=110'), ('d6d7d4d58a89888f96979899a5a2a3a0', '1d190219f290e0f1715d152d41a23593', '4c4d4e4f51525354565758595b5c5d5e', 'ecb-tbl-128: I=111'), ('18191a1ba8abaaa5303136379b848586', 'a2cd332ce3a0818769616292e87f757b', '60616263656667686a6b6c6d6f707172', 'ecb-tbl-128: I=112'), ('6b6a6968a4a7a6a1d6d72829b0b7b6b5', 'd54afa6ce60fbf9341a3690e21385102', '74757677797a7b7c7e7f808183848586', 'ecb-tbl-128: I=113'), ('000102038a89889755545352a6a9a8ab', '06e5c364ded628a3f5e05e613e356f46', '88898a8b8d8e8f90929394959798999a', 'ecb-tbl-128: I=114'), ('2d2c2f2eb3b0b1b6b6b7b8b9f2f5f4f7', 'eae63c0e62556dac85d221099896355a', 
'9c9d9e9fa1a2a3a4a6a7a8a9abacadae', 'ecb-tbl-128: I=115'), ('979695943536373856575051e09f9e9d', '1fed060e2c6fc93ee764403a889985a2', 'b0b1b2b3b5b6b7b8babbbcbdbfc0c1c2', 'ecb-tbl-128: I=116'), ('a4a5a6a7989b9a9db1b0afae7a7d7c7f', 'c25235c1a30fdec1c7cb5c5737b2a588', 'c4c5c6c7c9cacbcccecfd0d1d3d4d5d6', 'ecb-tbl-128: I=117'), ('c1c0c3c2686b6a55a8a9aeafeae5e4e7', '796dbef95147d4d30873ad8b7b92efc0', 'd8d9dadbdddedfe0e2e3e4e5e7e8e9ea', 'ecb-tbl-128: I=118'), ('c1c0c3c2141716118c8d828364636261', 'cbcf0fb34d98d0bd5c22ce37211a46bf', 'ecedeeeff1f2f3f4f6f7f8f9fbfcfdfe', 'ecb-tbl-128: I=119'), ('93929190cccfcec196979091e0fffefd', '94b44da6466126cafa7c7fd09063fc24', '00010203050607080a0b0c0d0f101112', 'ecb-tbl-128: I=120'), ('b4b5b6b7f9fafbfc25241b1a6e69686b', 'd78c5b5ebf9b4dbda6ae506c5074c8fe', '14151617191a1b1c1e1f202123242526', 'ecb-tbl-128: I=121'), ('868784850704051ac7c6c1c08788898a', '6c27444c27204b043812cf8cf95f9769', '28292a2b2d2e2f30323334353738393a', 'ecb-tbl-128: I=122'), ('f4f5f6f7aaa9a8affdfcf3f277707172', 'be94524ee5a2aa50bba8b75f4c0aebcf', '3c3d3e3f41424344464748494b4c4d4e', 'ecb-tbl-128: I=123'), ('d3d2d1d00605040bc3c2c5c43e010003', 'a0aeaae91ba9f31f51aeb3588cf3a39e', '50515253555657585a5b5c5d5f606162', 'ecb-tbl-128: I=124'), ('73727170424140476a6b74750d0a0b08', '275297779c28266ef9fe4c6a13c08488', '64656667696a6b6c6e6f707173747576', 'ecb-tbl-128: I=125'), ('c2c3c0c10a0908f754555253a1aeafac', '86523d92bb8672cb01cf4a77fd725882', '78797a7b7d7e7f80828384858788898a', 'ecb-tbl-128: I=126'), ('6d6c6f6ef8fbfafd82838c8df8fffefd', '4b8327640e9f33322a04dd96fcbf9a36', '8c8d8e8f91929394969798999b9c9d9e', 'ecb-tbl-128: I=127'), ('f5f4f7f684878689a6a7a0a1d2cdcccf', 'ce52af650d088ca559425223f4d32694', 'a0a1a2a3a5a6a7a8aaabacadafb0b1b2', 'ecb-tbl-128: I=128'), # ecb_tbl.txt, KEYSIZE=192 ('2d33eef2c0430a8a9ebf45e809c40bb6', 'dff4945e0336df4c1c56bc700eff837f', '00010203050607080a0b0c0d0f10111214151617191a1b1c', 'ecb-tbl-192: I=1'), ('6aa375d1fa155a61fb72353e0a5a8756', 
'b6fddef4752765e347d5d2dc196d1252', '1e1f20212324252628292a2b2d2e2f30323334353738393a', 'ecb-tbl-192: I=2'), ('bc3736518b9490dcb8ed60eb26758ed4', 'd23684e3d963b3afcf1a114aca90cbd6', '3c3d3e3f41424344464748494b4c4d4e5051525355565758', 'ecb-tbl-192: I=3'), ('aa214402b46cffb9f761ec11263a311e', '3a7ac027753e2a18c2ceab9e17c11fd0', '5a5b5c5d5f60616264656667696a6b6c6e6f707173747576', 'ecb-tbl-192: I=4'), ('02aea86e572eeab66b2c3af5e9a46fd6', '8f6786bd007528ba26603c1601cdd0d8', '78797a7b7d7e7f80828384858788898a8c8d8e8f91929394', 'ecb-tbl-192: I=5'), ('e2aef6acc33b965c4fa1f91c75ff6f36', 'd17d073b01e71502e28b47ab551168b3', '969798999b9c9d9ea0a1a2a3a5a6a7a8aaabacadafb0b1b2', 'ecb-tbl-192: I=6'), ('0659df46427162b9434865dd9499f91d', 'a469da517119fab95876f41d06d40ffa', 'b4b5b6b7b9babbbcbebfc0c1c3c4c5c6c8c9cacbcdcecfd0', 'ecb-tbl-192: I=7'), ('49a44239c748feb456f59c276a5658df', '6091aa3b695c11f5c0b6ad26d3d862ff', 'd2d3d4d5d7d8d9dadcdddedfe1e2e3e4e6e7e8e9ebecedee', 'ecb-tbl-192: I=8'), ('66208f6e9d04525bdedb2733b6a6be37', '70f9e67f9f8df1294131662dc6e69364', 'f0f1f2f3f5f6f7f8fafbfcfdfe01000204050607090a0b0c', 'ecb-tbl-192: I=9'), ('3393f8dfc729c97f5480b950bc9666b0', 'd154dcafad8b207fa5cbc95e9996b559', '0e0f10111314151618191a1b1d1e1f20222324252728292a', 'ecb-tbl-192: I=10'), ('606834c8ce063f3234cf1145325dbd71', '4934d541e8b46fa339c805a7aeb9e5da', '2c2d2e2f31323334363738393b3c3d3e4041424345464748', 'ecb-tbl-192: I=11'), ('fec1c04f529bbd17d8cecfcc4718b17f', '62564c738f3efe186e1a127a0c4d3c61', '4a4b4c4d4f50515254555657595a5b5c5e5f606163646566', 'ecb-tbl-192: I=12'), ('32df99b431ed5dc5acf8caf6dc6ce475', '07805aa043986eb23693e23bef8f3438', '68696a6b6d6e6f70727374757778797a7c7d7e7f81828384', 'ecb-tbl-192: I=13'), ('7fdc2b746f3f665296943b83710d1f82', 'df0b4931038bade848dee3b4b85aa44b', '868788898b8c8d8e90919293959697989a9b9c9d9fa0a1a2', 'ecb-tbl-192: I=14'), ('8fba1510a3c5b87e2eaa3f7a91455ca2', '592d5fded76582e4143c65099309477c', 'a4a5a6a7a9aaabacaeafb0b1b3b4b5b6b8b9babbbdbebfc0', 
'ecb-tbl-192: I=15'), ('2c9b468b1c2eed92578d41b0716b223b', 'c9b8d6545580d3dfbcdd09b954ed4e92', 'c2c3c4c5c7c8c9cacccdcecfd1d2d3d4d6d7d8d9dbdcddde', 'ecb-tbl-192: I=16'), ('0a2bbf0efc6bc0034f8a03433fca1b1a', '5dccd5d6eb7c1b42acb008201df707a0', 'e0e1e2e3e5e6e7e8eaebecedeff0f1f2f4f5f6f7f9fafbfc', 'ecb-tbl-192: I=17'), ('25260e1f31f4104d387222e70632504b', 'a2a91682ffeb6ed1d34340946829e6f9', 'fefe01010304050608090a0b0d0e0f10121314151718191a', 'ecb-tbl-192: I=18'), ('c527d25a49f08a5228d338642ae65137', 'e45d185b797000348d9267960a68435d', '1c1d1e1f21222324262728292b2c2d2e3031323335363738', 'ecb-tbl-192: I=19'), ('3b49fc081432f5890d0e3d87e884a69e', '45e060dae5901cda8089e10d4f4c246b', '3a3b3c3d3f40414244454647494a4b4c4e4f505153545556', 'ecb-tbl-192: I=20'), ('d173f9ed1e57597e166931df2754a083', 'f6951afacc0079a369c71fdcff45df50', '58595a5b5d5e5f60626364656768696a6c6d6e6f71727374', 'ecb-tbl-192: I=21'), ('8c2b7cafa5afe7f13562daeae1adede0', '9e95e00f351d5b3ac3d0e22e626ddad6', '767778797b7c7d7e80818283858687888a8b8c8d8f909192', 'ecb-tbl-192: I=22'), ('aaf4ec8c1a815aeb826cab741339532c', '9cb566ff26d92dad083b51fdc18c173c', '94959697999a9b9c9e9fa0a1a3a4a5a6a8a9aaabadaeafb0', 'ecb-tbl-192: I=23'), ('40be8c5d9108e663f38f1a2395279ecf', 'c9c82766176a9b228eb9a974a010b4fb', 'd0d1d2d3d5d6d7d8dadbdcdddfe0e1e2e4e5e6e7e9eaebec', 'ecb-tbl-192: I=24'), ('0c8ad9bc32d43e04716753aa4cfbe351', 'd8e26aa02945881d5137f1c1e1386e88', '2a2b2c2d2f30313234353637393a3b3c3e3f404143444546', 'ecb-tbl-192: I=25'), ('1407b1d5f87d63357c8dc7ebbaebbfee', 'c0e024ccd68ff5ffa4d139c355a77c55', '48494a4b4d4e4f50525354555758595a5c5d5e5f61626364', 'ecb-tbl-192: I=26'), ('e62734d1ae3378c4549e939e6f123416', '0b18b3d16f491619da338640df391d43', '84858687898a8b8c8e8f90919394959698999a9b9d9e9fa0', 'ecb-tbl-192: I=27'), ('5a752cff2a176db1a1de77f2d2cdee41', 'dbe09ac8f66027bf20cb6e434f252efc', 'a2a3a4a5a7a8a9aaacadaeafb1b2b3b4b6b7b8b9bbbcbdbe', 'ecb-tbl-192: I=28'), ('a9c8c3a4eabedc80c64730ddd018cd88', 
'6d04e5e43c5b9cbe05feb9606b6480fe', 'c0c1c2c3c5c6c7c8cacbcccdcfd0d1d2d4d5d6d7d9dadbdc', 'ecb-tbl-192: I=29'), ('ee9b3dbbdb86180072130834d305999a', 'dd1d6553b96be526d9fee0fbd7176866', '1a1b1c1d1f20212224252627292a2b2c2e2f303133343536', 'ecb-tbl-192: I=30'), ('a7fa8c3586b8ebde7568ead6f634a879', '0260ca7e3f979fd015b0dd4690e16d2a', '38393a3b3d3e3f40424344454748494a4c4d4e4f51525354', 'ecb-tbl-192: I=31'), ('37e0f4a87f127d45ac936fe7ad88c10a', '9893734de10edcc8a67c3b110b8b8cc6', '929394959798999a9c9d9e9fa1a2a3a4a6a7a8a9abacadae', 'ecb-tbl-192: I=32'), ('3f77d8b5d92bac148e4e46f697a535c5', '93b30b750516b2d18808d710c2ee84ef', '464748494b4c4d4e50515253555657585a5b5c5d5f606162', 'ecb-tbl-192: I=33'), ('d25ebb686c40f7e2c4da1014936571ca', '16f65fa47be3cb5e6dfe7c6c37016c0e', '828384858788898a8c8d8e8f91929394969798999b9c9d9e', 'ecb-tbl-192: I=34'), ('4f1c769d1e5b0552c7eca84dea26a549', 'f3847210d5391e2360608e5acb560581', 'a0a1a2a3a5a6a7a8aaabacadafb0b1b2b4b5b6b7b9babbbc', 'ecb-tbl-192: I=35'), ('8548e2f882d7584d0fafc54372b6633a', '8754462cd223366d0753913e6af2643d', 'bebfc0c1c3c4c5c6c8c9cacbcdcecfd0d2d3d4d5d7d8d9da', 'ecb-tbl-192: I=36'), ('87d7a336cb476f177cd2a51af2a62cdf', '1ea20617468d1b806a1fd58145462017', 'dcdddedfe1e2e3e4e6e7e8e9ebecedeef0f1f2f3f5f6f7f8', 'ecb-tbl-192: I=37'), ('03b1feac668c4e485c1065dfc22b44ee', '3b155d927355d737c6be9dda60136e2e', 'fafbfcfdfe01000204050607090a0b0c0e0f101113141516', 'ecb-tbl-192: I=38'), ('bda15e66819fa72d653a6866aa287962', '26144f7b66daa91b6333dbd3850502b3', '18191a1b1d1e1f20222324252728292a2c2d2e2f31323334', 'ecb-tbl-192: I=39'), ('4d0c7a0d2505b80bf8b62ceb12467f0a', 'e4f9a4ab52ced8134c649bf319ebcc90', '363738393b3c3d3e40414243454647484a4b4c4d4f505152', 'ecb-tbl-192: I=40'), ('626d34c9429b37211330986466b94e5f', 'b9ddd29ac6128a6cab121e34a4c62b36', '54555657595a5b5c5e5f60616364656668696a6b6d6e6f70', 'ecb-tbl-192: I=41'), ('333c3e6bf00656b088a17e5ff0e7f60a', '6fcddad898f2ce4eff51294f5eaaf5c9', '727374757778797a7c7d7e7f81828384868788898b8c8d8e', 
'ecb-tbl-192: I=42'), ('687ed0cdc0d2a2bc8c466d05ef9d2891', 'c9a6fe2bf4028080bea6f7fc417bd7e3', '90919293959697989a9b9c9d9fa0a1a2a4a5a6a7a9aaabac', 'ecb-tbl-192: I=43'), ('487830e78cc56c1693e64b2a6660c7b6', '6a2026846d8609d60f298a9c0673127f', 'aeafb0b1b3b4b5b6b8b9babbbdbebfc0c2c3c4c5c7c8c9ca', 'ecb-tbl-192: I=44'), ('7a48d6b7b52b29392aa2072a32b66160', '2cb25c005e26efea44336c4c97a4240b', 'cccdcecfd1d2d3d4d6d7d8d9dbdcdddee0e1e2e3e5e6e7e8', 'ecb-tbl-192: I=45'), ('907320e64c8c5314d10f8d7a11c8618d', '496967ab8680ddd73d09a0e4c7dcc8aa', 'eaebecedeff0f1f2f4f5f6f7f9fafbfcfefe010103040506', 'ecb-tbl-192: I=46'), ('b561f2ca2d6e65a4a98341f3ed9ff533', 'd5af94de93487d1f3a8c577cb84a66a4', '08090a0b0d0e0f10121314151718191a1c1d1e1f21222324', 'ecb-tbl-192: I=47'), ('df769380d212792d026f049e2e3e48ef', '84bdac569cae2828705f267cc8376e90', '262728292b2c2d2e30313233353637383a3b3c3d3f404142', 'ecb-tbl-192: I=48'), ('79f374bc445bdabf8fccb8843d6054c6', 'f7401dda5ad5ab712b7eb5d10c6f99b6', '44454647494a4b4c4e4f50515354555658595a5b5d5e5f60', 'ecb-tbl-192: I=49'), ('4e02f1242fa56b05c68dbae8fe44c9d6', '1c9d54318539ebd4c3b5b7e37bf119f0', '626364656768696a6c6d6e6f71727374767778797b7c7d7e', 'ecb-tbl-192: I=50'), ('cf73c93cbff57ac635a6f4ad2a4a1545', 'aca572d65fb2764cffd4a6eca090ea0d', '80818283858687888a8b8c8d8f90919294959697999a9b9c', 'ecb-tbl-192: I=51'), ('9923548e2875750725b886566784c625', '36d9c627b8c2a886a10ccb36eae3dfbb', '9e9fa0a1a3a4a5a6a8a9aaabadaeafb0b2b3b4b5b7b8b9ba', 'ecb-tbl-192: I=52'), ('4888336b723a022c9545320f836a4207', '010edbf5981e143a81d646e597a4a568', 'bcbdbebfc1c2c3c4c6c7c8c9cbcccdced0d1d2d3d5d6d7d8', 'ecb-tbl-192: I=53'), ('f84d9a5561b0608b1160dee000c41ba8', '8db44d538dc20cc2f40f3067fd298e60', 'dadbdcdddfe0e1e2e4e5e6e7e9eaebeceeeff0f1f3f4f5f6', 'ecb-tbl-192: I=54'), ('c23192a0418e30a19b45ae3e3625bf22', '930eb53bc71e6ac4b82972bdcd5aafb3', 'f8f9fafbfdfefe00020304050708090a0c0d0e0f11121314', 'ecb-tbl-192: I=55'), ('b84e0690b28b0025381ad82a15e501a7', 
'6c42a81edcbc9517ccd89c30c95597b4', '161718191b1c1d1e20212223252627282a2b2c2d2f303132', 'ecb-tbl-192: I=56'), ('acef5e5c108876c4f06269f865b8f0b0', 'da389847ad06df19d76ee119c71e1dd3', '34353637393a3b3c3e3f40414344454648494a4b4d4e4f50', 'ecb-tbl-192: I=57'), ('0f1b3603e0f5ddea4548246153a5e064', 'e018fdae13d3118f9a5d1a647a3f0462', '525354555758595a5c5d5e5f61626364666768696b6c6d6e', 'ecb-tbl-192: I=58'), ('fbb63893450d42b58c6d88cd3c1809e3', '2aa65db36264239d3846180fabdfad20', '70717273757677787a7b7c7d7f80818284858687898a8b8c', 'ecb-tbl-192: I=59'), ('4bef736df150259dae0c91354e8a5f92', '1472163e9a4f780f1ceb44b07ecf4fdb', '8e8f90919394959698999a9b9d9e9fa0a2a3a4a5a7a8a9aa', 'ecb-tbl-192: I=60'), ('7d2d46242056ef13d3c3fc93c128f4c7', 'c8273fdc8f3a9f72e91097614b62397c', 'acadaeafb1b2b3b4b6b7b8b9bbbcbdbec0c1c2c3c5c6c7c8', 'ecb-tbl-192: I=61'), ('e9c1ba2df415657a256edb33934680fd', '66c8427dcd733aaf7b3470cb7d976e3f', 'cacbcccdcfd0d1d2d4d5d6d7d9dadbdcdedfe0e1e3e4e5e6', 'ecb-tbl-192: I=62'), ('e23ee277b0aa0a1dfb81f7527c3514f1', '146131cb17f1424d4f8da91e6f80c1d0', 'e8e9eaebedeeeff0f2f3f4f5f7f8f9fafcfdfeff01020304', 'ecb-tbl-192: I=63'), ('3e7445b0b63caaf75e4a911e12106b4c', '2610d0ad83659081ae085266a88770dc', '060708090b0c0d0e10111213151617181a1b1c1d1f202122', 'ecb-tbl-192: I=64'), ('767774752023222544455a5be6e1e0e3', '38a2b5a974b0575c5d733917fb0d4570', '24252627292a2b2c2e2f30313334353638393a3b3d3e3f40', 'ecb-tbl-192: I=65'), ('72737475717e7f7ce9e8ebea696a6b6c', 'e21d401ebc60de20d6c486e4f39a588b', '424344454748494a4c4d4e4f51525354565758595b5c5d5e', 'ecb-tbl-192: I=66'), ('dfdedddc25262728c9c8cfcef1eeefec', 'e51d5f88c670b079c0ca1f0c2c4405a2', '60616263656667686a6b6c6d6f70717274757677797a7b7c', 'ecb-tbl-192: I=67'), ('fffe0100707776755f5e5d5c7675746b', '246a94788a642fb3d1b823c8762380c8', '7e7f80818384858688898a8b8d8e8f90929394959798999a', 'ecb-tbl-192: I=68'), ('e0e1e2e3424140479f9e9190292e2f2c', 'b80c391c5c41a4c3b30c68e0e3d7550f', '9c9d9e9fa1a2a3a4a6a7a8a9abacadaeb0b1b2b3b5b6b7b8', 
'ecb-tbl-192: I=69'), ('2120272690efeeed3b3a39384e4d4c4b', 'b77c4754fc64eb9a1154a9af0bb1f21c', 'babbbcbdbfc0c1c2c4c5c6c7c9cacbcccecfd0d1d3d4d5d6', 'ecb-tbl-192: I=70'), ('ecedeeef5350516ea1a0a7a6a3acadae', 'fb554de520d159a06bf219fc7f34a02f', 'd8d9dadbdddedfe0e2e3e4e5e7e8e9eaecedeeeff1f2f3f4', 'ecb-tbl-192: I=71'), ('32333c3d25222320e9e8ebeacecdccc3', 'a89fba152d76b4927beed160ddb76c57', 'f6f7f8f9fbfcfdfe00010203050607080a0b0c0d0f101112', 'ecb-tbl-192: I=72'), ('40414243626160678a8bb4b511161714', '5676eab4a98d2e8473b3f3d46424247c', '14151617191a1b1c1e1f20212324252628292a2b2d2e2f30', 'ecb-tbl-192: I=73'), ('94959293f5fafbf81f1e1d1c7c7f7e79', '4e8f068bd7ede52a639036ec86c33568', '323334353738393a3c3d3e3f41424344464748494b4c4d4e', 'ecb-tbl-192: I=74'), ('bebfbcbd191a1b14cfcec9c8546b6a69', 'f0193c4d7aff1791ee4c07eb4a1824fc', '50515253555657585a5b5c5d5f60616264656667696a6b6c', 'ecb-tbl-192: I=75'), ('2c2d3233898e8f8cbbbab9b8333031ce', 'ac8686eeca9ba761afe82d67b928c33f', '6e6f70717374757678797a7b7d7e7f80828384858788898a', 'ecb-tbl-192: I=76'), ('84858687bfbcbdba37363938fdfafbf8', '5faf8573e33b145b6a369cd3606ab2c9', '8c8d8e8f91929394969798999b9c9d9ea0a1a2a3a5a6a7a8', 'ecb-tbl-192: I=77'), ('828384857669686b909192930b08090e', '31587e9944ab1c16b844ecad0df2e7da', 'aaabacadafb0b1b2b4b5b6b7b9babbbcbebfc0c1c3c4c5c6', 'ecb-tbl-192: I=78'), ('bebfbcbd9695948b707176779e919093', 'd017fecd91148aba37f6f3068aa67d8a', 'c8c9cacbcdcecfd0d2d3d4d5d7d8d9dadcdddedfe1e2e3e4', 'ecb-tbl-192: I=79'), ('8b8a85846067666521202322d0d3d2dd', '788ef2f021a73cba2794b616078a8500', 'e6e7e8e9ebecedeef0f1f2f3f5f6f7f8fafbfcfdfe010002', 'ecb-tbl-192: I=80'), ('76777475f1f2f3f4f8f9e6e777707172', '5d1ef20dced6bcbc12131ac7c54788aa', '04050607090a0b0c0e0f10111314151618191a1b1d1e1f20', 'ecb-tbl-192: I=81'), ('a4a5a2a34f404142b4b5b6b727242522', 'b3c8cf961faf9ea05fdde6d1e4d8f663', '222324252728292a2c2d2e2f31323334363738393b3c3d3e', 'ecb-tbl-192: I=82'), ('94959697e1e2e3ec16171011839c9d9e', 
'143075c70605861c7fac6526199e459f', '40414243454647484a4b4c4d4f50515254555657595a5b5c', 'ecb-tbl-192: I=83'), ('03023d3c06010003dedfdcddfffcfde2', 'a5ae12eade9a87268d898bfc8fc0252a', '5e5f60616364656668696a6b6d6e6f70727374757778797a', 'ecb-tbl-192: I=84'), ('10111213f1f2f3f4cecfc0c1dbdcddde', '0924f7cf2e877a4819f5244a360dcea9', '7c7d7e7f81828384868788898b8c8d8e9091929395969798', 'ecb-tbl-192: I=85'), ('67666160724d4c4f1d1c1f1e73707176', '3d9e9635afcc3e291cc7ab3f27d1c99a', '9a9b9c9d9fa0a1a2a4a5a6a7a9aaabacaeafb0b1b3b4b5b6', 'ecb-tbl-192: I=86'), ('e6e7e4e5a8abaad584858283909f9e9d', '9d80feebf87510e2b8fb98bb54fd788c', 'b8b9babbbdbebfc0c2c3c4c5c7c8c9cacccdcecfd1d2d3d4', 'ecb-tbl-192: I=87'), ('71707f7e565150537d7c7f7e6162636c', '5f9d1a082a1a37985f174002eca01309', 'd6d7d8d9dbdcdddee0e1e2e3e5e6e7e8eaebecedeff0f1f2', 'ecb-tbl-192: I=88'), ('64656667212223245555aaaa03040506', 'a390ebb1d1403930184a44b4876646e4', 'f4f5f6f7f9fafbfcfefe01010304050608090a0b0d0e0f10', 'ecb-tbl-192: I=89'), ('9e9f9899aba4a5a6cfcecdcc2b28292e', '700fe918981c3195bb6c4bcb46b74e29', '121314151718191a1c1d1e1f21222324262728292b2c2d2e', 'ecb-tbl-192: I=90'), ('c7c6c5c4d1d2d3dc626364653a454447', '907984406f7bf2d17fb1eb15b673d747', '30313233353637383a3b3c3d3f40414244454647494a4b4c', 'ecb-tbl-192: I=91'), ('f6f7e8e9e0e7e6e51d1c1f1e5b585966', 'c32a956dcfc875c2ac7c7cc8b8cc26e1', '4e4f50515354555658595a5b5d5e5f60626364656768696a', 'ecb-tbl-192: I=92'), ('bcbdbebf5d5e5f5868696667f4f3f2f1', '02646e2ebfa9b820cf8424e9b9b6eb51', '6c6d6e6f71727374767778797b7c7d7e8081828385868788', 'ecb-tbl-192: I=93'), ('40414647b0afaead9b9a99989b98999e', '621fda3a5bbd54c6d3c685816bd4ead8', '8a8b8c8d8f90919294959697999a9b9c9e9fa0a1a3a4a5a6', 'ecb-tbl-192: I=94'), ('69686b6a0201001f0f0e0908b4bbbab9', 'd4e216040426dfaf18b152469bc5ac2f', 'a8a9aaabadaeafb0b2b3b4b5b7b8b9babcbdbebfc1c2c3c4', 'ecb-tbl-192: I=95'), ('c7c6c9c8d8dfdedd5a5b5859bebdbcb3', '9d0635b9d33b6cdbd71f5d246ea17cc8', 'c6c7c8c9cbcccdced0d1d2d3d5d6d7d8dadbdcdddfe0e1e2', 
'ecb-tbl-192: I=96'), ('dedfdcdd787b7a7dfffee1e0b2b5b4b7', '10abad1bd9bae5448808765583a2cc1a', 'e4e5e6e7e9eaebeceeeff0f1f3f4f5f6f8f9fafbfdfefe00', 'ecb-tbl-192: I=97'), ('4d4c4b4a606f6e6dd0d1d2d3fbf8f9fe', '6891889e16544e355ff65a793c39c9a8', '020304050708090a0c0d0e0f11121314161718191b1c1d1e', 'ecb-tbl-192: I=98'), ('b7b6b5b4d7d4d5dae5e4e3e2e1fefffc', 'cc735582e68072c163cd9ddf46b91279', '20212223252627282a2b2c2d2f30313234353637393a3b3c', 'ecb-tbl-192: I=99'), ('cecfb0b1f7f0f1f2aeafacad3e3d3c23', 'c5c68b9aeeb7f878df578efa562f9574', '3e3f40414344454648494a4b4d4e4f50525354555758595a', 'ecb-tbl-192: I=100'), ('cacbc8c9cdcecfc812131c1d494e4f4c', '5f4764395a667a47d73452955d0d2ce8', '5c5d5e5f61626364666768696b6c6d6e7071727375767778', 'ecb-tbl-192: I=101'), ('9d9c9b9ad22d2c2fb1b0b3b20c0f0e09', '701448331f66106cefddf1eb8267c357', '7a7b7c7d7f80818284858687898a8b8c8e8f909193949596', 'ecb-tbl-192: I=102'), ('7a7b787964676659959493924f404142', 'cb3ee56d2e14b4e1941666f13379d657', '98999a9b9d9e9fa0a2a3a4a5a7a8a9aaacadaeafb1b2b3b4', 'ecb-tbl-192: I=103'), ('aaaba4a5cec9c8cb1f1e1d1caba8a9a6', '9fe16efd18ab6e1981191851fedb0764', 'b6b7b8b9bbbcbdbec0c1c2c3c5c6c7c8cacbcccdcfd0d1d2', 'ecb-tbl-192: I=104'), ('93929190282b2a2dc4c5fafb92959497', '3dc9ba24e1b223589b147adceb4c8e48', 'd4d5d6d7d9dadbdcdedfe0e1e3e4e5e6e8e9eaebedeeeff0', 'ecb-tbl-192: I=105'), ('efeee9e8ded1d0d339383b3a888b8a8d', '1c333032682e7d4de5e5afc05c3e483c', 'f2f3f4f5f7f8f9fafcfdfeff01020304060708090b0c0d0e', 'ecb-tbl-192: I=106'), ('7f7e7d7ca2a1a0af78797e7f112e2f2c', 'd593cc99a95afef7e92038e05a59d00a', '10111213151617181a1b1c1d1f20212224252627292a2b2c', 'ecb-tbl-192: I=107'), ('84859a9b2b2c2d2e868784852625245b', '51e7f96f53b4353923452c222134e1ec', '2e2f30313334353638393a3b3d3e3f40424344454748494a', 'ecb-tbl-192: I=108'), ('b0b1b2b3070405026869666710171615', '4075b357a1a2b473400c3b25f32f81a4', '4c4d4e4f51525354565758595b5c5d5e6061626365666768', 'ecb-tbl-192: I=109'), ('acadaaabbda2a3a00d0c0f0e595a5b5c', 
'302e341a3ebcd74f0d55f61714570284', '6a6b6c6d6f70717274757677797a7b7c7e7f808183848586', 'ecb-tbl-192: I=110'), ('121310115655544b5253545569666764', '57abdd8231280da01c5042b78cf76522', '88898a8b8d8e8f90929394959798999a9c9d9e9fa1a2a3a4', 'ecb-tbl-192: I=111'), ('dedfd0d166616063eaebe8e94142434c', '17f9ea7eea17ac1adf0e190fef799e92', 'a6a7a8a9abacadaeb0b1b2b3b5b6b7b8babbbcbdbfc0c1c2', 'ecb-tbl-192: I=112'), ('dbdad9d81417161166677879e0e7e6e5', '2e1bdd563dd87ee5c338dd6d098d0a7a', 'c4c5c6c7c9cacbcccecfd0d1d3d4d5d6d8d9dadbdddedfe0', 'ecb-tbl-192: I=113'), ('6a6b6c6de0efeeed2b2a2928c0c3c2c5', 'eb869996e6f8bfb2bfdd9e0c4504dbb2', 'e2e3e4e5e7e8e9eaecedeeeff1f2f3f4f6f7f8f9fbfcfdfe', 'ecb-tbl-192: I=114'), ('b1b0b3b21714151a1a1b1c1d5649484b', 'c2e01549e9decf317468b3e018c61ba8', '00010203050607080a0b0c0d0f10111214151617191a1b1c', 'ecb-tbl-192: I=115'), ('39380706a3a4a5a6c4c5c6c77271706f', '8da875d033c01dd463b244a1770f4a22', '1e1f20212324252628292a2b2d2e2f30323334353738393a', 'ecb-tbl-192: I=116'), ('5c5d5e5f1013121539383736e2e5e4e7', '8ba0dcf3a186844f026d022f8839d696', '3c3d3e3f41424344464748494b4c4d4e5051525355565758', 'ecb-tbl-192: I=117'), ('43424544ead5d4d72e2f2c2d64676661', 'e9691ff9a6cc6970e51670a0fd5b88c1', '5a5b5c5d5f60616264656667696a6b6c6e6f707173747576', 'ecb-tbl-192: I=118'), ('55545756989b9a65f8f9feff18171615', 'f2baec06faeed30f88ee63ba081a6e5b', '78797a7b7d7e7f80828384858788898a8c8d8e8f91929394', 'ecb-tbl-192: I=119'), ('05040b0a525554573c3d3e3f4a494847', '9c39d4c459ae5753394d6094adc21e78', '969798999b9c9d9ea0a1a2a3a5a6a7a8aaabacadafb0b1b2', 'ecb-tbl-192: I=120'), ('14151617595a5b5c8584fbfa8e89888b', '6345b532a11904502ea43ba99c6bd2b2', 'b4b5b6b7b9babbbcbebfc0c1c3c4c5c6c8c9cacbcdcecfd0', 'ecb-tbl-192: I=121'), ('7c7d7a7bfdf2f3f029282b2a51525354', '5ffae3061a95172e4070cedce1e428c8', 'd2d3d4d5d7d8d9dadcdddedfe1e2e3e4e6e7e8e9ebecedee', 'ecb-tbl-192: I=122'), ('38393a3b1e1d1c1341404746c23d3c3e', '0a4566be4cdf9adce5dec865b5ab34cd', 
'f0f1f2f3f5f6f7f8fafbfcfdfe01000204050607090a0b0c', 'ecb-tbl-192: I=123'), ('8d8c939240474645818083827c7f7e41', 'ca17fcce79b7404f2559b22928f126fb', '0e0f10111314151618191a1b1d1e1f20222324252728292a', 'ecb-tbl-192: I=124'), ('3b3a39381a19181f32333c3d45424340', '97ca39b849ed73a6470a97c821d82f58', '2c2d2e2f31323334363738393b3c3d3e4041424345464748', 'ecb-tbl-192: I=125'), ('f0f1f6f738272625828380817f7c7d7a', '8198cb06bc684c6d3e9b7989428dcf7a', '4a4b4c4d4f50515254555657595a5b5c5e5f606163646566', 'ecb-tbl-192: I=126'), ('89888b8a0407061966676061141b1a19', 'f53c464c705ee0f28d9a4c59374928bd', '68696a6b6d6e6f70727374757778797a7c7d7e7f81828384', 'ecb-tbl-192: I=127'), ('d3d2dddcaaadacaf9c9d9e9fe8ebeae5', '9adb3d4cca559bb98c3e2ed73dbf1154', '868788898b8c8d8e90919293959697989a9b9c9d9fa0a1a2', 'ecb-tbl-192: I=128'), # ecb_tbl.txt, KEYSIZE=256 ('834eadfccac7e1b30664b1aba44815ab', '1946dabf6a03a2a2c3d0b05080aed6fc', '00010203050607080a0b0c0d0f10111214151617191a1b1c1e1f202123242526', 'ecb-tbl-256: I=1'), ('d9dc4dba3021b05d67c0518f72b62bf1', '5ed301d747d3cc715445ebdec62f2fb4', '28292a2b2d2e2f30323334353738393a3c3d3e3f41424344464748494b4c4d4e', 'ecb-tbl-256: I=2'), ('a291d86301a4a739f7392173aa3c604c', '6585c8f43d13a6beab6419fc5935b9d0', '50515253555657585a5b5c5d5f60616264656667696a6b6c6e6f707173747576', 'ecb-tbl-256: I=3'), ('4264b2696498de4df79788a9f83e9390', '2a5b56a596680fcc0e05f5e0f151ecae', '78797a7b7d7e7f80828384858788898a8c8d8e8f91929394969798999b9c9d9e', 'ecb-tbl-256: I=4'), ('ee9932b3721804d5a83ef5949245b6f6', 'f5d6ff414fd2c6181494d20c37f2b8c4', 'a0a1a2a3a5a6a7a8aaabacadafb0b1b2b4b5b6b7b9babbbcbebfc0c1c3c4c5c6', 'ecb-tbl-256: I=5'), ('e6248f55c5fdcbca9cbbb01c88a2ea77', '85399c01f59fffb5204f19f8482f00b8', 'c8c9cacbcdcecfd0d2d3d4d5d7d8d9dadcdddedfe1e2e3e4e6e7e8e9ebecedee', 'ecb-tbl-256: I=6'), ('b8358e41b9dff65fd461d55a99266247', '92097b4c88a041ddf98144bc8d22e8e7', 'f0f1f2f3f5f6f7f8fafbfcfdfe01000204050607090a0b0c0e0f101113141516', 'ecb-tbl-256: I=7'), 
('f0e2d72260af58e21e015ab3a4c0d906', '89bd5b73b356ab412aef9f76cea2d65c', '18191a1b1d1e1f20222324252728292a2c2d2e2f31323334363738393b3c3d3e', 'ecb-tbl-256: I=8'), ('475b8b823ce8893db3c44a9f2a379ff7', '2536969093c55ff9454692f2fac2f530', '40414243454647484a4b4c4d4f50515254555657595a5b5c5e5f606163646566', 'ecb-tbl-256: I=9'), ('688f5281945812862f5f3076cf80412f', '07fc76a872843f3f6e0081ee9396d637', '68696a6b6d6e6f70727374757778797a7c7d7e7f81828384868788898b8c8d8e', 'ecb-tbl-256: I=10'), ('08d1d2bc750af553365d35e75afaceaa', 'e38ba8ec2aa741358dcc93e8f141c491', '90919293959697989a9b9c9d9fa0a1a2a4a5a6a7a9aaabacaeafb0b1b3b4b5b6', 'ecb-tbl-256: I=11'), ('8707121f47cc3efceca5f9a8474950a1', 'd028ee23e4a89075d0b03e868d7d3a42', 'b8b9babbbdbebfc0c2c3c4c5c7c8c9cacccdcecfd1d2d3d4d6d7d8d9dbdcddde', 'ecb-tbl-256: I=12'), ('e51aa0b135dba566939c3b6359a980c5', '8cd9423dfc459e547155c5d1d522e540', 'e0e1e2e3e5e6e7e8eaebecedeff0f1f2f4f5f6f7f9fafbfcfefe010103040506', 'ecb-tbl-256: I=13'), ('069a007fc76a459f98baf917fedf9521', '080e9517eb1677719acf728086040ae3', '08090a0b0d0e0f10121314151718191a1c1d1e1f21222324262728292b2c2d2e', 'ecb-tbl-256: I=14'), ('726165c1723fbcf6c026d7d00b091027', '7c1700211a3991fc0ecded0ab3e576b0', '30313233353637383a3b3c3d3f40414244454647494a4b4c4e4f505153545556', 'ecb-tbl-256: I=15'), ('d7c544de91d55cfcde1f84ca382200ce', 'dabcbcc855839251db51e224fbe87435', '58595a5b5d5e5f60626364656768696a6c6d6e6f71727374767778797b7c7d7e', 'ecb-tbl-256: I=16'), ('fed3c9a161b9b5b2bd611b41dc9da357', '68d56fad0406947a4dd27a7448c10f1d', '80818283858687888a8b8c8d8f90919294959697999a9b9c9e9fa0a1a3a4a5a6', 'ecb-tbl-256: I=17'), ('4f634cdc6551043409f30b635832cf82', 'da9a11479844d1ffee24bbf3719a9925', 'a8a9aaabadaeafb0b2b3b4b5b7b8b9babcbdbebfc1c2c3c4c6c7c8c9cbcccdce', 'ecb-tbl-256: I=18'), ('109ce98db0dfb36734d9f3394711b4e6', '5e4ba572f8d23e738da9b05ba24b8d81', 'd0d1d2d3d5d6d7d8dadbdcdddfe0e1e2e4e5e6e7e9eaebeceeeff0f1f3f4f5f6', 'ecb-tbl-256: I=19'), ('4ea6dfaba2d8a02ffdffa89835987242', 
'a115a2065d667e3f0b883837a6e903f8', '70717273757677787a7b7c7d7f80818284858687898a8b8c8e8f909193949596', 'ecb-tbl-256: I=20'), ('5ae094f54af58e6e3cdbf976dac6d9ef', '3e9e90dc33eac2437d86ad30b137e66e', '98999a9b9d9e9fa0a2a3a4a5a7a8a9aaacadaeafb1b2b3b4b6b7b8b9bbbcbdbe', 'ecb-tbl-256: I=21'), ('764d8e8e0f29926dbe5122e66354fdbe', '01ce82d8fbcdae824cb3c48e495c3692', 'c0c1c2c3c5c6c7c8cacbcccdcfd0d1d2d4d5d6d7d9dadbdcdedfe0e1e3e4e5e6', 'ecb-tbl-256: I=22'), ('3f0418f888cdf29a982bf6b75410d6a9', '0c9cff163ce936faaf083cfd3dea3117', 'e8e9eaebedeeeff0f2f3f4f5f7f8f9fafcfdfeff01020304060708090b0c0d0e', 'ecb-tbl-256: I=23'), ('e4a3e7cb12cdd56aa4a75197a9530220', '5131ba9bd48f2bba85560680df504b52', '10111213151617181a1b1c1d1f20212224252627292a2b2c2e2f303133343536', 'ecb-tbl-256: I=24'), ('211677684aac1ec1a160f44c4ebf3f26', '9dc503bbf09823aec8a977a5ad26ccb2', '38393a3b3d3e3f40424344454748494a4c4d4e4f51525354565758595b5c5d5e', 'ecb-tbl-256: I=25'), ('d21e439ff749ac8f18d6d4b105e03895', '9a6db0c0862e506a9e397225884041d7', '60616263656667686a6b6c6d6f70717274757677797a7b7c7e7f808183848586', 'ecb-tbl-256: I=26'), ('d9f6ff44646c4725bd4c0103ff5552a7', '430bf9570804185e1ab6365fc6a6860c', '88898a8b8d8e8f90929394959798999a9c9d9e9fa1a2a3a4a6a7a8a9abacadae', 'ecb-tbl-256: I=27'), ('0b1256c2a00b976250cfc5b0c37ed382', '3525ebc02f4886e6a5a3762813e8ce8a', 'b0b1b2b3b5b6b7b8babbbcbdbfc0c1c2c4c5c6c7c9cacbcccecfd0d1d3d4d5d6', 'ecb-tbl-256: I=28'), ('b056447ffc6dc4523a36cc2e972a3a79', '07fa265c763779cce224c7bad671027b', 'd8d9dadbdddedfe0e2e3e4e5e7e8e9eaecedeeeff1f2f3f4f6f7f8f9fbfcfdfe', 'ecb-tbl-256: I=29'), ('5e25ca78f0de55802524d38da3fe4456', 'e8b72b4e8be243438c9fff1f0e205872', '00010203050607080a0b0c0d0f10111214151617191a1b1c1e1f202123242526', 'ecb-tbl-256: I=30'), ('a5bcf4728fa5eaad8567c0dc24675f83', '109d4f999a0e11ace1f05e6b22cbcb50', '28292a2b2d2e2f30323334353738393a3c3d3e3f41424344464748494b4c4d4e', 'ecb-tbl-256: I=31'), ('814e59f97ed84646b78b2ca022e9ca43', '45a5e8d4c3ed58403ff08d68a0cc4029', 
'50515253555657585a5b5c5d5f60616264656667696a6b6c6e6f707173747576', 'ecb-tbl-256: I=32'), ('15478beec58f4775c7a7f5d4395514d7', '196865964db3d417b6bd4d586bcb7634', '78797a7b7d7e7f80828384858788898a8c8d8e8f91929394969798999b9c9d9e', 'ecb-tbl-256: I=33'), ('253548ffca461c67c8cbc78cd59f4756', '60436ad45ac7d30d99195f815d98d2ae', 'a0a1a2a3a5a6a7a8aaabacadafb0b1b2b4b5b6b7b9babbbcbebfc0c1c3c4c5c6', 'ecb-tbl-256: I=34'), ('fd7ad8d73b9b0f8cc41600640f503d65', 'bb07a23f0b61014b197620c185e2cd75', 'c8c9cacbcdcecfd0d2d3d4d5d7d8d9dadcdddedfe1e2e3e4e6e7e8e9ebecedee', 'ecb-tbl-256: I=35'), ('06199de52c6cbf8af954cd65830bcd56', '5bc0b2850129c854423aff0751fe343b', 'f0f1f2f3f5f6f7f8fafbfcfdfe01000204050607090a0b0c0e0f101113141516', 'ecb-tbl-256: I=36'), ('f17c4ffe48e44c61bd891e257e725794', '7541a78f96738e6417d2a24bd2beca40', '18191a1b1d1e1f20222324252728292a2c2d2e2f31323334363738393b3c3d3e', 'ecb-tbl-256: I=37'), ('9a5b4a402a3e8a59be6bf5cd8154f029', 'b0a303054412882e464591f1546c5b9e', '40414243454647484a4b4c4d4f50515254555657595a5b5c5e5f606163646566', 'ecb-tbl-256: I=38'), ('79bd40b91a7e07dc939d441782ae6b17', '778c06d8a355eeee214fcea14b4e0eef', '68696a6b6d6e6f70727374757778797a7c7d7e7f81828384868788898b8c8d8e', 'ecb-tbl-256: I=39'), ('d8ceaaf8976e5fbe1012d8c84f323799', '09614206d15cbace63227d06db6beebb', '90919293959697989a9b9c9d9fa0a1a2a4a5a6a7a9aaabacaeafb0b1b3b4b5b6', 'ecb-tbl-256: I=40'), ('3316e2751e2e388b083da23dd6ac3fbe', '41b97fb20e427a9fdbbb358d9262255d', 'b8b9babbbdbebfc0c2c3c4c5c7c8c9cacccdcecfd1d2d3d4d6d7d8d9dbdcddde', 'ecb-tbl-256: I=41'), ('8b7cfbe37de7dca793521819242c5816', 'c1940f703d845f957652c2d64abd7adf', 'e0e1e2e3e5e6e7e8eaebecedeff0f1f2f4f5f6f7f9fafbfcfefe010103040506', 'ecb-tbl-256: I=42'), ('f23f033c0eebf8ec55752662fd58ce68', 'd2d44fcdae5332343366db297efcf21b', '08090a0b0d0e0f10121314151718191a1c1d1e1f21222324262728292b2c2d2e', 'ecb-tbl-256: I=43'), ('59eb34f6c8bdbacc5fc6ad73a59a1301', 'ea8196b79dbe167b6aa9896e287eed2b', 
'30313233353637383a3b3c3d3f40414244454647494a4b4c4e4f505153545556', 'ecb-tbl-256: I=44'), ('dcde8b6bd5cf7cc22d9505e3ce81261a', 'd6b0b0c4ba6c7dbe5ed467a1e3f06c2d', '58595a5b5d5e5f60626364656768696a6c6d6e6f71727374767778797b7c7d7e', 'ecb-tbl-256: I=45'), ('e33cf7e524fed781e7042ff9f4b35dc7', 'ec51eb295250c22c2fb01816fb72bcae', '80818283858687888a8b8c8d8f90919294959697999a9b9c9e9fa0a1a3a4a5a6', 'ecb-tbl-256: I=46'), ('27963c8facdf73062867d164df6d064c', 'aded6630a07ce9c7408a155d3bd0d36f', 'a8a9aaabadaeafb0b2b3b4b5b7b8b9babcbdbebfc1c2c3c4c6c7c8c9cbcccdce', 'ecb-tbl-256: I=47'), ('77b1ce386b551b995f2f2a1da994eef8', '697c9245b9937f32f5d1c82319f0363a', 'd0d1d2d3d5d6d7d8dadbdcdddfe0e1e2e4e5e6e7e9eaebeceeeff0f1f3f4f5f6', 'ecb-tbl-256: I=48'), ('f083388b013679efcf0bb9b15d52ae5c', 'aad5ad50c6262aaec30541a1b7b5b19c', 'f8f9fafbfdfefe00020304050708090a0c0d0e0f11121314161718191b1c1d1e', 'ecb-tbl-256: I=49'), ('c5009e0dab55db0abdb636f2600290c8', '7d34b893855341ec625bd6875ac18c0d', '20212223252627282a2b2c2d2f30313234353637393a3b3c3e3f404143444546', 'ecb-tbl-256: I=50'), ('7804881e26cd532d8514d3683f00f1b9', '7ef05105440f83862f5d780e88f02b41', '48494a4b4d4e4f50525354555758595a5c5d5e5f61626364666768696b6c6d6e', 'ecb-tbl-256: I=51'), ('46cddcd73d1eb53e675ca012870a92a3', 'c377c06403382061af2c9c93a8e70df6', '70717273757677787a7b7c7d7f80818284858687898a8b8c8e8f909193949596', 'ecb-tbl-256: I=52'), ('a9fb44062bb07fe130a8e8299eacb1ab', '1dbdb3ffdc052dacc83318853abc6de5', '98999a9b9d9e9fa0a2a3a4a5a7a8a9aaacadaeafb1b2b3b4b6b7b8b9bbbcbdbe', 'ecb-tbl-256: I=53'), ('2b6ff8d7a5cc3a28a22d5a6f221af26b', '69a6eab00432517d0bf483c91c0963c7', 'c0c1c2c3c5c6c7c8cacbcccdcfd0d1d2d4d5d6d7d9dadbdcdedfe0e1e3e4e5e6', 'ecb-tbl-256: I=54'), ('1a9527c29b8add4b0e3e656dbb2af8b4', '0797f41dc217c80446e1d514bd6ab197', 'e8e9eaebedeeeff0f2f3f4f5f7f8f9fafcfdfeff01020304060708090b0c0d0e', 'ecb-tbl-256: I=55'), ('7f99cf2c75244df015eb4b0c1050aeae', '9dfd76575902a637c01343c58e011a03', 
'10111213151617181a1b1c1d1f20212224252627292a2b2c2e2f303133343536', 'ecb-tbl-256: I=56'), ('e84ff85b0d9454071909c1381646c4ed', 'acf4328ae78f34b9fa9b459747cc2658', '38393a3b3d3e3f40424344454748494a4c4d4e4f51525354565758595b5c5d5e', 'ecb-tbl-256: I=57'), ('89afd40f99521280d5399b12404f6db4', 'b0479aea12bac4fe2384cf98995150c6', '60616263656667686a6b6c6d6f70717274757677797a7b7c7e7f808183848586', 'ecb-tbl-256: I=58'), ('a09ef32dbc5119a35ab7fa38656f0329', '9dd52789efe3ffb99f33b3da5030109a', '88898a8b8d8e8f90929394959798999a9c9d9e9fa1a2a3a4a6a7a8a9abacadae', 'ecb-tbl-256: I=59'), ('61773457f068c376c7829b93e696e716', 'abbb755e4621ef8f1214c19f649fb9fd', 'b0b1b2b3b5b6b7b8babbbcbdbfc0c1c2c4c5c6c7c9cacbcccecfd0d1d3d4d5d6', 'ecb-tbl-256: I=60'), ('a34f0cae726cce41dd498747d891b967', 'da27fb8174357bce2bed0e7354f380f9', 'd8d9dadbdddedfe0e2e3e4e5e7e8e9eaecedeeeff1f2f3f4f6f7f8f9fbfcfdfe', 'ecb-tbl-256: I=61'), ('856f59496c7388ee2d2b1a27b7697847', 'c59a0663f0993838f6e5856593bdc5ef', '00010203050607080a0b0c0d0f10111214151617191a1b1c1e1f202123242526', 'ecb-tbl-256: I=62'), ('cb090c593ef7720bd95908fb93b49df4', 'ed60b264b5213e831607a99c0ce5e57e', '28292a2b2d2e2f30323334353738393a3c3d3e3f41424344464748494b4c4d4e', 'ecb-tbl-256: I=63'), ('a0ac75cd2f1923d460fc4d457ad95baf', 'e50548746846f3eb77b8c520640884ed', '50515253555657585a5b5c5d5f60616264656667696a6b6c6e6f707173747576', 'ecb-tbl-256: I=64'), ('2a2b282974777689e8e9eeef525d5c5f', '28282cc7d21d6a2923641e52d188ef0c', '78797a7b7d7e7f80828384858788898a8c8d8e8f91929394969798999b9c9d9e', 'ecb-tbl-256: I=65'), ('909192939390919e0f0e09089788898a', '0dfa5b02abb18e5a815305216d6d4f8e', 'a0a1a2a3a5a6a7a8aaabacadafb0b1b2b4b5b6b7b9babbbcbebfc0c1c3c4c5c6', 'ecb-tbl-256: I=66'), ('777675748d8e8f907170777649464744', '7359635c0eecefe31d673395fb46fb99', 'c8c9cacbcdcecfd0d2d3d4d5d7d8d9dadcdddedfe1e2e3e4e6e7e8e9ebecedee', 'ecb-tbl-256: I=67'), ('717073720605040b2d2c2b2a05fafbf9', '73c679f7d5aef2745c9737bb4c47fb36', 
'f0f1f2f3f5f6f7f8fafbfcfdfe01000204050607090a0b0c0e0f101113141516', 'ecb-tbl-256: I=68'), ('64656667fefdfcc31b1a1d1ca5aaaba8', 'b192bd472a4d2eafb786e97458967626', '18191a1b1d1e1f20222324252728292a2c2d2e2f31323334363738393b3c3d3e', 'ecb-tbl-256: I=69'), ('dbdad9d86a696867b5b4b3b2c8d7d6d5', '0ec327f6c8a2b147598ca3fde61dc6a4', '40414243454647484a4b4c4d4f50515254555657595a5b5c5e5f606163646566', 'ecb-tbl-256: I=70'), ('5c5d5e5fe3e0e1fe31303736333c3d3e', 'fc418eb3c41b859b38d4b6f646629729', '68696a6b6d6e6f70727374757778797a7c7d7e7f81828384868788898b8c8d8e', 'ecb-tbl-256: I=71'), ('545556574b48494673727574546b6a69', '30249e5ac282b1c981ea64b609f3a154', '90919293959697989a9b9c9d9fa0a1a2a4a5a6a7a9aaabacaeafb0b1b3b4b5b6', 'ecb-tbl-256: I=72'), ('ecedeeefc6c5c4bb56575051f5fafbf8', '5e6e08646d12150776bb43c2d78a9703', 'b8b9babbbdbebfc0c2c3c4c5c7c8c9cacccdcecfd1d2d3d4d6d7d8d9dbdcddde', 'ecb-tbl-256: I=73'), ('464744452724252ac9c8cfced2cdcccf', 'faeb3d5de652cd3447dceb343f30394a', 'e0e1e2e3e5e6e7e8eaebecedeff0f1f2f4f5f6f7f9fafbfcfefe010103040506', 'ecb-tbl-256: I=74'), ('e6e7e4e54142435c878681801c131211', 'a8e88706823f6993ef80d05c1c7b2cf0', '08090a0b0d0e0f10121314151718191a1c1d1e1f21222324262728292b2c2d2e', 'ecb-tbl-256: I=75'), ('72737071cfcccdc2f9f8fffe710e0f0c', '8ced86677e6e00a1a1b15968f2d3cce6', '30313233353637383a3b3c3d3f40414244454647494a4b4c4e4f505153545556', 'ecb-tbl-256: I=76'), ('505152537370714ec3c2c5c4010e0f0c', '9fc7c23858be03bdebb84e90db6786a9', '58595a5b5d5e5f60626364656768696a6c6d6e6f71727374767778797b7c7d7e', 'ecb-tbl-256: I=77'), ('a8a9aaab5c5f5e51aeafa8a93d222320', 'b4fbd65b33f70d8cf7f1111ac4649c36', '80818283858687888a8b8c8d8f90919294959697999a9b9c9e9fa0a1a3a4a5a6', 'ecb-tbl-256: I=78'), ('dedfdcddf6f5f4eb10111617fef1f0f3', 'c5c32d5ed03c4b53cc8c1bd0ef0dbbf6', 'a8a9aaabadaeafb0b2b3b4b5b7b8b9babcbdbebfc1c2c3c4c6c7c8c9cbcccdce', 'ecb-tbl-256: I=79'), ('bdbcbfbe5e5d5c530b0a0d0cfac5c4c7', 'd1a7f03b773e5c212464b63709c6a891', 
'd0d1d2d3d5d6d7d8dadbdcdddfe0e1e2e4e5e6e7e9eaebeceeeff0f1f3f4f5f6', 'ecb-tbl-256: I=80'), ('8a8b8889050606f8f4f5f2f3636c6d6e', '6b7161d8745947ac6950438ea138d028', 'f8f9fafbfdfefe00020304050708090a0c0d0e0f11121314161718191b1c1d1e', 'ecb-tbl-256: I=81'), ('a6a7a4a54d4e4f40b2b3b4b539262724', 'fd47a9f7e366ee7a09bc508b00460661', '20212223252627282a2b2c2d2f30313234353637393a3b3c3e3f404143444546', 'ecb-tbl-256: I=82'), ('9c9d9e9fe9eaebf40e0f08099b949596', '00d40b003dc3a0d9310b659b98c7e416', '48494a4b4d4e4f50525354555758595a5c5d5e5f61626364666768696b6c6d6e', 'ecb-tbl-256: I=83'), ('2d2c2f2e1013121dcccdcacbed121310', 'eea4c79dcc8e2bda691f20ac48be0717', '70717273757677787a7b7c7d7f80818284858687898a8b8c8e8f909193949596', 'ecb-tbl-256: I=84'), ('f4f5f6f7edeeefd0eaebecedf7f8f9fa', 'e78f43b11c204403e5751f89d05a2509', '98999a9b9d9e9fa0a2a3a4a5a7a8a9aaacadaeafb1b2b3b4b6b7b8b9bbbcbdbe', 'ecb-tbl-256: I=85'), ('3d3c3f3e282b2a2573727574150a0b08', 'd0f0e3d1f1244bb979931e38dd1786ef', 'c0c1c2c3c5c6c7c8cacbcccdcfd0d1d2d4d5d6d7d9dadbdcdedfe0e1e3e4e5e6', 'ecb-tbl-256: I=86'), ('b6b7b4b5f8fbfae5b4b5b2b3a0afaead', '042e639dc4e1e4dde7b75b749ea6f765', 'e8e9eaebedeeeff0f2f3f4f5f7f8f9fafcfdfeff01020304060708090b0c0d0e', 'ecb-tbl-256: I=87'), ('b7b6b5b4989b9a95878681809ba4a5a6', 'bc032fdd0efe29503a980a7d07ab46a8', '10111213151617181a1b1c1d1f20212224252627292a2b2c2e2f303133343536', 'ecb-tbl-256: I=88'), ('a8a9aaabe5e6e798e9e8efee4748494a', '0c93ac949c0da6446effb86183b6c910', '38393a3b3d3e3f40424344454748494a4c4d4e4f51525354565758595b5c5d5e', 'ecb-tbl-256: I=89'), ('ecedeeefd9dadbd4b9b8bfbe657a7b78', 'e0d343e14da75c917b4a5cec4810d7c2', '60616263656667686a6b6c6d6f70717274757677797a7b7c7e7f808183848586', 'ecb-tbl-256: I=90'), ('7f7e7d7c696a6b74cacbcccd929d9c9f', '0eafb821748408279b937b626792e619', '88898a8b8d8e8f90929394959798999a9c9d9e9fa1a2a3a4a6a7a8a9abacadae', 'ecb-tbl-256: I=91'), ('08090a0b0605040bfffef9f8b9c6c7c4', 'fa1ac6e02d23b106a1fef18b274a553f', 
'b0b1b2b3b5b6b7b8babbbcbdbfc0c1c2c4c5c6c7c9cacbcccecfd0d1d3d4d5d6', 'ecb-tbl-256: I=92'), ('08090a0bf1f2f3ccfcfdfafb68676665', '0dadfe019cd12368075507df33c1a1e9', 'd8d9dadbdddedfe0e2e3e4e5e7e8e9eaecedeeeff1f2f3f4f6f7f8f9fbfcfdfe', 'ecb-tbl-256: I=93'), ('cacbc8c93a393837050403020d121310', '3a0879b414465d9ffbaf86b33a63a1b9', '00010203050607080a0b0c0d0f10111214151617191a1b1c1e1f202123242526', 'ecb-tbl-256: I=94'), ('e9e8ebea8281809f8f8e8988343b3a39', '62199fadc76d0be1805d3ba0b7d914bf', '28292a2b2d2e2f30323334353738393a3c3d3e3f41424344464748494b4c4d4e', 'ecb-tbl-256: I=95'), ('515053524645444bd0d1d6d7340b0a09', '1b06d6c5d333e742730130cf78e719b4', '50515253555657585a5b5c5d5f60616264656667696a6b6c6e6f707173747576', 'ecb-tbl-256: I=96'), ('42434041ecefee1193929594c6c9c8cb', 'f1f848824c32e9dcdcbf21580f069329', '78797a7b7d7e7f80828384858788898a8c8d8e8f91929394969798999b9c9d9e', 'ecb-tbl-256: I=97'), ('efeeedecc2c1c0cf76777071455a5b58', '1a09050cbd684f784d8e965e0782f28a', 'a0a1a2a3a5a6a7a8aaabacadafb0b1b2b4b5b6b7b9babbbcbebfc0c1c3c4c5c6', 'ecb-tbl-256: I=98'), ('5f5e5d5c3f3c3d221d1c1b1a19161714', '79c2969e7ded2ba7d088f3f320692360', 'c8c9cacbcdcecfd0d2d3d4d5d7d8d9dadcdddedfe1e2e3e4e6e7e8e9ebecedee', 'ecb-tbl-256: I=99'), ('000102034142434c1c1d1a1b8d727371', '091a658a2f7444c16accb669450c7b63', 'f0f1f2f3f5f6f7f8fafbfcfdfe01000204050607090a0b0c0e0f101113141516', 'ecb-tbl-256: I=100'), ('8e8f8c8db1b2b38c56575051050a0b08', '97c1e3a72cca65fa977d5ed0e8a7bbfc', '18191a1b1d1e1f20222324252728292a2c2d2e2f31323334363738393b3c3d3e', 'ecb-tbl-256: I=101'), ('a7a6a5a4e8ebeae57f7e7978cad5d4d7', '70c430c6db9a17828937305a2df91a2a', '40414243454647484a4b4c4d4f50515254555657595a5b5c5e5f606163646566', 'ecb-tbl-256: I=102'), ('8a8b888994979689454443429f909192', '629553457fbe2479098571c7c903fde8', '68696a6b6d6e6f70727374757778797a7c7d7e7f81828384868788898b8c8d8e', 'ecb-tbl-256: I=103'), ('8c8d8e8fe0e3e2ed45444342f1cecfcc', 'a25b25a61f612669e7d91265c7d476ba', 
'90919293959697989a9b9c9d9fa0a1a2a4a5a6a7a9aaabacaeafb0b1b3b4b5b6', 'ecb-tbl-256: I=104'), ('fffefdfc4c4f4e31d8d9dedfb6b9b8bb', 'eb7e4e49b8ae0f024570dda293254fed', 'b8b9babbbdbebfc0c2c3c4c5c7c8c9cacccdcecfd1d2d3d4d6d7d8d9dbdcddde', 'ecb-tbl-256: I=105'), ('fdfcfffecccfcec12f2e29286679787b', '38fe15d61cca84516e924adce5014f67', 'e0e1e2e3e5e6e7e8eaebecedeff0f1f2f4f5f6f7f9fafbfcfefe010103040506', 'ecb-tbl-256: I=106'), ('67666564bab9b8a77071767719161714', '3ad208492249108c9f3ebeb167ad0583', '08090a0b0d0e0f10121314151718191a1c1d1e1f21222324262728292b2c2d2e', 'ecb-tbl-256: I=107'), ('9a9b98992d2e2f2084858283245b5a59', '299ba9f9bf5ab05c3580fc26edd1ed12', '30313233353637383a3b3c3d3f40414244454647494a4b4c4e4f505153545556', 'ecb-tbl-256: I=108'), ('a4a5a6a70b0809365c5d5a5b2c232221', '19dc705b857a60fb07717b2ea5717781', '58595a5b5d5e5f60626364656768696a6c6d6e6f71727374767778797b7c7d7e', 'ecb-tbl-256: I=109'), ('464744455754555af3f2f5f4afb0b1b2', 'ffc8aeb885b5efcad06b6dbebf92e76b', '80818283858687888a8b8c8d8f90919294959697999a9b9c9e9fa0a1a3a4a5a6', 'ecb-tbl-256: I=110'), ('323330317675746b7273747549464744', 'f58900c5e0b385253ff2546250a0142b', 'a8a9aaabadaeafb0b2b3b4b5b7b8b9babcbdbebfc1c2c3c4c6c7c8c9cbcccdce', 'ecb-tbl-256: I=111'), ('a8a9aaab181b1a15808186872b141516', '2ee67b56280bc462429cee6e3370cbc1', 'd0d1d2d3d5d6d7d8dadbdcdddfe0e1e2e4e5e6e7e9eaebeceeeff0f1f3f4f5f6', 'ecb-tbl-256: I=112'), ('e7e6e5e4202323ddaaabacad343b3a39', '20db650a9c8e9a84ab4d25f7edc8f03f', 'f8f9fafbfdfefe00020304050708090a0c0d0e0f11121314161718191b1c1d1e', 'ecb-tbl-256: I=113'), ('a8a9aaab2221202fedecebea1e010003', '3c36da169525cf818843805f25b78ae5', '20212223252627282a2b2c2d2f30313234353637393a3b3c3e3f404143444546', 'ecb-tbl-256: I=114'), ('f9f8fbfa5f5c5d42424344450e010003', '9a781d960db9e45e37779042fea51922', '48494a4b4d4e4f50525354555758595a5c5d5e5f61626364666768696b6c6d6e', 'ecb-tbl-256: I=115'), ('57565554f5f6f7f89697909120dfdedd', '6560395ec269c672a3c288226efdba77', 
'70717273757677787a7b7c7d7f80818284858687898a8b8c8e8f909193949596', 'ecb-tbl-256: I=116'), ('f8f9fafbcccfcef1dddcdbda0e010003', '8c772b7a189ac544453d5916ebb27b9a', '98999a9b9d9e9fa0a2a3a4a5a7a8a9aaacadaeafb1b2b3b4b6b7b8b9bbbcbdbe', 'ecb-tbl-256: I=117'), ('d9d8dbda7073727d80818687c2dddcdf', '77ca5468cc48e843d05f78eed9d6578f', 'c0c1c2c3c5c6c7c8cacbcccdcfd0d1d2d4d5d6d7d9dadbdcdedfe0e1e3e4e5e6', 'ecb-tbl-256: I=118'), ('c5c4c7c6080b0a1588898e8f68676665', '72cdcc71dc82c60d4429c9e2d8195baa', 'e8e9eaebedeeeff0f2f3f4f5f7f8f9fafcfdfeff01020304060708090b0c0d0e', 'ecb-tbl-256: I=119'), ('83828180dcdfded186878081f0cfcecd', '8080d68ce60e94b40b5b8b69eeb35afa', '10111213151617181a1b1c1d1f20212224252627292a2b2c2e2f303133343536', 'ecb-tbl-256: I=120'), ('98999a9bdddedfa079787f7e0a050407', '44222d3cde299c04369d58ac0eba1e8e', '38393a3b3d3e3f40424344454748494a4c4d4e4f51525354565758595b5c5d5e', 'ecb-tbl-256: I=121'), ('cecfcccd4f4c4d429f9e9998dfc0c1c2', '9b8721b0a8dfc691c5bc5885dbfcb27a', '60616263656667686a6b6c6d6f70717274757677797a7b7c7e7f808183848586', 'ecb-tbl-256: I=122'), ('404142436665647b29282f2eaba4a5a6', '0dc015ce9a3a3414b5e62ec643384183', '88898a8b8d8e8f90929394959798999a9c9d9e9fa1a2a3a4a6a7a8a9abacadae', 'ecb-tbl-256: I=123'), ('33323130e6e5e4eb23222524dea1a0a3', '705715448a8da412025ce38345c2a148', 'b0b1b2b3b5b6b7b8babbbcbdbfc0c1c2c4c5c6c7c9cacbcccecfd0d1d3d4d5d6', 'ecb-tbl-256: I=124'), ('cfcecdccf6f5f4cbe6e7e0e199969794', 'c32b5b0b6fbae165266c569f4b6ecf0b', 'd8d9dadbdddedfe0e2e3e4e5e7e8e9eaecedeeeff1f2f3f4f6f7f8f9fbfcfdfe', 'ecb-tbl-256: I=125'), ('babbb8b97271707fdcdddadb29363734', '4dca6c75192a01ddca9476af2a521e87', '00010203050607080a0b0c0d0f10111214151617191a1b1c1e1f202123242526', 'ecb-tbl-256: I=126'), ('c9c8cbca4447465926272021545b5a59', '058691e627ecbc36ac07b6db423bd698', '28292a2b2d2e2f30323334353738393a3c3d3e3f41424344464748494b4c4d4e', 'ecb-tbl-256: I=127'), ('050407067477767956575051221d1c1f', '7444527095838fe080fc2bcdd30847eb', 
'50515253555657585a5b5c5d5f60616264656667696a6b6c6e6f707173747576', 'ecb-tbl-256: I=128'), # FIPS PUB 800-38A test vectors, 2001 edition. Annex F. ('6bc1bee22e409f96e93d7e117393172a'+'ae2d8a571e03ac9c9eb76fac45af8e51'+ '30c81c46a35ce411e5fbc1191a0a52ef'+'f69f2445df4f9b17ad2b417be66c3710', '3ad77bb40d7a3660a89ecaf32466ef97'+'f5d3d58503b9699de785895a96fdbaaf'+ '43b1cd7f598ece23881b00e3ed030688'+'7b0c785e27e8ad3f8223207104725dd4', '2b7e151628aed2a6abf7158809cf4f3c', 'NIST 800-38A, F.1.1, ECB and AES-128'), ('6bc1bee22e409f96e93d7e117393172a'+'ae2d8a571e03ac9c9eb76fac45af8e51'+ '30c81c46a35ce411e5fbc1191a0a52ef'+'f69f2445df4f9b17ad2b417be66c3710', 'bd334f1d6e45f25ff712a214571fa5cc'+'974104846d0ad3ad7734ecb3ecee4eef'+ 'ef7afd2270e2e60adce0ba2face6444e'+'9a4b41ba738d6c72fb16691603c18e0e', '8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b', 'NIST 800-38A, F.1.3, ECB and AES-192'), ('6bc1bee22e409f96e93d7e117393172a'+'ae2d8a571e03ac9c9eb76fac45af8e51'+ '30c81c46a35ce411e5fbc1191a0a52ef'+'f69f2445df4f9b17ad2b417be66c3710', 'f3eed1bdb5d2a03c064b5a7e3db181f8'+'591ccb10d410ed26dc5ba74a31362870'+ 'b6ed21b99ca6f4f9f153e7b1beafed1d'+'23304b7a39f9f3ff067d8d8f9e24ecc7', '603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4', 'NIST 800-38A, F.1.3, ECB and AES-256'), ('6bc1bee22e409f96e93d7e117393172a'+'ae2d8a571e03ac9c9eb76fac45af8e51'+ '30c81c46a35ce411e5fbc1191a0a52ef'+'f69f2445df4f9b17ad2b417be66c3710', '7649abac8119b246cee98e9b12e9197d'+'5086cb9b507219ee95db113a917678b2'+ '73bed6b8e3c1743b7116e69e22229516'+'3ff1caa1681fac09120eca307586e1a7', '2b7e151628aed2a6abf7158809cf4f3c', 'NIST 800-38A, F.2.1, CBC and AES-128', dict(mode='CBC', iv='000102030405060708090a0b0c0d0e0f')), ('6bc1bee22e409f96e93d7e117393172a'+'ae2d8a571e03ac9c9eb76fac45af8e51'+ '30c81c46a35ce411e5fbc1191a0a52ef'+'f69f2445df4f9b17ad2b417be66c3710', '4f021db243bc633d7178183a9fa071e8'+'b4d9ada9ad7dedf4e5e738763f69145a'+ '571b242012fb7ae07fa9baac3df102e0'+'08b0e27988598881d920a9e64f5615cd', 
'8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b', 'NIST 800-38A, F.2.1, CBC and AES-192', dict(mode='CBC', iv='000102030405060708090a0b0c0d0e0f')), ('6bc1bee22e409f96e93d7e117393172a'+'ae2d8a571e03ac9c9eb76fac45af8e51'+ '30c81c46a35ce411e5fbc1191a0a52ef'+'f69f2445df4f9b17ad2b417be66c3710', 'f58c4c04d6e5f1ba779eabfb5f7bfbd6'+'9cfc4e967edb808d679f777bc6702c7d'+ '39f23369a9d9bacfa530e26304231461'+'b2eb05e2c39be9fcda6c19078c6a9d1b', '603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4', 'NIST 800-38A, F.2.1, CBC and AES-256', dict(mode='CBC', iv='000102030405060708090a0b0c0d0e0f')), # Skip CFB-1 since it is not supported by PyCrypto ('6bc1bee22e409f96e93d7e117393172aae2d','3b79424c9c0dd436bace9e0ed4586a4f32b9', '2b7e151628aed2a6abf7158809cf4f3c', 'NIST 800-38A, F.3.7, CFB-8 and AES-128', dict(mode='CFB', iv='000102030405060708090a0b0c0d0e0f', segment_size=8)), ('6bc1bee22e409f96e93d7e117393172aae2d','cda2521ef0a905ca44cd057cbf0d47a0678a', '8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b', 'NIST 800-38A, F.3.9, CFB-8 and AES-192', dict(mode='CFB', iv='000102030405060708090a0b0c0d0e0f', segment_size=8)), ('6bc1bee22e409f96e93d7e117393172aae2d','dc1f1a8520a64db55fcc8ac554844e889700', '603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4', 'NIST 800-38A, F.3.11, CFB-8 and AES-256', dict(mode='CFB', iv='000102030405060708090a0b0c0d0e0f', segment_size=8)), ('6bc1bee22e409f96e93d7e117393172a'+'ae2d8a571e03ac9c9eb76fac45af8e51'+ '30c81c46a35ce411e5fbc1191a0a52ef'+'f69f2445df4f9b17ad2b417be66c3710', '3b3fd92eb72dad20333449f8e83cfb4a'+'c8a64537a0b3a93fcde3cdad9f1ce58b'+ '26751f67a3cbb140b1808cf187a4f4df'+'c04b05357c5d1c0eeac4c66f9ff7f2e6', '2b7e151628aed2a6abf7158809cf4f3c', 'NIST 800-38A, F.3.13, CFB-128 and AES-128', dict(mode='CFB', iv='000102030405060708090a0b0c0d0e0f', segment_size=128)), ('6bc1bee22e409f96e93d7e117393172a'+'ae2d8a571e03ac9c9eb76fac45af8e51'+ '30c81c46a35ce411e5fbc1191a0a52ef'+'f69f2445df4f9b17ad2b417be66c3710', 
'cdc80d6fddf18cab34c25909c99a4174'+'67ce7f7f81173621961a2b70171d3d7a'+ '2e1e8a1dd59b88b1c8e60fed1efac4c9'+'c05f9f9ca9834fa042ae8fba584b09ff', '8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b', 'NIST 800-38A, F.3.15, CFB-128 and AES-192', dict(mode='CFB', iv='000102030405060708090a0b0c0d0e0f', segment_size=128)), ('6bc1bee22e409f96e93d7e117393172a'+'ae2d8a571e03ac9c9eb76fac45af8e51'+ '30c81c46a35ce411e5fbc1191a0a52ef'+'f69f2445df4f9b17ad2b417be66c3710', 'dc7e84bfda79164b7ecd8486985d3860'+'39ffed143b28b1c832113c6331e5407b'+ 'df10132415e54b92a13ed0a8267ae2f9'+'75a385741ab9cef82031623d55b1e471', '603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4', 'NIST 800-38A, F.3.17, CFB-128 and AES-256', dict(mode='CFB', iv='000102030405060708090a0b0c0d0e0f', segment_size=128)), ('6bc1bee22e409f96e93d7e117393172a'+'ae2d8a571e03ac9c9eb76fac45af8e51'+ '30c81c46a35ce411e5fbc1191a0a52ef'+'f69f2445df4f9b17ad2b417be66c3710', '3b3fd92eb72dad20333449f8e83cfb4a'+'7789508d16918f03f53c52dac54ed825'+ '9740051e9c5fecf64344f7a82260edcc'+'304c6528f659c77866a510d9c1d6ae5e', '2b7e151628aed2a6abf7158809cf4f3c', 'NIST 800-38A, F.4.1, OFB and AES-128', dict(mode='OFB', iv='000102030405060708090a0b0c0d0e0f')), ('6bc1bee22e409f96e93d7e117393172a'+'ae2d8a571e03ac9c9eb76fac45af8e51'+ '30c81c46a35ce411e5fbc1191a0a52ef'+'f69f2445df4f9b17ad2b417be66c3710', 'cdc80d6fddf18cab34c25909c99a4174'+'fcc28b8d4c63837c09e81700c1100401'+ '8d9a9aeac0f6596f559c6d4daf59a5f2'+'6d9f200857ca6c3e9cac524bd9acc92a', '8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b', 'NIST 800-38A, F.4.3, OFB and AES-192', dict(mode='OFB', iv='000102030405060708090a0b0c0d0e0f')), ('6bc1bee22e409f96e93d7e117393172a'+'ae2d8a571e03ac9c9eb76fac45af8e51'+ '30c81c46a35ce411e5fbc1191a0a52ef'+'f69f2445df4f9b17ad2b417be66c3710', 'dc7e84bfda79164b7ecd8486985d3860'+'4febdc6740d20b3ac88f6ad82a4fb08d'+ '71ab47a086e86eedf39d1c5bba97c408'+'0126141d67f37be8538f5a8be740e484', '603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4', 
'NIST 800-38A, F.4.5, OFB and AES-256', dict(mode='OFB', iv='000102030405060708090a0b0c0d0e0f')), ('6bc1bee22e409f96e93d7e117393172a'+'ae2d8a571e03ac9c9eb76fac45af8e51'+ '30c81c46a35ce411e5fbc1191a0a52ef'+'f69f2445df4f9b17ad2b417be66c3710', '874d6191b620e3261bef6864990db6ce'+'9806f66b7970fdff8617187bb9fffdff'+ '5ae4df3edbd5d35e5b4f09020db03eab'+'1e031dda2fbe03d1792170a0f3009cee', '2b7e151628aed2a6abf7158809cf4f3c', 'NIST 800-38A, F.5.1, CTR and AES-128', dict(mode='CTR', ctr_params=dict(nbits=16, prefix='f0f1f2f3f4f5f6f7f8f9fafbfcfd', initial_value=0xfeff))), ('6bc1bee22e409f96e93d7e117393172a'+'ae2d8a571e03ac9c9eb76fac45af8e51'+ '30c81c46a35ce411e5fbc1191a0a52ef'+'f69f2445df4f9b17ad2b417be66c3710', '1abc932417521ca24f2b0459fe7e6e0b'+'090339ec0aa6faefd5ccc2c6f4ce8e94'+ '1e36b26bd1ebc670d1bd1d665620abf7'+'4f78a7f6d29809585a97daec58c6b050', '8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b', 'NIST 800-38A, F.5.3, CTR and AES-192', dict(mode='CTR', ctr_params=dict(nbits=16, prefix='f0f1f2f3f4f5f6f7f8f9fafbfcfd', initial_value=0xfeff))), ('6bc1bee22e409f96e93d7e117393172a'+'ae2d8a571e03ac9c9eb76fac45af8e51'+ '30c81c46a35ce411e5fbc1191a0a52ef'+'f69f2445df4f9b17ad2b417be66c3710', '601ec313775789a5b7a7f504bbf3d228'+'f443e3ca4d62b59aca84e990cacaf5c5'+ '2b0930daa23de94ce87017ba2d84988d'+'dfc9c58db67aada613c2dd08457941a6', '603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4', 'NIST 800-38A, F.5.5, CTR and AES-256', dict(mode='CTR', ctr_params=dict(nbits=16, prefix='f0f1f2f3f4f5f6f7f8f9fafbfcfd', initial_value=0xfeff))), # RFC 3686 test vectors # This is a list of (plaintext, ciphertext, key[, description[, params]]) tuples. 
('53696e676c6520626c6f636b206d7367', 'e4095d4fb7a7b3792d6175a3261311b8', 'ae6852f8121067cc4bf7a5765577f39e', 'RFC 3686 Test Vector #1: Encrypting 16 octets using AES-CTR with 128-bit key', dict(mode='CTR', ctr_params=dict(nbits=32, prefix='00000030'+'0000000000000000'))), ('000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f', '5104a106168a72d9790d41ee8edad388eb2e1efc46da57c8fce630df9141be28', '7e24067817fae0d743d6ce1f32539163', 'RFC 3686 Test Vector #2: Encrypting 32 octets using AES-CTR with 128-bit key', dict(mode='CTR', ctr_params=dict(nbits=32, prefix='006cb6db'+'c0543b59da48d90b'))), ('000102030405060708090a0b0c0d0e0f'+'101112131415161718191a1b1c1d1e1f'+'20212223', 'c1cf48a89f2ffdd9cf4652e9efdb72d7'+'4540a42bde6d7836d59a5ceaaef31053'+'25b2072f', '7691be035e5020a8ac6e618529f9a0dc', 'RFC 3686 Test Vector #3: Encrypting 36 octets using AES-CTR with 128-bit key', dict(mode='CTR', ctr_params=dict(nbits=32, prefix='00e0017b'+'27777f3f4a1786f0'))), ('53696e676c6520626c6f636b206d7367', '4b55384fe259c9c84e7935a003cbe928', '16af5b145fc9f579c175f93e3bfb0eed'+'863d06ccfdb78515', 'RFC 3686 Test Vector #4: Encrypting 16 octets using AES-CTR with 192-bit key', dict(mode='CTR', ctr_params=dict(nbits=32, prefix='00000048'+'36733c147d6d93cb'))), ('000102030405060708090a0b0c0d0e0f'+'101112131415161718191a1b1c1d1e1f', '453243fc609b23327edfaafa7131cd9f'+'8490701c5ad4a79cfc1fe0ff42f4fb00', '7c5cb2401b3dc33c19e7340819e0f69c'+'678c3db8e6f6a91a', 'RFC 3686 Test Vector #5: Encrypting 32 octets using AES-CTR with 192-bit key', dict(mode='CTR', ctr_params=dict(nbits=32, prefix='0096b03b'+'020c6eadc2cb500d'))), ('000102030405060708090a0b0c0d0e0f'+'101112131415161718191a1b1c1d1e1f'+'20212223', '96893fc55e5c722f540b7dd1ddf7e758'+'d288bc95c69165884536c811662f2188'+'abee0935', '02bf391ee8ecb159b959617b0965279b'+'f59b60a786d3e0fe', 'RFC 3686 Test Vector #6: Encrypting 36 octets using AES-CTR with 192-bit key', dict(mode='CTR', ctr_params=dict(nbits=32, 
prefix='0007bdfd'+'5cbd60278dcc0912'))), ('53696e676c6520626c6f636b206d7367', '145ad01dbf824ec7560863dc71e3e0c0', '776beff2851db06f4c8a0542c8696f6c'+'6a81af1eec96b4d37fc1d689e6c1c104', 'RFC 3686 Test Vector #7: Encrypting 16 octets using AES-CTR with 256-bit key', dict(mode='CTR', ctr_params=dict(nbits=32, prefix='00000060'+'db5672c97aa8f0b2'))), ('000102030405060708090a0b0c0d0e0f'+'101112131415161718191a1b1c1d1e1f', 'f05e231b3894612c49ee000b804eb2a9'+'b8306b508f839d6a5530831d9344af1c', 'f6d66d6bd52d59bb0796365879eff886'+'c66dd51a5b6a99744b50590c87a23884', 'RFC 3686 Test Vector #8: Encrypting 32 octets using AES-CTR with 256-bit key', dict(mode='CTR', ctr_params=dict(nbits=32, prefix='00faac24'+'c1585ef15a43d875'))), ('000102030405060708090a0b0c0d0e0f'+'101112131415161718191a1b1c1d1e1f'+'20212223', 'eb6c52821d0bbbf7ce7594462aca4faa'+'b407df866569fd07f48cc0b583d6071f'+'1ec0e6b8', 'ff7a617ce69148e4f1726e2f43581de2'+'aa62d9f805532edff1eed687fb54153d', 'RFC 3686 Test Vector #9: Encrypting 36 octets using AES-CTR with 256-bit key', dict(mode='CTR', ctr_params=dict(nbits=32, prefix='001cc5b7'+'51a51d70a1c11148'))), # The following test vectors have been generated with gpg v1.4.0. # The command line used was: # # gpg -c -z 0 --cipher-algo AES --passphrase secret_passphrase \ # --disable-mdc --s2k-mode 0 --output ct pt # # As result, the content of the file 'pt' is encrypted with a key derived # from 'secret_passphrase' and written to file 'ct'. # Test vectors must be extracted from 'ct', which is a collection of # TLVs (see RFC4880 for all details): # - the encrypted data (with the encrypted IV as prefix) is the payload # of the TLV with tag 9 (Symmetrical Encrypted Data Packet). # This is the ciphertext in the test vector. # - inside the encrypted part, there is a further layer of TLVs. One must # look for tag 11 (Literal Data Packet); in its payload, after a short # but time dependent header, there is the content of file 'pt'. 
# In the test vector, the plaintext is the complete set of TLVs that gets # encrypted. It is not just the content of 'pt'. # - the key is the leftmost 16 bytes of the SHA1 digest of the password. # The test vector contains such shortened digest. # # Note that encryption uses a clear IV, and decryption an encrypted IV ( 'ac18620270744fb4f647426c61636b4361745768697465436174', # Plaintext, 'BlackCatWhiteCat' 'dc6b9e1f095de609765c59983db5956ae4f63aea7405389d2ebb', # Ciphertext '5baa61e4c9b93f3f0682250b6cf8331b', # Key (hash of 'password') 'GPG Test Vector #1', dict(mode='OPENPGP', iv='3d7d3e62282add7eb203eeba5c800733', encrypted_iv='fd934601ef49cb58b6d9aebca6056bdb96ef' ) ), ] def get_tests(config={}): from Crypto.Cipher import AES from common import make_block_tests return make_block_tests(AES, "AES", test_data) if __name__ == '__main__': import unittest suite = lambda: unittest.TestSuite(get_tests()) unittest.main(defaultTest='suite') # vim:set ts=4 sw=4 sts=4 expandtab:
apache-2.0
hujiajie/chromium-crosswalk
tools/telemetry/telemetry/internal/platform/profiler/strace_profiler.py
19
8015
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Profiler that attaches strace to browser processes.

The raw strace log is parsed and converted to the Trace Event JSON format so
that it can be loaded into chrome://tracing alongside the browser's own trace.
"""

import json
import logging
import re
import signal
import subprocess
import sys
import tempfile

from telemetry.internal.platform import profiler
from telemetry.timeline import model
from telemetry.timeline import trace_data as trace_data_module


# Parses one line of strace output, for example:
# 6052 1311456063.159722 read(8, "\1\0\0\0\0\0\0\0", 8) = 8 <0.000022>
_STRACE_LINE_RE = re.compile(
    r'^(?P<tid>\d+)\s+'
    r'(?P<ts>\d+)'
    r'(?P<micro>.\d+)\s+'
    r'(?P<func>.*?)'
    r'[(](?P<args>.*?)[)]\s+=\s+'
    r'(?P<ret>.*?)\s+'
    r'<(?P<dur>[\d.]+)>$')

# A syscall interrupted by a context switch; strace prints the remainder of
# the call later as a separate "resumed" line.
_UNFINISHED_LINE_RE = re.compile(
    r'^(?P<tid>\d+)\s+'
    r'(?P<line>.*?)'
    r'<unfinished ...>$')

_RESUMED_LINE_RE = re.compile(
    r'^(?P<tid>\d+)\s+'
    r'(?P<ts>\d+)'
    r'(?P<micro>.\d+)\s+'
    r'<[.][.][.]\s(?P<func>.*?)\sresumed>'
    r'(?P<line>.*?)$')

_KILLED_LINE_RE = re.compile(
    r'^(?P<tid>\d+)\s+'
    r'(?P<ts>\d+)'
    r'(?P<micro>.\d+)\s+'
    r'[+][+][+] killed by SIGKILL [+][+][+]$')


def _ToText(value):
  """Returns |value| as text, dropping undecodable bytes.

  Replaces the Python-2-only unicode(value, errors='ignore') so the module
  works on both Python 2 and Python 3.
  """
  if isinstance(value, bytes):
    return value.decode('ascii', errors='ignore')
  return value


def _StraceToChromeTrace(pid, infile):
  """Returns chrometrace json format for |infile| strace output."""
  # Map of fd:file_name for open file descriptors. Useful for displaying
  # file name instead of the descriptor number.
  fd_map = {}

  # Map of tid:interrupted_call for the interrupted call on each thread. It is
  # possible to context switch during a system call. In this case we must
  # match up the lines.
  interrupted_call_map = {}

  out = []
  with open(infile, 'r') as f:
    for line in f.readlines():
      # Ignore kill lines for now.
      m = _KILLED_LINE_RE.match(line)
      if m:
        continue

      # If this line is interrupted, then remember it and continue.
      m = _UNFINISHED_LINE_RE.match(line)
      if m:
        assert m.group('tid') not in interrupted_call_map
        interrupted_call_map[m.group('tid')] = line
        continue

      # If this is a resume of a previous line, stitch it together.
      interrupted = False
      m = _RESUMED_LINE_RE.match(line)
      if m:
        interrupted = True
        assert m.group('tid') in interrupted_call_map
        line = interrupted_call_map[m.group('tid')].replace(
            '<unfinished ...>', m.group('line'))
        del interrupted_call_map[m.group('tid')]

      # At this point we can do a normal match.
      m = _STRACE_LINE_RE.match(line)
      if not m:
        if ('exit' not in line and
            'Profiling timer expired' not in line and
            '<unavailable>' not in line):
          # logging.warn is deprecated; use logging.warning with lazy args.
          logging.warning('Failed to parse line: %s', line)
        continue

      # Timestamps are converted from seconds to microseconds.
      ts_begin = int(1000000 * (int(m.group('ts')) + float(m.group('micro'))))
      ts_end = ts_begin + int(1000000 * float(m.group('dur')))
      tid = int(m.group('tid'))
      function_name = _ToText(m.group('func'))
      function_args = _ToText(m.group('args'))
      ret = _ToText(m.group('ret'))
      cat = 'strace'

      possible_fd_arg = None
      first_arg = function_args.split(',')[0]
      if first_arg and first_arg.strip().isdigit():
        possible_fd_arg = first_arg.strip()

      if function_name == 'open' and ret.isdigit():
        # 1918 1311606151.649379 open("/foo/bar.so", O_RDONLY) = 7 <0.000088>
        fd_map[ret] = first_arg

      args = {
          'args': function_args,
          'ret': ret,
          }
      if interrupted:
        args['interrupted'] = True
      if possible_fd_arg and possible_fd_arg in fd_map:
        args['fd%s' % first_arg] = fd_map[possible_fd_arg]

      out.append({
          'cat': cat,
          'pid': pid,
          'tid': tid,
          'ts': ts_begin,
          'ph': 'B',  # Begin
          'name': function_name,
          })
      out.append({
          'cat': cat,
          'pid': pid,
          'tid': tid,
          'ts': ts_end,
          'ph': 'E',  # End
          'name': function_name,
          'args': args,
          })

  return out


def _GenerateTraceMetadata(timeline_model):
  """Returns process-name/thread-name metadata events for the trace viewer."""
  out = []
  for process in timeline_model.processes:
    out.append({
        'name': 'process_name',
        'ph': 'M',  # Metadata
        'pid': process,
        'args': {
          'name': timeline_model.processes[process].name
        }
      })
    for thread in timeline_model.processes[process].threads:
      out.append({
          'name': 'thread_name',
          'ph': 'M',  # Metadata
          'pid': process,
          'tid': thread,
          'args': {
            'name': timeline_model.processes[process].threads[thread].name
          }
        })
  return out


class _SingleProcessStraceProfiler(object):
  """An internal class for using strace for a given process."""
  # NOTE: the original docstring said "perf" — copy-paste from another
  # profiler; this class drives strace.

  def __init__(self, pid, output_file, platform_backend):
    self._pid = pid
    self._platform_backend = platform_backend
    self._output_file = output_file
    # Temp file only collects strace's own stdout/stderr; strace writes the
    # syscall log to |output_file| via -o.  (The original passed buffering=0,
    # which is invalid for text mode on Python 3; the child process writes to
    # the fd directly, so buffering of the Python wrapper is irrelevant.)
    self._tmp_output_file = tempfile.NamedTemporaryFile('w')
    self._proc = subprocess.Popen(
        ['strace', '-ttt', '-f', '-T', '-p', str(pid), '-o', output_file],
        stdout=self._tmp_output_file, stderr=subprocess.STDOUT)

  def CollectProfile(self):
    """Stops strace and returns the converted chrometrace event list."""
    if ('renderer' in self._output_file and
        not self._platform_backend.GetCommandLine(self._pid)):
      logging.warning('Renderer was swapped out during profiling. '
                      'To collect a full profile rerun with '
                      '"--extra-browser-args=--single-process"')
    self._proc.send_signal(signal.SIGINT)
    exit_code = self._proc.wait()
    try:
      if exit_code:
        raise Exception('strace failed with exit code %d. Output:\n%s' % (
            exit_code, self._GetStdOut()))
    finally:
      self._tmp_output_file.close()

    return _StraceToChromeTrace(self._pid, self._output_file)

  def _GetStdOut(self):
    """Returns whatever strace wrote to stdout/stderr so far ('' on error)."""
    self._tmp_output_file.flush()
    try:
      with open(self._tmp_output_file.name) as f:
        return f.read()
    except IOError:
      return ''


class StraceProfiler(profiler.Profiler):
  """Profiler plugin that straces every browser process (except zygote)."""

  def __init__(self, browser_backend, platform_backend, output_path, state):
    super(StraceProfiler, self).__init__(
        browser_backend, platform_backend, output_path, state)
    assert self._browser_backend.supports_tracing
    self._browser_backend.browser.StartTracing(None, timeout=10)
    process_output_file_map = self._GetProcessOutputFileMap()
    self._process_profilers = []
    self._output_file = output_path + '.json'
    for pid, output_file in process_output_file_map.items():
      if 'zygote' in output_file:
        continue
      self._process_profilers.append(
          _SingleProcessStraceProfiler(pid, output_file, platform_backend))

  @classmethod
  def name(cls):
    return 'strace'

  @classmethod
  def is_supported(cls, browser_type):
    # sys.platform is 'linux2' on Python 2 but 'linux' on Python 3.3+; the
    # original equality check against 'linux2' always failed on Python 3.
    if not sys.platform.startswith('linux'):
      return False
    # TODO(tonyg): This should be supported on android and cros.
    if (browser_type.startswith('android') or
        browser_type.startswith('cros')):
      return False
    return True

  @classmethod
  def CustomizeBrowserOptions(cls, browser_type, options):
    options.AppendExtraBrowserArgs([
        '--no-sandbox',
        '--allow-sandbox-debugging'
    ])

  def CollectProfile(self):
    """Stops tracing, merges all per-process traces and writes the JSON."""
    print('Processing trace...')

    out_json = []
    for single_process in self._process_profilers:
      out_json.extend(single_process.CollectProfile())

    trace_data_builder = trace_data_module.TraceDataBuilder()
    self._browser_backend.browser.StopTracing(trace_data_builder)
    timeline_model = model.TimelineModel(trace_data_builder.AsData())
    out_json.extend(_GenerateTraceMetadata(timeline_model))

    with open(self._output_file, 'w') as f:
      f.write(json.dumps(out_json, separators=(',', ':')))

    print('Trace saved as %s' % self._output_file)
    print('To view, open in chrome://tracing')

    return [self._output_file]
bsd-3-clause
kaixinjxq/crosswalk-test-suite
misc/webapi-service-docroot-tests/inst.py
19
2515
#!/usr/bin/env python

"""Pushes (installs) or removes the webapi-service-docroot test payload
(docroot.zip) on an Android device via adb.

Usage: inst.py [-s <device>] [-i | -u]
"""

import os
import shutil
import glob
import time
import sys
import subprocess
from optparse import OptionParser, make_option

SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARAMETERS = None
ADB_CMD = "adb"


def doCMD(cmd):
    """Run |cmd| in a shell, echoing each output line as it arrives.

    Returns a (exit_code, output_lines) tuple.  Timeout handling is
    deliberately omitted in this short script; the calling tool does it.
    """
    print('-->> "%s"' % cmd)
    output = []
    cmd_return_code = 1
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)

    while True:
        raw_line = cmd_proc.stdout.readline()
        if isinstance(raw_line, bytes):  # Python 3 pipes yield bytes.
            raw_line = raw_line.decode('utf-8', 'replace')
        output_line = raw_line.strip("\r\n")
        cmd_return_code = cmd_proc.poll()
        if output_line == '' and cmd_return_code is not None:
            break
        sys.stdout.write("%s\n" % output_line)
        sys.stdout.flush()
        output.append(output_line)

    return (cmd_return_code, output)


def uninstPKGs():
    """Remove the pushed archive from the device; True on success."""
    cmd = "%s -s %s shell rm /sdcard/docroot.zip" % (
        ADB_CMD, PARAMETERS.device)
    (return_code, _output) = doCMD(cmd)
    # BUG FIX: the exit code used to be captured and ignored, so failures
    # (e.g. unreachable device) were silently reported as success.
    return return_code == 0


def instPKGs():
    """Push the archive to the device's sdcard; True on success."""
    cmd = "%s -s %s push %s/docroot.zip /sdcard/" % (
        ADB_CMD, PARAMETERS.device, SCRIPT_DIR)
    (return_code, _output) = doCMD(cmd)
    # See uninstPKGs(): propagate the adb exit status instead of always True.
    return return_code == 0


def main():
    try:
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-s", dest="device", action="store", help="Specify device")
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true",
            help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true",
            help="Uninstall package")
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception as e:
        print("Got wrong option: %s, exit ..." % e)
        sys.exit(1)

    # Auto-detect the first connected device when -s is not given.
    if not PARAMETERS.device:
        (return_code, output) = doCMD("adb devices")
        for line in output:
            if line.find("\tdevice") != -1:
                PARAMETERS.device = line.split("\t")[0]
                break

    if not PARAMETERS.device:
        print("No device found")
        sys.exit(1)

    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print("-i and -u are conflict")
        sys.exit(1)

    if PARAMETERS.buninstpkg:
        if not uninstPKGs():
            sys.exit(1)
    else:
        if not instPKGs():
            sys.exit(1)


if __name__ == "__main__":
    main()
    sys.exit(0)
bsd-3-clause
n9code/calm
calm/testing.py
1
4809
"""
This is the testing module for Calm applications.

This defines handy base classes with their utilities, so that you can test
your Calm applications more conveniently and with less code.
"""
import json

from tornado.testing import AsyncHTTPTestCase
from tornado.websocket import websocket_connect

from calm.core import CalmApp


class CalmHTTPTestCase(AsyncHTTPTestCase):
    """
    This is the base class to inherit in order to test your Calm app.

    You may use this to test only the HTTP part of your application.
    """
    def get_calm_app(self):
        """
        This method needs to be implemented by the user.

        Simply return an instance of your Calm application so that Calm
        will know what are you testing.
        """
        pass  # pragma: no cover

    def get_app(self):
        """This one is for Tornado, returns the app under test."""
        calm_app = self.get_calm_app()
        if not isinstance(calm_app, CalmApp):
            raise NotImplementedError(  # pragma: no cover
                "Please implement CalmTestCase.get_calm_app()"
            )
        return calm_app.make_app()

    def _request(self, url, *args,
                 expected_code=200, expected_body=None,
                 expected_json_body=None, query_args=None,
                 json_body=None, **kwargs):
        """
        Makes a request to the `url` of the app and makes assertions.
        """
        # Build the query fragment (if any) and attach it to the URL.
        query_string = ''
        if query_args is not None:
            query_string = '&'.join(
                '{}={}'.format(key, value)
                for key, value in query_args.items()
            )
        if query_string:
            url = '{}?{}'.format(url, query_string)

        # Request bodies only make sense for POST and PUT.
        wants_body = kwargs.get('body') or json_body
        if wants_body and kwargs['method'] not in ('POST', 'PUT'):
            raise Exception(  # pragma: no cover
                "Cannot send body with methods other than POST and PUT"
            )

        # POST/PUT always carry a body; default to an empty JSON object.
        if not kwargs.get('body') and kwargs['method'] in ('POST', 'PUT'):
            kwargs['body'] = json.dumps(json_body) if json_body else '{}'

        resp = self.fetch(url, *args, **kwargs)

        self.assertEqual(resp.code, expected_code)
        if expected_body:
            self.assertEqual(resp.body.decode('utf-8'),
                             expected_body)  # pragma: no cover
        if expected_json_body:
            decoded_body = json.loads(resp.body.decode('utf-8'))
            self.assertEqual(expected_json_body, decoded_body)

        return resp

    def get(self, url, *args, **kwargs):
        """Makes a `GET` request to the `url` of your app."""
        kwargs['method'] = 'GET'
        return self._request(url, *args, **kwargs)

    def post(self, url, *args, **kwargs):
        """Makes a `POST` request to the `url` of your app."""
        kwargs['method'] = 'POST'
        return self._request(url, *args, **kwargs)

    def put(self, url, *args, **kwargs):
        """Makes a `PUT` request to the `url` of your app."""
        kwargs['method'] = 'PUT'
        return self._request(url, *args, **kwargs)

    def delete(self, url, *args, **kwargs):
        """Makes a `DELETE` request to the `url` of your app."""
        kwargs['method'] = 'DELETE'
        return self._request(url, *args, **kwargs)


class CalmWebSocketTestCase(AsyncHTTPTestCase):
    """
    This is the base class to inherit in order to test your WS handlers.
    """
    def __init__(self, *args, **kwargs):
        super(CalmWebSocketTestCase, self).__init__(*args, **kwargs)
        self._websocket = None

    def get_calm_app(self):
        """
        This method needs to be implemented by the user.

        Simply return an instance of your Calm application so that Calm
        will know what are you testing.
        """
        pass  # pragma: no cover

    def get_app(self):
        """This one is for Tornado, returns the app under test."""
        calm_app = self.get_calm_app()
        if not isinstance(calm_app, CalmApp):
            raise NotImplementedError(  # pragma: no cover
                "Please implement CalmTestCase.get_calm_app()"
            )
        return calm_app.make_app()

    async def init_websocket(self, url):
        """Initiate a new WebSocket connection."""
        self._websocket = await websocket_connect(self.get_url(url))
        return self._websocket

    def get_protocol(self):
        """Override for `get_url` to return with schema `ws://`"""
        return 'ws'
mit
seann1/portfolio5
.meteor/dev_bundle/lib/node_modules/node-gyp/gyp/pylib/gyp/xcode_ninja.py
1789
10585
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Xcode-ninja wrapper project file generator.

This updates the data structures passed to the Xcode gyp generator to build
with ninja instead. The Xcode project itself is transformed into a list of
executable targets, each with a build step to build with ninja, and a target
with every source and resource file. This appears to sidestep some of the
major performance headaches experienced using complex projects and large
number of targets within Xcode.
"""

import errno
import gyp.generator.ninja
import os
import re
import xml.sax.saxutils


def _WriteWorkspace(main_gyp, sources_gyp, params):
  """ Create a workspace to wrap main and sources gyp paths. """
  (build_file_root, build_file_ext) = os.path.splitext(main_gyp)
  workspace_path = build_file_root + '.xcworkspace'
  options = params['options']
  if options.generator_output:
    workspace_path = os.path.join(options.generator_output, workspace_path)
  try:
    os.makedirs(workspace_path)
  except OSError as e:  # 'except OSError, e' is Python-2-only syntax.
    if e.errno != errno.EEXIST:
      raise
  output_string = '<?xml version="1.0" encoding="UTF-8"?>\n' + \
                  '<Workspace version = "1.0">\n'
  for gyp_name in [main_gyp, sources_gyp]:
    name = os.path.splitext(os.path.basename(gyp_name))[0] + '.xcodeproj'
    name = xml.sax.saxutils.quoteattr("group:" + name)
    output_string += '  <FileRef location = %s></FileRef>\n' % name
  output_string += '</Workspace>\n'

  workspace_file = os.path.join(workspace_path, "contents.xcworkspacedata")

  try:
    with open(workspace_file, 'r') as input_file:
      input_string = input_file.read()
      # Avoid rewriting (and re-timestamping) an identical workspace file.
      if input_string == output_string:
        return
  except IOError:
    # Ignore errors if the file doesn't exist.
    pass

  with open(workspace_file, 'w') as output_file:
    output_file.write(output_string)


def _TargetFromSpec(old_spec, params):
  """ Create fake target for xcode-ninja wrapper. """
  # Determine ninja top level build dir (e.g. /path/to/out).
  ninja_toplevel = None
  jobs = 0
  if params:
    options = params['options']
    ninja_toplevel = \
        os.path.join(options.toplevel_dir,
                     gyp.generator.ninja.ComputeOutputDir(params))
    jobs = params.get('generator_flags', {}).get('xcode_ninja_jobs', 0)

  target_name = old_spec.get('target_name')
  product_name = old_spec.get('product_name', target_name)
  product_extension = old_spec.get('product_extension')

  ninja_target = {}
  ninja_target['target_name'] = target_name
  ninja_target['product_name'] = product_name
  if product_extension:
    ninja_target['product_extension'] = product_extension
  ninja_target['toolset'] = old_spec.get('toolset')
  ninja_target['default_configuration'] = old_spec.get('default_configuration')
  ninja_target['configurations'] = {}

  # Tell Xcode to look in |ninja_toplevel| for build products.
  new_xcode_settings = {}
  if ninja_toplevel:
    new_xcode_settings['CONFIGURATION_BUILD_DIR'] = \
        "%s/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)" % ninja_toplevel

  if 'configurations' in old_spec:
    # iterkeys() is Python-2-only; iterating the dict is equivalent.
    for config in old_spec['configurations']:
      old_xcode_settings = \
          old_spec['configurations'][config].get('xcode_settings', {})
      if 'IPHONEOS_DEPLOYMENT_TARGET' in old_xcode_settings:
        new_xcode_settings['CODE_SIGNING_REQUIRED'] = "NO"
        new_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET'] = \
            old_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET']
      ninja_target['configurations'][config] = {}
      ninja_target['configurations'][config]['xcode_settings'] = \
          new_xcode_settings

  ninja_target['mac_bundle'] = old_spec.get('mac_bundle', 0)
  ninja_target['ios_app_extension'] = old_spec.get('ios_app_extension', 0)
  ninja_target['ios_watchkit_extension'] = \
      old_spec.get('ios_watchkit_extension', 0)
  ninja_target['ios_watchkit_app'] = old_spec.get('ios_watchkit_app', 0)
  ninja_target['type'] = old_spec['type']
  if ninja_toplevel:
    ninja_target['actions'] = [
      {
        'action_name': 'Compile and copy %s via ninja' % target_name,
        'inputs': [],
        'outputs': [],
        'action': [
          'env',
          'PATH=%s' % os.environ['PATH'],
          'ninja',
          '-C',
          new_xcode_settings['CONFIGURATION_BUILD_DIR'],
          target_name,
        ],
        'message': 'Compile and copy %s via ninja' % target_name,
      },
    ]
    if jobs > 0:
      # BUG FIX: |jobs| is an int but action command lines must be strings;
      # extending with the raw int breaks command-line emission later.
      ninja_target['actions'][0]['action'].extend(('-j', str(jobs)))

  return ninja_target


def IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
  """Limit targets for Xcode wrapper.

  Xcode sometimes performs poorly with too many targets, so only include
  proper executable targets, with filters to customize.
  Arguments:
    target_extras: Regular expression to always add, matching any target.
    executable_target_pattern: Regular expression limiting executable targets.
    spec: Specifications for target.
  """
  target_name = spec.get('target_name')
  # Always include targets matching target_extras.
  if target_extras is not None and re.search(target_extras, target_name):
    return True

  # Otherwise just show executable targets.
  if spec.get('type', '') == 'executable' and \
     spec.get('product_extension', '') != 'bundle':

    # If there is a filter and the target does not match, exclude the target.
    if executable_target_pattern is not None:
      if not re.search(executable_target_pattern, target_name):
        return False
  else:
    return False
  return True


def CreateWrapper(target_list, target_dicts, data, params):
  """Initialize targets for the ninja wrapper.

  This sets up the necessary variables in the targets to generate Xcode
  projects that use ninja as an external builder.
  Arguments:
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
    data: Dict of flattened build files keyed on gyp path.
    params: Dict of global options for gyp.
  """
  orig_gyp = params['build_files'][0]
  for gyp_name, gyp_dict in data.items():
    if gyp_name == orig_gyp:
      depth = gyp_dict['_DEPTH']

  # Check for custom main gyp name, otherwise use the default CHROMIUM_GYP_FILE
  # and prepend .ninja before the .gyp extension.
  generator_flags = params.get('generator_flags', {})
  main_gyp = generator_flags.get('xcode_ninja_main_gyp', None)
  if main_gyp is None:
    (build_file_root, build_file_ext) = os.path.splitext(orig_gyp)
    main_gyp = build_file_root + ".ninja" + build_file_ext

  # Create new |target_list|, |target_dicts| and |data| data structures.
  new_target_list = []
  new_target_dicts = {}
  new_data = {}

  # Set base keys needed for |data|.
  new_data[main_gyp] = {}
  new_data[main_gyp]['included_files'] = []
  new_data[main_gyp]['targets'] = []
  new_data[main_gyp]['xcode_settings'] = \
      data[orig_gyp].get('xcode_settings', {})

  # Normally the xcode-ninja generator includes only valid executable targets.
  # If |xcode_ninja_executable_target_pattern| is set, that list is reduced to
  # executable targets that match the pattern. (Default all)
  executable_target_pattern = \
      generator_flags.get('xcode_ninja_executable_target_pattern', None)

  # For including other non-executable targets, add the matching target name
  # to the |xcode_ninja_target_pattern| regular expression. (Default none)
  target_extras = generator_flags.get('xcode_ninja_target_pattern', None)

  for old_qualified_target in target_list:
    spec = target_dicts[old_qualified_target]
    if IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
      # Add to new_target_list.
      target_name = spec.get('target_name')
      new_target_name = '%s:%s#target' % (main_gyp, target_name)
      new_target_list.append(new_target_name)

      # Add to new_target_dicts.
      new_target_dicts[new_target_name] = _TargetFromSpec(spec, params)

      # Add to new_data.
      for old_target in data[old_qualified_target.split(':')[0]]['targets']:
        if old_target['target_name'] == target_name:
          new_data_target = {}
          new_data_target['target_name'] = old_target['target_name']
          new_data_target['toolset'] = old_target['toolset']
          new_data[main_gyp]['targets'].append(new_data_target)

  # Create sources target.
  sources_target_name = 'sources_for_indexing'
  sources_target = _TargetFromSpec(
    { 'target_name' : sources_target_name,
      'toolset': 'target',
      'default_configuration': 'Default',
      'mac_bundle': '0',
      'type': 'executable'
    }, None)

  # Tell Xcode to look everywhere for headers.
  sources_target['configurations'] = {'Default': { 'include_dirs': [ depth ] } }

  sources = []
  for target, target_dict in target_dicts.items():
    base = os.path.dirname(target)
    files = target_dict.get('sources', []) + \
            target_dict.get('mac_bundle_resources', [])
    for action in target_dict.get('actions', []):
      files.extend(action.get('inputs', []))
    # Remove files starting with $. These are mostly intermediate files for
    # the build system.  (Loop variable renamed: 'file' shadowed the builtin.)
    files = [fname for fname in files if not fname.startswith('$')]

    # Make sources relative to root build file.
    relative_path = os.path.dirname(main_gyp)
    sources += [os.path.relpath(os.path.join(base, fname), relative_path)
                for fname in files]

  sources_target['sources'] = sorted(set(sources))

  # Put sources_to_index in it's own gyp.
  sources_gyp = \
      os.path.join(os.path.dirname(main_gyp), sources_target_name + ".gyp")
  fully_qualified_target_name = \
      '%s:%s#target' % (sources_gyp, sources_target_name)

  # Add to new_target_list, new_target_dicts and new_data.
  new_target_list.append(fully_qualified_target_name)
  new_target_dicts[fully_qualified_target_name] = sources_target
  new_data_target = {}
  new_data_target['target_name'] = sources_target['target_name']
  new_data_target['_DEPTH'] = depth
  new_data_target['toolset'] = "target"
  new_data[sources_gyp] = {}
  new_data[sources_gyp]['targets'] = []
  new_data[sources_gyp]['included_files'] = []
  new_data[sources_gyp]['xcode_settings'] = \
      data[orig_gyp].get('xcode_settings', {})
  new_data[sources_gyp]['targets'].append(new_data_target)

  # Write workspace to file.
  _WriteWorkspace(main_gyp, sources_gyp, params)
  return (new_target_list, new_target_dicts, new_data)
gpl-2.0
stormi/weblate
weblate/trans/aresource.py
7
1395
# -*- coding: utf-8 -*- # # Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com> # # This file is part of Weblate <http://weblate.org/> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # """ Weblate wrapper around translate-toolkit formats to add missing functionality. """ import json from translate.storage.jsonl10n import JsonFile as JsonFileTT class JsonFile(JsonFileTT): """ Workaround ttkit bug on not including added units in saved file. """ def __str__(self): data = {} # This is really broken for many reasons, but works for # simple JSON files. for unit in self.units: data[unit.getid().lstrip('.')] = unit.source return json.dumps( data, sort_keys=True, indent=4, ensure_ascii=False ).encode('utf-8')
gpl-3.0
ryanbackman/zulip
zerver/webhooks/bitbucket2/tests.py
8
20702
# -*- coding: utf-8 -*- from mock import patch, MagicMock from typing import Optional, Text from zerver.lib.test_classes import WebhookTestCase class Bitbucket2HookTests(WebhookTestCase): STREAM_NAME = 'bitbucket2' URL_TEMPLATE = "/api/v1/external/bitbucket2?stream={stream}&api_key={api_key}" FIXTURE_DIR_NAME = 'bitbucket2' EXPECTED_SUBJECT = u"Repository name" EXPECTED_SUBJECT_PR_EVENTS = u"Repository name / PR #1 new commit" EXPECTED_SUBJECT_ISSUE_EVENTS = u"Repository name / Issue #1 Bug" EXPECTED_SUBJECT_BRANCH_EVENTS = u"Repository name / master" def test_bitbucket2_on_push_event(self): # type: () -> None commit_info = u'* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))' expected_message = u"kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 1 commit to branch master.\n\n{}".format(commit_info) self.send_and_test_stream_message('push', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message) def test_bitbucket2_on_push_commits_multiple_committers(self): # type: () -> None commit_info = u'* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))\n' expected_message = u"""kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 3 commits to branch master. 
Commits by zbenjamin (2) and kolaszek (1).\n\n{}* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))""".format(commit_info*2) self.send_and_test_stream_message('push_multiple_committers', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message) def test_bitbucket2_on_push_commits_multiple_committers_with_others(self): # type: () -> None commit_info = u'* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))\n' expected_message = u"""kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 10 commits to branch master. Commits by james (3), Brendon (2), Tomasz (2) and others (3).\n\n{}* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))""".format(commit_info*9) self.send_and_test_stream_message('push_multiple_committers_with_others', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message) def test_bitbucket2_on_push_commits_multiple_committers_filtered_by_branches(self): # type: () -> None self.url = self.build_webhook_url(branches='master,development') commit_info = u'* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))\n' expected_message = u"""kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 3 commits to branch master. 
Commits by zbenjamin (2) and kolaszek (1).\n\n{}* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))""".format(commit_info*2) self.send_and_test_stream_message('push_multiple_committers', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message) def test_bitbucket2_on_push_commits_multiple_committers_with_others_filtered_by_branches(self): # type: () -> None self.url = self.build_webhook_url(branches='master,development') commit_info = u'* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))\n' expected_message = u"""kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 10 commits to branch master. Commits by james (3), Brendon (2), Tomasz (2) and others (3).\n\n{}* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))""".format(commit_info*9) self.send_and_test_stream_message('push_multiple_committers_with_others', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message) def test_bitbucket2_on_push_event_filtered_by_branches(self): # type: () -> None self.url = self.build_webhook_url(branches='master,development') commit_info = u'* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))' expected_message = u"kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 1 commit to branch master.\n\n{}".format(commit_info) self.send_and_test_stream_message('push', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message) def test_bitbucket2_on_push_commits_above_limit_event(self): # type: () -> None commit_info = '* a ([6f161a7](https://bitbucket.org/kolaszek/repository-name/commits/6f161a7bced94430ac8947d87dbf45c6deee3fb0))\n' expected_message = u"kolaszek 
[pushed](https://bitbucket.org/kolaszek/repository-name/branches/compare/6f161a7bced94430ac8947d87dbf45c6deee3fb0..1221f2fda6f1e3654b09f1f3a08390e4cb25bb48) 5 commits to branch master. Commits by Tomasz (5).\n\n{}[and more commit(s)]".format( (commit_info * 5), ) self.send_and_test_stream_message('push_commits_above_limit', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message) def test_bitbucket2_on_push_commits_above_limit_filtered_by_branches(self): # type: () -> None self.url = self.build_webhook_url(branches='master,development') commit_info = '* a ([6f161a7](https://bitbucket.org/kolaszek/repository-name/commits/6f161a7bced94430ac8947d87dbf45c6deee3fb0))\n' expected_message = u"kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branches/compare/6f161a7bced94430ac8947d87dbf45c6deee3fb0..1221f2fda6f1e3654b09f1f3a08390e4cb25bb48) 5 commits to branch master. Commits by Tomasz (5).\n\n{}[and more commit(s)]".format( (commit_info * 5), ) self.send_and_test_stream_message('push_commits_above_limit', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message) def test_bitbucket2_on_force_push_event(self): # type: () -> None expected_message = u"kolaszek [force pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) to branch master. Head is now 25f93d22b719e2d678a7ad5ee0ef0d1fcdf39c12" self.send_and_test_stream_message('force_push', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message) def test_bitbucket2_on_force_push_event_filtered_by_branches(self): # type: () -> None self.url = self.build_webhook_url(branches='master,development') expected_message = u"kolaszek [force pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) to branch master. 
Head is now 25f93d22b719e2d678a7ad5ee0ef0d1fcdf39c12" self.send_and_test_stream_message('force_push', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message) def test_bitbucket2_on_remove_branch_event(self): # type: () -> None expected_message = u"kolaszek deleted branch master" self.send_and_test_stream_message('remove_branch', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message) def test_bitbucket2_on_fork_event(self): # type: () -> None expected_message = u"User Tomasz(login: kolaszek) forked the repository into [kolaszek/repository-name2](https://bitbucket.org/kolaszek/repository-name2)." self.send_and_test_stream_message('fork', self.EXPECTED_SUBJECT, expected_message) def test_bitbucket2_on_commit_comment_created_event(self): # type: () -> None expected_message = u"kolaszek [commented](https://bitbucket.org/kolaszek/repository-name/commits/32c4ea19aa3af10acd08e419e2c354941a365d74#comment-3354963) on [32c4ea1](https://bitbucket.org/kolaszek/repository-name/commits/32c4ea19aa3af10acd08e419e2c354941a365d74)\n~~~ quote\nNice fix!\n~~~" self.send_and_test_stream_message('commit_comment_created', self.EXPECTED_SUBJECT, expected_message) def test_bitbucket2_on_commit_status_changed_event(self): # type: () -> None expected_message = u"[System mybuildtool](https://my-build-tool.com/builds/MY-PROJECT/BUILD-777) changed status of https://bitbucket.org/kolaszek/repository-name/9fec847784abb10b2fa567ee63b85bd238955d0e to SUCCESSFUL." 
self.send_and_test_stream_message('commit_status_changed', self.EXPECTED_SUBJECT, expected_message) def test_bitbucket2_on_issue_created_event(self): # type: () -> None expected_message = u"kolaszek created [Issue #1](https://bitbucket.org/kolaszek/repository-name/issues/2/bug)(assigned to kolaszek)\n\n~~~ quote\nSuch a bug\n~~~" self.send_and_test_stream_message('issue_created', self.EXPECTED_SUBJECT_ISSUE_EVENTS, expected_message) def test_bitbucket2_on_issue_updated_event(self): # type: () -> None expected_message = u"kolaszek updated [Issue #1](https://bitbucket.org/kolaszek/repository-name/issues/2/bug)" self.send_and_test_stream_message('issue_updated', self.EXPECTED_SUBJECT_ISSUE_EVENTS, expected_message) def test_bitbucket2_on_issue_commented_event(self): # type: () -> None expected_message = u"kolaszek [commented](https://bitbucket.org/kolaszek/repository-name/issues/2#comment-28973596) on [Issue #1](https://bitbucket.org/kolaszek/repository-name/issues/2/bug)" self.send_and_test_stream_message('issue_commented', self.EXPECTED_SUBJECT_ISSUE_EVENTS, expected_message) def test_bitbucket2_on_pull_request_created_event(self): # type: () -> None expected_message = u"kolaszek created [PR #1](https://bitbucket.org/kolaszek/repository-name/pull-requests/1)(assigned to tkolek)\nfrom `new-branch` to `master`\n\n~~~ quote\ndescription\n~~~" kwargs = { "HTTP_X_EVENT_KEY": 'pullrequest:created' } self.send_and_test_stream_message('pull_request_created_or_updated', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, **kwargs) def test_bitbucket2_on_pull_request_updated_event(self): # type: () -> None expected_message = u"kolaszek updated [PR #1](https://bitbucket.org/kolaszek/repository-name/pull-requests/1)(assigned to tkolek)\nfrom `new-branch` to `master`\n\n~~~ quote\ndescription\n~~~" kwargs = { "HTTP_X_EVENT_KEY": 'pullrequest:updated' } self.send_and_test_stream_message('pull_request_created_or_updated', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, **kwargs) 
    # --- Pull request approval / rejection events ------------------------
    # One fixture file serves several related events; the concrete event is
    # selected via the X-Event-Key HTTP header passed in **kwargs.

    def test_bitbucket2_on_pull_request_approved_event(self):
        # type: () -> None
        expected_message = u"kolaszek approved [PR #1](https://bitbucket.org/kolaszek/repository-name/pull-requests/1)"
        kwargs = {
            "HTTP_X_EVENT_KEY": 'pullrequest:approved'
        }
        self.send_and_test_stream_message('pull_request_approved_or_unapproved', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, **kwargs)

    def test_bitbucket2_on_pull_request_unapproved_event(self):
        # type: () -> None
        expected_message = u"kolaszek unapproved [PR #1](https://bitbucket.org/kolaszek/repository-name/pull-requests/1)"
        kwargs = {
            "HTTP_X_EVENT_KEY": 'pullrequest:unapproved'
        }
        self.send_and_test_stream_message('pull_request_approved_or_unapproved', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, **kwargs)

    def test_bitbucket2_on_pull_request_declined_event(self):
        # type: () -> None
        expected_message = u"kolaszek rejected [PR #1](https://bitbucket.org/kolaszek/repository-name/pull-requests/1)"
        kwargs = {
            "HTTP_X_EVENT_KEY": 'pullrequest:rejected'
        }
        self.send_and_test_stream_message('pull_request_fulfilled_or_rejected', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, **kwargs)

    def test_bitbucket2_on_pull_request_fulfilled_event(self):
        # type: () -> None
        expected_message = u"kolaszek merged [PR #1](https://bitbucket.org/kolaszek/repository-name/pull-requests/1)"
        kwargs = {
            "HTTP_X_EVENT_KEY": 'pullrequest:fulfilled'
        }
        self.send_and_test_stream_message('pull_request_fulfilled_or_rejected', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, **kwargs)

    # --- Pull request comment events -------------------------------------

    def test_bitbucket2_on_pull_request_comment_created_event(self):
        # type: () -> None
        expected_message = u"kolaszek [commented](https://bitbucket.org/kolaszek/repository-name/pull-requests/3/_/diff#comment-20576503) on [PR #1](https://bitbucket.org/kolaszek/repository-name/pull-requests/3)\n\n~~~ quote\nComment1\n~~~"
        kwargs = {
            "HTTP_X_EVENT_KEY": 'pullrequest:comment_created'
        }
        self.send_and_test_stream_message('pull_request_comment_action', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, **kwargs)

    def test_bitbucket2_on_pull_request_comment_updated_event(self):
        # type: () -> None
        expected_message = u"kolaszek updated a [comment](https://bitbucket.org/kolaszek/repository-name/pull-requests/3/_/diff#comment-20576503) on [PR #1](https://bitbucket.org/kolaszek/repository-name/pull-requests/3)\n\n~~~ quote\nComment1\n~~~"
        kwargs = {
            "HTTP_X_EVENT_KEY": 'pullrequest:comment_updated'
        }
        self.send_and_test_stream_message('pull_request_comment_action', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, **kwargs)

    def test_bitbucket2_on_pull_request_comment_deleted_event(self):
        # type: () -> None
        expected_message = u"kolaszek deleted a [comment](https://bitbucket.org/kolaszek/repository-name/pull-requests/3/_/diff#comment-20576503) on [PR #1](https://bitbucket.org/kolaszek/repository-name/pull-requests/3)\n\n~~~ quote\nComment1\n~~~"
        kwargs = {
            "HTTP_X_EVENT_KEY": 'pullrequest:comment_deleted'
        }
        self.send_and_test_stream_message('pull_request_comment_action', self.EXPECTED_SUBJECT_PR_EVENTS, expected_message, **kwargs)

    # --- Tag push events --------------------------------------------------
    # NOTE(review): the push fixtures are sent with event key
    # 'pullrequest:push' — presumably matching the header in the recorded
    # payloads; confirm against the fixture files if this looks wrong.

    def test_bitbucket2_on_push_one_tag_event(self):
        # type: () -> None
        expected_message = u"kolaszek pushed tag [a](https://bitbucket.org/kolaszek/repository-name/commits/tag/a)"
        kwargs = {
            "HTTP_X_EVENT_KEY": 'pullrequest:push'
        }
        self.send_and_test_stream_message('push_one_tag', self.EXPECTED_SUBJECT, expected_message, **kwargs)

    def test_bitbucket2_on_push_remove_tag_event(self):
        # type: () -> None
        expected_message = u"kolaszek removed tag [a](https://bitbucket.org/kolaszek/repository-name/commits/tag/a)"
        kwargs = {
            "HTTP_X_EVENT_KEY": 'pullrequest:push'
        }
        self.send_and_test_stream_message('push_remove_tag', self.EXPECTED_SUBJECT, expected_message, **kwargs)

    def test_bitbucket2_on_push_more_than_one_tag_event(self):
        # type: () -> None
        # Two tags produce two separate messages; check both (most recent
        # message corresponds to tag 'b').
        expected_message = u"kolaszek pushed tag [{name}](https://bitbucket.org/kolaszek/repository-name/commits/tag/{name})"
        kwargs = {
            "HTTP_X_EVENT_KEY": 'pullrequest:push'
        }
        self.send_and_test_stream_message('push_more_than_one_tag', **kwargs)
        msg = self.get_last_message()
        self.do_test_subject(msg, self.EXPECTED_SUBJECT)
        self.do_test_message(msg, expected_message.format(name='b'))
        msg = self.get_second_to_last_message()
        self.do_test_subject(msg, self.EXPECTED_SUBJECT)
        self.do_test_message(msg, expected_message.format(name='a'))

    def test_bitbucket2_on_more_than_one_push_event(self):
        # type: () -> None
        # A single payload carrying both a branch push and a tag push must
        # fan out into two messages with their respective subjects.
        kwargs = {
            "HTTP_X_EVENT_KEY": 'pullrequest:push'
        }
        self.send_and_test_stream_message('more_than_one_push_event', **kwargs)
        msg = self.get_second_to_last_message()
        self.do_test_message(msg, 'kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 1 commit to branch master.\n\n* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))')
        self.do_test_subject(msg, self.EXPECTED_SUBJECT_BRANCH_EVENTS)
        msg = self.get_last_message()
        self.do_test_message(msg, 'kolaszek pushed tag [a](https://bitbucket.org/kolaszek/repository-name/commits/tag/a)')
        self.do_test_subject(msg, self.EXPECTED_SUBJECT)

    def test_bitbucket2_on_more_than_one_push_event_filtered_by_branches(self):
        # type: () -> None
        # 'master' is in the branch filter, so both messages still go out.
        self.url = self.build_webhook_url(branches='master,development')
        kwargs = {
            "HTTP_X_EVENT_KEY": 'pullrequest:push'
        }
        self.send_and_test_stream_message('more_than_one_push_event', **kwargs)
        msg = self.get_second_to_last_message()
        self.do_test_message(msg, 'kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 1 commit to branch master.\n\n* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))')
        self.do_test_subject(msg, self.EXPECTED_SUBJECT_BRANCH_EVENTS)
        msg = self.get_last_message()
        self.do_test_message(msg, 'kolaszek pushed tag [a](https://bitbucket.org/kolaszek/repository-name/commits/tag/a)')
        self.do_test_subject(msg, self.EXPECTED_SUBJECT)

    def test_bitbucket2_on_more_than_one_push_event_filtered_by_branches_ignore(self):
        # type: () -> None
        # 'master' is NOT in the filter: the branch push is suppressed but
        # the tag push (not branch-scoped) still produces a message.
        self.url = self.build_webhook_url(branches='changes,development')
        kwargs = {
            "HTTP_X_EVENT_KEY": 'pullrequest:push'
        }
        expected_message = u"kolaszek pushed tag [a](https://bitbucket.org/kolaszek/repository-name/commits/tag/a)"
        self.send_and_test_stream_message('more_than_one_push_event', self.EXPECTED_SUBJECT, expected_message, **kwargs)

    # --- Branch-filter suppression tests ----------------------------------
    # These patch check_send_message and assert it was never called while
    # the webhook still returns JSON success.
    # NOTE(review): 'devlopment' below is an intentional-looking literal that
    # only needs to NOT match any pushed branch — harmless, but confirm it is
    # not a typo for 'development' that a future fixture could depend on.

    @patch('zerver.webhooks.bitbucket2.view.check_send_message')
    def test_bitbucket2_on_push_event_filtered_by_branches_ignore(
            self, check_send_message_mock):
        # type: (MagicMock) -> None
        self.url = self.build_webhook_url(branches='changes,devlopment')
        payload = self.get_body('push')
        result = self.client_post(self.url, payload, content_type="application/json")
        self.assertFalse(check_send_message_mock.called)
        self.assert_json_success(result)

    @patch('zerver.webhooks.bitbucket2.view.check_send_message')
    def test_bitbucket2_on_push_commits_above_limit_filtered_by_branches_ignore(
            self, check_send_message_mock):
        # type: (MagicMock) -> None
        self.url = self.build_webhook_url(branches='changes,devlopment')
        payload = self.get_body('push_commits_above_limit')
        result = self.client_post(self.url, payload, content_type="application/json")
        self.assertFalse(check_send_message_mock.called)
        self.assert_json_success(result)

    @patch('zerver.webhooks.bitbucket2.view.check_send_message')
    def test_bitbucket2_on_force_push_event_filtered_by_branches_ignore(
            self, check_send_message_mock):
        # type: (MagicMock) -> None
        self.url = self.build_webhook_url(branches='changes,devlopment')
        payload = self.get_body('force_push')
        result = self.client_post(self.url, payload, content_type="application/json")
        self.assertFalse(check_send_message_mock.called)
        self.assert_json_success(result)

    @patch('zerver.webhooks.bitbucket2.view.check_send_message')
    def test_bitbucket2_on_push_multiple_committers_filtered_by_branches_ignore(
            self, check_send_message_mock):
        # type: (MagicMock) -> None
        self.url = self.build_webhook_url(branches='changes,devlopment')
        payload = self.get_body('push_multiple_committers')
        result = self.client_post(self.url, payload, content_type="application/json")
        self.assertFalse(check_send_message_mock.called)
        self.assert_json_success(result)

    @patch('zerver.webhooks.bitbucket2.view.check_send_message')
    def test_bitbucket2_on_push_multiple_committers_with_others_filtered_by_branches_ignore(
            self, check_send_message_mock):
        # type: (MagicMock) -> None
        self.url = self.build_webhook_url(branches='changes,devlopment')
        payload = self.get_body('push_multiple_committers_with_others')
        result = self.client_post(self.url, payload, content_type="application/json")
        self.assertFalse(check_send_message_mock.called)
        self.assert_json_success(result)
apache-2.0
tanchao/algo
codeeval/py/query_board.py
1
1315
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""CodeEval "Query Board": maintain a SIZE x SIZE integer board and answer
SetRow/SetCol/QueryRow/QueryCol orders read from the file named in argv[1].

Query results are printed one per line.  `print(x)` with a single argument
behaves identically under Python 2 and Python 3, so this script now runs on
both (the original used Python-2-only print statements).
"""

__author__ = 'tanchao'

import sys

SIZE = 256  # board dimension, defined in challenge description

# The mutable board state; MATRIX[row][col], all cells start at 0.
MATRIX = [[0] * SIZE for _ in range(SIZE)]


def set_col(orders_):
    """Set every cell of 1-based column orders_[1] to orders_[2]."""
    col_ = orders_[1] - 1
    val_ = orders_[2]
    for i in range(SIZE):
        MATRIX[i][col_] = val_


def set_row(orders_):
    """Set every cell of 1-based row orders_[1] to orders_[2]."""
    row_ = orders_[1] - 1
    val_ = orders_[2]
    for j in range(SIZE):
        MATRIX[row_][j] = val_


def query_col(orders_):
    """Print the sum of 1-based column orders_[1]."""
    col_ = orders_[1] - 1
    print(sum(row[col_] for row in MATRIX))


def query_row(orders_):
    """Print the sum of 1-based row orders_[1]."""
    row_ = orders_[1] - 1
    print(sum(MATRIX[row_]))


def query_board(order_):
    """Parse one order line ('<Op> <i> [<x>]') and dispatch it."""
    orders_ = order_.split(' ')  # no validation on input
    orders_[1] = int(orders_[1])
    if len(orders_) == 3:
        orders_[2] = int(orders_[2])
    # Op names are mutually exclusive, so use an elif chain (the original
    # re-tested every branch).
    if orders_[0] == 'SetCol':
        set_col(orders_)
    elif orders_[0] == 'SetRow':
        set_row(orders_)
    elif orders_[0] == 'QueryCol':
        query_col(orders_)
    elif orders_[0] == 'QueryRow':
        query_row(orders_)


def main():
    """Process every non-empty line of the test-case file given in argv[1]."""
    with open(sys.argv[1], 'r') as test_cases:
        for test in test_cases:
            test = test.strip()
            if test:  # remove ending code '\n' and not empty string
                query_board(test)


# Guarded so importing this module has no side effects (the original opened
# sys.argv[1] at import time).
if __name__ == '__main__':
    main()
mit
tensorflow/model-card-toolkit
model_card_toolkit/model_card_test.py
1
7585
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for model_card_toolkit.model_card."""

import json
import os
import pkgutil
from absl.testing import absltest
import jsonschema

from model_card_toolkit import model_card
from model_card_toolkit.proto import model_card_pb2
from google.protobuf import text_format

# Golden fixtures bundled with the package: a fully-populated model card in
# both pbtxt and JSON form, used for round-trip tests.
_FULL_PROTO_FILE_NAME = "full.pbtxt"
_FULL_PROTO = pkgutil.get_data(
    "model_card_toolkit", os.path.join("template/test", _FULL_PROTO_FILE_NAME))
_FULL_JSON_FILE_PATH = "full.json"
# NOTE(review): the extra `model_card_json_bytestring` alias looks like a
# leftover; nothing in this file reads it — confirm before removing.
_FULL_JSON = model_card_json_bytestring = pkgutil.get_data(
    "model_card_toolkit", os.path.join("template/test", _FULL_JSON_FILE_PATH))


class ModelCardTest(absltest.TestCase):
    # NOTE(review): several method names carry typos ('sucess', 'vesion');
    # they are test identifiers, so renaming is cosmetic but would touch CI
    # filters — left as-is here.

    def test_copy_from_proto_and_to_proto_with_all_fields(self):
        # Round-trip: proto -> python object -> proto must be lossless.
        want_proto = text_format.Parse(_FULL_PROTO, model_card_pb2.ModelCard())
        model_card_py = model_card.ModelCard()
        model_card_py.copy_from_proto(want_proto)
        got_proto = model_card_py.to_proto()
        self.assertEqual(want_proto, got_proto)

    def test_merge_from_proto_and_to_proto_with_all_fields(self):
        # Merging into an empty ModelCard is equivalent to copying.
        want_proto = text_format.Parse(_FULL_PROTO, model_card_pb2.ModelCard())
        model_card_py = model_card.ModelCard()
        model_card_py.merge_from_proto(want_proto)
        got_proto = model_card_py.to_proto()
        self.assertEqual(want_proto, got_proto)

    def test_copy_from_proto_sucess(self):
        # Test fields convert: copy overwrites existing scalar fields.
        owner = model_card.Owner(name="my_name1")
        owner_proto = model_card_pb2.Owner(name="my_name2", contact="my_contact2")
        owner.copy_from_proto(owner_proto)
        self.assertEqual(owner,
                         model_card.Owner(name="my_name2", contact="my_contact2"))

        # Test message convert: copy replaces nested repeated messages.
        model_details = model_card.ModelDetails(
            owners=[model_card.Owner(name="my_name1")])
        model_details_proto = model_card_pb2.ModelDetails(
            owners=[model_card_pb2.Owner(name="my_name2", contact="my_contact2")])
        model_details.copy_from_proto(model_details_proto)
        self.assertEqual(
            model_details,
            model_card.ModelDetails(
                owners=[model_card.Owner(name="my_name2", contact="my_contact2")]))

    def test_merge_from_proto_sucess(self):
        # Test fields convert: merge keeps set fields, fills unset ones.
        owner = model_card.Owner(name="my_name1")
        owner_proto = model_card_pb2.Owner(contact="my_contact1")
        owner.merge_from_proto(owner_proto)
        self.assertEqual(owner,
                         model_card.Owner(name="my_name1", contact="my_contact1"))

        # Test message convert: merge APPENDS to repeated message fields
        # (contrast with copy_from_proto above, which replaces).
        model_details = model_card.ModelDetails(
            owners=[model_card.Owner(name="my_name1")])
        model_details_proto = model_card_pb2.ModelDetails(
            owners=[model_card_pb2.Owner(name="my_name2", contact="my_contact2")])
        model_details.merge_from_proto(model_details_proto)
        self.assertEqual(
            model_details,
            model_card.ModelDetails(owners=[
                model_card.Owner(name="my_name1"),
                model_card.Owner(name="my_name2", contact="my_contact2")
            ]))

    def test_copy_from_proto_with_invalid_proto(self):
        # Copying from a proto of the wrong message type must raise TypeError.
        owner = model_card.Owner()
        wrong_proto = model_card_pb2.Version()
        with self.assertRaisesRegex(
            TypeError,
            "<class 'model_card_toolkit.proto.model_card_pb2.Owner'> is expected. "
            "However <class 'model_card_toolkit.proto.model_card_pb2.Version'> is "
            "provided."):
            owner.copy_from_proto(wrong_proto)

    def test_merge_from_proto_with_invalid_proto(self):
        # Merge raises protobuf's own wrong-type message.
        owner = model_card.Owner()
        wrong_proto = model_card_pb2.Version()
        with self.assertRaisesRegex(TypeError, "expected Owner got Version"):
            owner.merge_from_proto(wrong_proto)

    def test_to_proto_sucess(self):
        # Test fields convert: fields appear in the proto as they are set.
        owner = model_card.Owner()
        self.assertEqual(owner.to_proto(), model_card_pb2.Owner())
        owner.name = "my_name"
        self.assertEqual(owner.to_proto(), model_card_pb2.Owner(name="my_name"))
        owner.contact = "my_contact"
        self.assertEqual(owner.to_proto(),
                         model_card_pb2.Owner(name="my_name", contact="my_contact"))

        # Test message convert: unset sub-messages serialize as empty protos.
        model_details = model_card.ModelDetails(
            owners=[model_card.Owner(name="my_name", contact="my_contact")])
        self.assertEqual(
            model_details.to_proto(),
            model_card_pb2.ModelDetails(
                owners=[model_card_pb2.Owner(name="my_name", contact="my_contact")],
                version=model_card_pb2.Version()))

    def test_to_proto_with_invalid_field(self):
        # Ad-hoc attributes that have no proto counterpart must be rejected.
        owner = model_card.Owner()
        owner.wrong_field = "wrong"
        with self.assertRaisesRegex(ValueError,
                                    "has no such field named 'wrong_field'."):
            owner.to_proto()

    def test_from_json_and_to_json_with_all_fields(self):
        # Round-trip: JSON -> python object -> JSON must be lossless.
        want_json = json.loads(_FULL_JSON)
        model_card_py = model_card.ModelCard()._from_json(want_json)
        got_json = json.loads(model_card_py.to_json())
        self.assertEqual(want_json, got_json)

    def test_from_json_overwrites_previous_fields(self):
        # _from_json replaces (does not merge with) pre-existing content.
        overwritten_limitation = model_card.Limitation(
            description="This model can only be used on text up to 140 characters.")
        model_card_py = model_card.ModelCard(
            considerations=model_card.Considerations(
                limitations=[overwritten_limitation]))
        model_card_json = json.loads(_FULL_JSON)
        model_card_py = model_card_py._from_json(model_card_json)
        self.assertNotIn(overwritten_limitation,
                         model_card_py.considerations.limitations)

    def test_from_invalid_json(self):
        # JSON not matching the model-card schema fails validation.
        invalid_json_dict = {"model_name": "the_greatest_model"}
        with self.assertRaises(jsonschema.ValidationError):
            model_card.ModelCard()._from_json(invalid_json_dict)

    def test_from_invalid_json_vesion(self):
        # A schema_version with no matching bundled schema raises ValueError.
        model_card_dict = {
            "model_details": {},
            "model_parameters": {},
            "quantitative_analysis": {},
            "considerations": {},
            "schema_version": "0.0.3"
        }
        with self.assertRaisesRegex(ValueError, (
            "^Cannot find schema version that matches the version of the given "
            "model card.")):
            model_card.ModelCard()._from_json(model_card_dict)

    def test_from_proto_to_json(self):
        # pbtxt fixture and JSON fixture must describe the same card.
        model_card_proto = text_format.Parse(_FULL_PROTO,
                                             model_card_pb2.ModelCard())
        model_card_py = model_card.ModelCard()
        # Use merge_from_proto.
        self.assertJsonEqual(
            _FULL_JSON,
            model_card_py.merge_from_proto(model_card_proto).to_json())
        # Use copy_from_proto
        self.assertJsonEqual(
            _FULL_JSON,
            model_card_py.copy_from_proto(model_card_proto).to_json())

    def test_from_json_to_proto(self):
        # Inverse direction of the previous test.
        model_card_proto = text_format.Parse(_FULL_PROTO,
                                             model_card_pb2.ModelCard())
        model_card_json = json.loads(_FULL_JSON)
        model_card_py = model_card.ModelCard()._from_json(model_card_json)
        model_card_json2proto = model_card_py.to_proto()
        self.assertEqual(model_card_proto, model_card_json2proto)


if __name__ == "__main__":
    absltest.main()
apache-2.0
JamesShaeffer/QGIS
tests/src/python/test_qgssimplefillsymbollayer.py
23
9165
# -*- coding: utf-8 -*-
"""
***************************************************************************
    test_qgssimplefillsymbollayer.py
    ---------------------
    Date                 : November 2018
    Copyright            : (C) 2018 by Nyall Dawson
    Email                : nyall dot dawson at gmail dot com
***************************************************************************
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
***************************************************************************
"""

__author__ = 'Nyall Dawson'
__date__ = 'September 2020'
__copyright__ = '(C) 2020, Nyall Dawson'

import qgis  # NOQA

import os

from utilities import unitTestDataPath

from qgis.PyQt.QtCore import QDir, QPointF, Qt, QSize
from qgis.PyQt.QtGui import QImage, QColor, QPainter

from qgis.core import (QgsGeometry,
                       QgsFillSymbol,
                       QgsRenderContext,
                       QgsFeature,
                       QgsMapSettings,
                       QgsRenderChecker,
                       QgsVectorLayer,
                       QgsSimpleFillSymbolLayer,
                       QgsSymbolLayer,
                       QgsProperty,
                       QgsSingleSymbolRenderer,
                       QgsRectangle,
                       QgsMultiRenderChecker,
                       QgsSymbol
                       )

from qgis.testing import unittest, start_app

start_app()
TEST_DATA_DIR = unitTestDataPath()


class TestQgsSimpleFillSymbolLayer(unittest.TestCase):
    """Render tests for QgsSimpleFillSymbolLayer.

    An HTML report is accumulated in the *class* attribute ``report`` and
    written to disk once in tearDownClass.  All methods must therefore append
    via ``TestQgsSimpleFillSymbolLayer.report`` — ``self.report +=`` would
    read the class attribute but assign a shadowing *instance* attribute, so
    those fragments would never reach the file (fixed below in two tests).
    """

    @classmethod
    def setUpClass(cls):
        cls.report = "<h1>Python QgsSimpleFillSymbolLayer Tests</h1>\n"

    @classmethod
    def tearDownClass(cls):
        # Append this run's report to the shared QGIS test report file.
        report_file_path = "%s/qgistest.html" % QDir.tempPath()
        with open(report_file_path, 'a') as report_file:
            report_file.write(cls.report)

    def testRender(self):
        # rendering test
        s = QgsFillSymbol.createSimple({'outline_color': '#ff0000', 'outline_width': '2', 'color': '#ff5588'})

        g = QgsGeometry.fromWkt('Polygon((0 0, 10 0, 10 10, 0 0))')
        rendered_image = self.renderGeometry(s, g)
        assert self.imageCheck('simplefill_render', 'simplefill_render', rendered_image)

    def testRenderWithOffset(self):
        # rendering test with offset
        s = QgsFillSymbol.createSimple({'outline_color': '#ff0000', 'outline_width': '2', 'color': '#ff5588'})
        s[0].setOffset(QPointF(5, 3))

        g = QgsGeometry.fromWkt('Polygon((0 0, 10 0, 10 10, 0 0))')
        rendered_image = self.renderGeometry(s, g)
        assert self.imageCheck('simplefill_offset', 'simplefill_offset', rendered_image)

    def testDataDefinedOffset(self):
        """ test that rendering a fill symbol with data defined offset works"""

        polys_shp = os.path.join(TEST_DATA_DIR, 'polys.shp')
        polys_layer = QgsVectorLayer(polys_shp, 'Polygons', 'ogr')

        # lets render two layers, to make comparison easier
        layer = QgsSimpleFillSymbolLayer()
        layer.setStrokeStyle(Qt.NoPen)
        layer.setColor(QColor(200, 250, 50))

        symbol = QgsFillSymbol()
        symbol.changeSymbolLayer(0, layer)

        layer = QgsSimpleFillSymbolLayer()
        layer.setDataDefinedProperty(QgsSymbolLayer.PropertyOffset,
                                     QgsProperty.fromExpression("array(-(x_min($geometry)+100)/5, (y_min($geometry)-35)/5)"))
        layer.setStrokeStyle(Qt.NoPen)
        layer.setColor(QColor(100, 150, 150))

        symbol.appendSymbolLayer(layer)

        polys_layer.setRenderer(QgsSingleSymbolRenderer(symbol))

        ms = QgsMapSettings()
        ms.setOutputSize(QSize(400, 400))
        ms.setOutputDpi(96)
        ms.setExtent(QgsRectangle(-133, 22, -70, 52))
        ms.setLayers([polys_layer])

        # Test rendering
        renderchecker = QgsMultiRenderChecker()
        renderchecker.setMapSettings(ms)
        renderchecker.setControlPathPrefix('symbol_simplefill')
        renderchecker.setControlName('expected_simplefill_ddoffset')
        res = renderchecker.runTest('simplefill_ddoffset')
        TestQgsSimpleFillSymbolLayer.report += renderchecker.report()
        self.assertTrue(res)

    def testOpacityWithDataDefinedColor(self):
        """Symbol-level opacity must combine with data defined fill/stroke colors."""
        poly_shp = os.path.join(TEST_DATA_DIR, 'polys.shp')
        poly_layer = QgsVectorLayer(poly_shp, 'Polys', 'ogr')
        self.assertTrue(poly_layer.isValid())

        layer = QgsSimpleFillSymbolLayer()
        layer.setStrokeStyle(Qt.NoPen)
        layer.setColor(QColor(200, 250, 50))
        layer.setDataDefinedProperty(QgsSymbolLayer.PropertyFillColor, QgsProperty.fromExpression(
            "if(Name='Dam', 'red', 'green')"))
        layer.setDataDefinedProperty(QgsSymbolLayer.PropertyStrokeColor, QgsProperty.fromExpression(
            "if(Name='Dam', 'magenta', 'blue')"))

        symbol = QgsFillSymbol()
        symbol.changeSymbolLayer(0, layer)
        symbol.setOpacity(0.5)

        poly_layer.setRenderer(QgsSingleSymbolRenderer(symbol))

        ms = QgsMapSettings()
        ms.setOutputSize(QSize(400, 400))
        ms.setOutputDpi(96)
        ms.setExtent(QgsRectangle(-118.5, 19.0, -81.4, 50.4))
        ms.setLayers([poly_layer])

        # Test rendering
        renderchecker = QgsMultiRenderChecker()
        renderchecker.setMapSettings(ms)
        renderchecker.setControlPathPrefix('symbol_simplefill')
        renderchecker.setControlName('expected_simplefill_opacityddcolor')
        res = renderchecker.runTest('expected_simplefill_opacityddcolor')
        # Fixed: was `self.report +=`, which silently dropped this report
        # fragment (instance attribute shadowed the class attribute).
        TestQgsSimpleFillSymbolLayer.report += renderchecker.report()
        self.assertTrue(res)

    def testDataDefinedOpacity(self):
        """Data defined symbol opacity must be honoured per feature."""
        poly_shp = os.path.join(TEST_DATA_DIR, 'polys.shp')
        poly_layer = QgsVectorLayer(poly_shp, 'Polys', 'ogr')
        self.assertTrue(poly_layer.isValid())

        layer = QgsSimpleFillSymbolLayer()
        layer.setStrokeStyle(Qt.NoPen)
        layer.setColor(QColor(200, 250, 50))
        layer.setDataDefinedProperty(QgsSymbolLayer.PropertyFillColor, QgsProperty.fromExpression(
            "if(Name='Dam', 'red', 'green')"))
        layer.setDataDefinedProperty(QgsSymbolLayer.PropertyStrokeColor, QgsProperty.fromExpression(
            "if(Name='Dam', 'magenta', 'blue')"))

        symbol = QgsFillSymbol()
        symbol.changeSymbolLayer(0, layer)
        symbol.setDataDefinedProperty(QgsSymbol.PropertyOpacity, QgsProperty.fromExpression("if(\"Value\" >10, 25, 50)"))

        poly_layer.setRenderer(QgsSingleSymbolRenderer(symbol))

        ms = QgsMapSettings()
        ms.setOutputSize(QSize(400, 400))
        ms.setOutputDpi(96)
        ms.setExtent(QgsRectangle(-118.5, 19.0, -81.4, 50.4))
        ms.setLayers([poly_layer])

        # Test rendering
        renderchecker = QgsMultiRenderChecker()
        renderchecker.setMapSettings(ms)
        renderchecker.setControlPathPrefix('symbol_simplefill')
        renderchecker.setControlName('expected_simplefill_ddopacity')
        res = renderchecker.runTest('expected_simplefill_ddopacity')
        # Fixed: was `self.report +=` — see testOpacityWithDataDefinedColor.
        TestQgsSimpleFillSymbolLayer.report += renderchecker.report()
        self.assertTrue(res)

    def renderGeometry(self, symbol, geom):
        """Render *geom* with *symbol* onto a 200x200 image and return it."""
        f = QgsFeature()
        f.setGeometry(geom)

        image = QImage(200, 200, QImage.Format_RGB32)

        painter = QPainter()
        ms = QgsMapSettings()
        extent = geom.get().boundingBox()
        # buffer extent by 10%
        if extent.width() > 0:
            extent = extent.buffered((extent.height() + extent.width()) / 20.0)
        else:
            extent = extent.buffered(10)

        ms.setExtent(extent)
        ms.setOutputSize(image.size())
        context = QgsRenderContext.fromMapSettings(ms)
        context.setPainter(painter)
        context.setScaleFactor(96 / 25.4)  # 96 DPI

        painter.begin(image)
        try:
            image.fill(QColor(0, 0, 0))
            symbol.startRender(context)
            symbol.renderFeature(f, context)
            symbol.stopRender(context)
        finally:
            painter.end()

        return image

    def imageCheck(self, name, reference_image, image):
        """Compare *image* against the stored control image; returns bool."""
        TestQgsSimpleFillSymbolLayer.report += "<h2>Render {}</h2>\n".format(name)
        temp_dir = QDir.tempPath() + '/'
        file_name = temp_dir + 'symbol_' + name + ".png"
        image.save(file_name, "PNG")

        checker = QgsRenderChecker()
        checker.setControlPathPrefix("symbol_simplefill")
        checker.setControlName("expected_" + reference_image)
        checker.setRenderedImage(file_name)
        checker.setColorTolerance(2)
        result = checker.compareImages(name, 20)
        TestQgsSimpleFillSymbolLayer.report += checker.report()
        print(TestQgsSimpleFillSymbolLayer.report)
        return result


if __name__ == '__main__':
    unittest.main()
gpl-2.0
Dik1s/volatility
volatility/plugins/privileges.py
45
6324
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
# Copyright (c) 2012, 2013 Cem Gurkok <cemgurkok@gmail.com>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility.  If not, see <http://www.gnu.org/licenses/>.
#

"""
@author:       Cem Gurkok
@license:      GNU General Public License 2.0
@contact:      cemgurkok@gmail.com
@organization: Volatility Foundation
"""

import re
# NOTE(review): utils and debug appear unused in this module — confirm no
# plugin-loading side effects before removing the imports.
import volatility.utils as utils
import volatility.obj as obj
import volatility.debug as debug
import volatility.plugins.taskmods as taskmods


class TokenXP2003(obj.ProfileModification):
    """Profile overlay for XP/2003 (NT major version < 6).

    On these OSes _TOKEN.Privileges is a pointer to an array of
    _LUID_AND_ATTRIBUTES of length PrivilegeCount, rather than the
    Vista+ present/enabled/default bitmaps.
    """
    before = ['WindowsOverlay', 'WindowsVTypes']
    conditions = {'os': lambda x: x == 'windows',
                  'major': lambda x: x < 6}

    def modification(self, profile):
        # Re-type Privileges so token.privileges() can walk the array.
        profile.merge_overlay({"_TOKEN" : [None, {
            'Privileges': [None, ['pointer', ['array',
                lambda x: x.PrivilegeCount, ['_LUID_AND_ATTRIBUTES']]]],
            }]})

# Map of LUID bit position -> (privilege name, human-readable description).
# Bit positions follow the Windows SE_* privilege LUID values.
PRIVILEGE_INFO = {
    2: ('SeCreateTokenPrivilege', "Create a token object"),
    3: ('SeAssignPrimaryTokenPrivilege', "Replace a process-level token"),
    4: ('SeLockMemoryPrivilege', "Lock pages in memory"),
    5: ('SeIncreaseQuotaPrivilege', "Increase quotas"),
    6: ('SeMachineAccountPrivilege', "Add workstations to the domain"),
    7: ('SeTcbPrivilege', "Act as part of the operating system"),
    8: ('SeSecurityPrivilege', "Manage auditing and security log"),
    9: ('SeTakeOwnershipPrivilege', "Take ownership of files/objects"),
    10: ('SeLoadDriverPrivilege', "Load and unload device drivers"),
    11: ('SeSystemProfilePrivilege', "Profile system performance"),
    12: ('SeSystemtimePrivilege', "Change the system time"),
    13: ('SeProfileSingleProcessPrivilege', "Profile a single process"),
    14: ('SeIncreaseBasePriorityPrivilege', "Increase scheduling priority"),
    15: ('SeCreatePagefilePrivilege', "Create a pagefile"),
    16: ('SeCreatePermanentPrivilege', "Create permanent shared objects"),
    17: ('SeBackupPrivilege', "Backup files and directories"),
    18: ('SeRestorePrivilege', "Restore files and directories"),
    19: ('SeShutdownPrivilege', "Shut down the system"),
    20: ('SeDebugPrivilege', "Debug programs"),
    21: ('SeAuditPrivilege', "Generate security audits"),
    22: ('SeSystemEnvironmentPrivilege', "Edit firmware environment values"),
    23: ('SeChangeNotifyPrivilege', "Receive notifications of changes to files or directories"),
    24: ('SeRemoteShutdownPrivilege', "Force shutdown from a remote system"),
    25: ('SeUndockPrivilege', "Remove computer from docking station"),
    26: ('SeSyncAgentPrivilege', "Synch directory service data"),
    27: ('SeEnableDelegationPrivilege', "Enable user accounts to be trusted for delegation"),
    28: ('SeManageVolumePrivilege', "Manage the files on a volume"),
    29: ('SeImpersonatePrivilege', "Impersonate a client after authentication"),
    30: ('SeCreateGlobalPrivilege', "Create global objects"),
    31: ('SeTrustedCredManAccessPrivilege', "Access Credential Manager as a trusted caller"),
    32: ('SeRelabelPrivilege', "Modify the mandatory integrity level of an object"),
    33: ('SeIncreaseWorkingSetPrivilege', "Allocate more memory for user applications"),
    34: ('SeTimeZonePrivilege', "Adjust the time zone of the computer's internal clock"),
    35: ('SeCreateSymbolicLinkPrivilege', "Required to create a symbolic link"),
}


class Privs(taskmods.DllList):
    "Display process privileges"

    def __init__(self, config, *args):
        taskmods.DllList.__init__(self, config, *args)
        # -s: only show privileges that look suspicious (see render_text).
        config.add_option("SILENT", short_option = "s", default = False,
                          help = "Suppress less meaningful results",
                          action = "store_true")
        # -r: case-insensitive regex on the privilege name.
        config.add_option('REGEX', short_option = 'r',
                          help = 'Show privileges matching REGEX',
                          action = 'store', type = 'string')

    def render_text(self, outfd, data):
        """Render one table row per (process, privilege) pair.

        data is the iterable of _EPROCESS tasks produced by DllList's
        calculate(); each task's token yields
        (value, present, enabled, default) tuples.
        """
        self.table_header(outfd,
                          [("Pid", "8"),
                           ("Process", "16"),
                           ("Value", "6"),
                           ("Privilege", "36"),
                           ("Attributes", "24"),
                           ("Description", "")])

        if self._config.REGEX:
            # Compiled once, up front, rather than per row.
            priv_re = re.compile(self._config.REGEX, re.I)

        for task in data:
            for value, present, enabled, default in task.get_token().privileges():
                # Skip privileges whose bit positions cannot be
                # translated to a privilege name
                try:
                    name, desc = PRIVILEGE_INFO[int(value)]
                except KeyError:
                    continue

                # If we're operating in silent mode, only print privileges
                # that have been explicitly enabled by the process or that
                # appear to have been DKOM'd via Ceasar's proposed attack.
                if self._config.SILENT:
                    if not ((enabled and not default) or (enabled and not present)):
                        continue

                # Set the attributes
                attributes = []
                if present:
                    attributes.append("Present")
                if enabled:
                    attributes.append("Enabled")
                if default:
                    attributes.append("Default")

                if self._config.REGEX:
                    if not priv_re.search(name):
                        continue

                self.table_row(outfd,
                               task.UniqueProcessId,
                               task.ImageFileName,
                               value, name,
                               ",".join(attributes),
                               desc)
gpl-2.0
TeamAADGT/CMPUT404-project-socialdistribution
background_task/tasks.py
1
10003
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from datetime import datetime, timedelta
from multiprocessing.pool import ThreadPool
import logging
import os
import sys

from compat import atomic
from compat import import_module
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible

from background_task.exceptions import BackgroundTaskError
from background_task.models import Task
from background_task.settings import app_settings
from background_task import signals


logger = logging.getLogger(__name__)

# Shared pool used when BACKGROUND_TASK_RUN_ASYNC is enabled; sized once at
# import time from settings.
_thread_pool = ThreadPool(processes=app_settings.BACKGROUND_TASK_ASYNC_THREADS)


def bg_runner(proxy_task, task=None, *args, **kwargs):
    """
    Executes the function attached to task. Used to enable threads.
    If a Task instance is provided, args and kwargs are ignored and retrieved
    from the Task itself.

    On success the Task row is archived (CompletedTask), any repetition is
    scheduled, and the row is deleted; on any exception the task is
    rescheduled with the captured traceback.
    """
    signals.task_started.send(Task)
    try:
        func = getattr(proxy_task, 'task_function', None)
        if isinstance(task, Task):
            args, kwargs = task.params()
        else:
            # No Task given: look the row up by name/args/kwargs (and queue,
            # if the proxy is queue-bound) so bookkeeping below can run.
            task_name = getattr(proxy_task, 'name', None)
            task_queue = getattr(proxy_task, 'queue', None)
            task_qs = Task.objects.get_task(task_name=task_name, args=args, kwargs=kwargs)
            if task_queue:
                task_qs = task_qs.filter(queue=task_queue)
            if task_qs:
                task = task_qs[0]
        if func is None:
            raise BackgroundTaskError("Function is None, can't execute!")
        func(*args, **kwargs)

        if task:
            # task done, so can delete it
            task.increment_attempts()
            completed = task.create_completed_task()
            signals.task_successful.send(sender=task.__class__, task_id=task.id, completed_task=completed)
            task.create_repetition()
            task.delete()
            logger.info('Ran task and deleting %s', task)
    except Exception as ex:
        t, e, traceback = sys.exc_info()
        if task:
            logger.error('Rescheduling %s', task, exc_info=(t, e, traceback))
            signals.task_error.send(sender=ex.__class__, task=task)
            task.reschedule(t, e, traceback)
        # Break the traceback reference cycle explicitly (Python 2 habit).
        del traceback
    signals.task_finished.send(Task)


class Tasks(object):
    """Registry of TaskProxy objects plus the runner used to execute them."""

    def __init__(self):
        self._tasks = {}
        self._runner = DBTaskRunner()
        self._task_proxy_class = TaskProxy
        self._bg_runner = bg_runner

    def background(self, name=None, schedule=None, queue=None):
        '''
        decorator to turn a regular function into
        something that gets run asynchronously in
        the background, at a later time
        '''

        # see if used as simple decorator
        # where first arg is the function to be decorated
        fn = None
        if name and callable(name):
            fn = name
            name = None

        def _decorator(fn):
            _name = name
            if not _name:
                _name = '%s.%s' % (fn.__module__, fn.__name__)
            proxy = self._task_proxy_class(_name, fn, schedule, queue, self._runner)
            self._tasks[_name] = proxy
            return proxy

        if fn:
            return _decorator(fn)

        return _decorator

    def run_task(self, task_name, args=None, kwargs=None):
        """Execute a registered task, either inline or on the thread pool."""
        # task_name can be either the name of a task or a Task instance.
        if isinstance(task_name, Task):
            task = task_name
            task_name = task.task_name
            # When we have a Task instance we do not need args and kwargs, but they are kept for backward compatibility
            args = []
            kwargs = {}
        else:
            task = None
        proxy_task = self._tasks[task_name]
        if app_settings.BACKGROUND_TASK_RUN_ASYNC:
            _thread_pool.apply_async(func=self._bg_runner, args=(proxy_task, task) + tuple(args), kwds=kwargs)
        else:
            self._bg_runner(proxy_task, task, *args, **kwargs)

    def run_next_task(self, queue=None):
        # Delegates locking + execution of the next due task to the runner.
        return self._runner.run_next_task(self, queue)


class TaskSchedule(object):
    """Value object bundling run time, priority and dedup action."""

    # Actions controlling how scheduling interacts with existing rows:
    SCHEDULE = 0             # always create a new task
    RESCHEDULE_EXISTING = 1  # update run_at/priority of a matching unlocked task
    CHECK_EXISTING = 2       # no-op if a matching unlocked task already exists

    def __init__(self, run_at=None, priority=None, action=None):
        self._run_at = run_at
        self._priority = priority
        self._action = action

    @classmethod
    def create(self, schedule):
        # NOTE(review): first parameter of this classmethod is named `self`
        # rather than `cls` — behaviorally fine, but confusing; confirm
        # before renaming (public API surface).
        if isinstance(schedule, TaskSchedule):
            return schedule
        priority = None
        run_at = None
        action = None
        if schedule:
            # Accept an int (seconds from now), timedelta, datetime, or a
            # dict with run_at/priority/action keys.
            if isinstance(schedule, (int, timedelta, datetime)):
                run_at = schedule
            else:
                run_at = schedule.get('run_at', None)
                priority = schedule.get('priority', None)
                action = schedule.get('action', None)
        return TaskSchedule(run_at=run_at, priority=priority, action=action)

    def merge(self, schedule):
        """Return a new TaskSchedule; self's set fields win over schedule's."""
        params = {}
        for name in ['run_at', 'priority', 'action']:
            attr_name = '_%s' % name
            value = getattr(self, attr_name, None)
            if value is None:
                params[name] = getattr(schedule, attr_name, None)
            else:
                params[name] = value
        return TaskSchedule(**params)

    @property
    def run_at(self):
        # Normalize int (seconds offset) or timedelta into an absolute time;
        # unset falls back to "now".
        run_at = self._run_at or timezone.now()
        if isinstance(run_at, int):
            run_at = timezone.now() + timedelta(seconds=run_at)
        if isinstance(run_at, timedelta):
            run_at = timezone.now() + run_at
        return run_at

    @property
    def priority(self):
        return self._priority or 0

    @property
    def action(self):
        return self._action or TaskSchedule.SCHEDULE

    def __repr__(self):
        return 'TaskSchedule(run_at=%s, priority=%s)' % (self._run_at,
                                                         self._priority)

    def __eq__(self, other):
        return self._run_at == other._run_at \
           and self._priority == other._priority \
           and self._action == other._action


class DBTaskRunner(object):
    '''
    Encapsulate the model related logic in here, in case
    we want to support different queues in the future
    '''

    def __init__(self):
        # Worker identity used when locking tasks; one per OS process.
        self.worker_name = str(os.getpid())

    def schedule(self, task_name, args, kwargs, run_at=None,
                 priority=0, action=TaskSchedule.SCHEDULE, queue=None,
                 verbose_name=None, creator=None,
                 repeat=None, repeat_until=None):
        '''Simply create a task object in the database'''
        task = Task.objects.new_task(task_name, args, kwargs, run_at, priority,
                                     queue, verbose_name, creator, repeat,
                                     repeat_until)

        if action != TaskSchedule.SCHEDULE:
            # Dedup against existing *unlocked* tasks with the same hash.
            task_hash = task.task_hash
            now = timezone.now()
            unlocked = Task.objects.unlocked(now)
            existing = unlocked.filter(task_hash=task_hash)
            if queue:
                existing = existing.filter(queue=queue)
            if action == TaskSchedule.RESCHEDULE_EXISTING:
                updated = existing.update(run_at=run_at, priority=priority)
                if updated:
                    return  # reused an existing row; nothing new saved
            elif action == TaskSchedule.CHECK_EXISTING:
                if existing.count():
                    return  # an equivalent task is already pending

        task.save()
        signals.task_created.send(sender=self.__class__, task=task)
        return task

    @atomic
    def get_task_to_run(self, tasks, queue=None):
        # Consider only the first 5 available tasks that are actually
        # registered in this process, then race to lock one of them.
        available_tasks = [task for task in Task.objects.find_available(queue)
                           if task.task_name in tasks._tasks][:5]
        for task in available_tasks:
            # try to lock task
            locked_task = task.lock(self.worker_name)
            if locked_task:
                return locked_task
        return None

    @atomic
    def run_task(self, tasks, task):
        logger.info('Running %s', task)
        tasks.run_task(task)

    @atomic
    def run_next_task(self, tasks, queue=None):
        # we need to commit to make sure
        # we can see new tasks as they arrive
        task = self.get_task_to_run(tasks, queue)
        # transaction.commit()
        if task:
            self.run_task(tasks, task)
            # transaction.commit()
            return True
        else:
            return False


@python_2_unicode_compatible
class TaskProxy(object):
    """Callable stand-in returned by @tasks.background; calling it schedules
    the task instead of running it."""

    def __init__(self, name, task_function, schedule, queue, runner):
        self.name = name
        # NOTE(review): `self.now` is bound to the same function object as
        # `self.task_function` — nothing in this module reads `now`; looks
        # like a leftover alias, confirm against callers before removing.
        self.now = self.task_function = task_function
        self.runner = runner
        self.schedule = TaskSchedule.create(schedule)
        self.queue = queue

    def __call__(self, *args, **kwargs):
        # Per-call overrides are popped from kwargs so they are not passed
        # through to the task function itself.
        schedule = kwargs.pop('schedule', None)
        schedule = TaskSchedule.create(schedule).merge(self.schedule)
        run_at = schedule.run_at
        priority = kwargs.pop('priority', schedule.priority)
        action = schedule.action
        queue = kwargs.pop('queue', self.queue)
        verbose_name = kwargs.pop('verbose_name', None)
        creator = kwargs.pop('creator', None)
        repeat = kwargs.pop('repeat', None)
        repeat_until = kwargs.pop('repeat_until', None)
        return self.runner.schedule(self.name, args, kwargs, run_at, priority,
                                    action, queue, verbose_name, creator,
                                    repeat, repeat_until)

    def __str__(self):
        return 'TaskProxy(%s)' % self.name


tasks = Tasks()


def autodiscover():
    """
    Autodiscover tasks.py files in much the same way as admin app
    """
    import imp
    from django.conf import settings
    for app in settings.INSTALLED_APPS:
        try:
            app_path = import_module(app).__path__
        except (AttributeError, ImportError):
            continue
        try:
            imp.find_module('tasks', app_path)
        except ImportError:
            continue
        import_module("%s.tasks" % app)
apache-2.0
vitaly-krugl/nupic
src/nupic/frameworks/opf/opf_basic_environment.py
10
30580
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """ This script provides a file-based implementation of the ``opf_environment`` interfaces (OPF). This "basic" implementation of the interface (need a better name instead of "basic") uses files (.csv, etc.) versus Nupic's implementation that would use databases. This implementation is used by research tools, such as ``scripts/run_opf_experiment.py``. The ``opf_environment`` interfaces encapsulate external specifics, such as data source (e.g., .csv file or database, etc.), prediction sink (.csv file or databse, etc.), report and serialization destination, etc. 
""" from abc import ABCMeta, abstractmethod import copy import csv import json import logging import logging.handlers import os import shutil import StringIO import opf_utils import opf_environment as opfenv from nupic.data.file_record_stream import FileRecordStream from nupic.data.stream_reader import StreamReader from nupic.data.field_meta import (FieldMetaInfo, FieldMetaType, FieldMetaSpecial) from nupic.data.inference_shifter import InferenceShifter from opf_utils import InferenceType, InferenceElement class PredictionMetricsLoggerIface(object): """ This is the interface for output of prediction metrics. """ __metaclass__ = ABCMeta @abstractmethod def emitPeriodicMetrics(self, metrics): """ Emits periodic metrics to stdout in JSON. :param metrics: A list of metrics as returned by :meth:`nupic.frameworks.opf.opf_task_driver.OPFTaskDriver.getMetrics`. """ @abstractmethod def emitFinalMetrics(self, metrics): """ Emits final metrics. .. note:: the intention is that the final metrics may go to a different place (e.g., csv file) versus :meth:`emitPeriodicMetrics` (e.g., stdout) :param metrics: A list of metrics as returned by :meth:`nupic.frameworks.opf.opf_task_driver.OPFTaskDriver.getMetrics`. """ class DatasetReaderIface(object): """ This is the interface class for a dataset readers """ __metaclass__ = ABCMeta @abstractmethod def getDatasetFieldMetaData(self): """ :returns: a tuple of dataset field metadata descriptors that are arranged in the same order as the columns in the dataset. Each field metadata descriptor is of type :class:`nupic.data.field_meta.FieldMetaInfo` """ @abstractmethod def next(self): """ :returns: The next record from the dataset. The returned record object is of the same structure as returned by :meth:`nupic.data.record_stream.RecordStreamIface.getNextRecord`. Returns ``None`` if the next record is not available yet. :raises: (StopIteration) if a hard "end of file" has been reached and no more records will be forthcoming. 
""" class PredictionWriterIface(object): """ This class defines the interface for prediction writer implementation returned by an object factory conforming to PredictionWriterFactoryIface """ __metaclass__ = ABCMeta @abstractmethod def close(self): """ Closes the writer (e.g., close the underlying file) """ @abstractmethod def append(self, inputRow, predictionRow, sequenceReset, metrics=None): """ Emits a single prediction as input versus predicted. inputRow: A tuple or list of fields comprising the input data row. predictionRow: A tuple or list of fields comprising the prediction, or None if prediction is not available. The None use case is intended for temporal inference where there is no matching prediction for the same timestep as the given ground truth, such as the case with the very first input record. sequenceReset: A value that tests True if the input row was accompanied by a sequence reset signal; False if not accompanied by a sequence reset signal. metrics: OPTIONAL -A dictionary of metrics that will be written out with every prediction. The keys are the automatically generated metric labels (see MetricSpec in prediction_metrics_manager.py), and the value is the real number value of the metric. """ @abstractmethod def checkpoint(self, checkpointSink, maxRows): """ Save a checkpoint of the prediction output stream. The checkpoint comprises up to maxRows of the most recent inference records. Parameters: ---------------------------------------------------------------------- checkpointSink: A File-like object where predictions checkpoint data, if any, will be stored. maxRows: Maximum number of most recent inference rows to checkpoint. """ class BasicPredictionMetricsLogger(PredictionMetricsLoggerIface): """ This is the file-based implementation of the interface for output of prediction metrics TODO: where should periodic and final predictions go (versus stdout) :param experimentDir: (string) path to directory for experiment to run. 
:param label: (string) used to distinguish the output's container (e.g., filename, directory name, property key, etc.). """ def __init__(self, experimentDir, label): self.__experimentDir = experimentDir self.__label = label return def __repr__(self): return ("%s(experimentDir=%r,label=%r)" % (self.__class__.__name__, self.__experimentDir, self.__label)) def emitPeriodicMetrics(self, metrics): jsonString = self._translateMetricsToJSON(metrics, label="PERIODIC") self._emitJSONStringToStdout(jsonString) return def emitFinalMetrics(self, metrics): jsonString = self._translateMetricsToJSON(metrics, label="FINAL") self._emitJSONStringToStdout(jsonString) return def _translateMetricsToJSON(self, metrics, label): """ Translates the given metrics value to JSON string metrics: A list of dictionaries per OPFTaskDriver.getMetrics(): Returns: JSON string representing the given metrics object. """ # Transcode the MetricValueElement values into JSON-compatible # structure metricsDict = metrics # Convert the structure to a display-friendly JSON string def _mapNumpyValues(obj): """ """ import numpy if isinstance(obj, numpy.float32): return float(obj) elif isinstance(obj, numpy.bool_): return bool(obj) elif isinstance(obj, numpy.ndarray): return obj.tolist() else: raise TypeError("UNEXPECTED OBJ: %s; class=%s" % (obj, obj.__class__)) jsonString = json.dumps(metricsDict, indent=4, default=_mapNumpyValues) return jsonString def _emitJSONStringToStdout(self, jsonString): print '<JSON>' print jsonString print '</JSON>' class BasicDatasetReader(DatasetReaderIface): """ This is a CSV file-based implementation of :class:`DatasetReaderIface`. :param streamDefDict: stream definition, as defined `here <stream-def.html>`_. 
""" def __init__(self, streamDefDict): # Create the object to read from self._reader = StreamReader(streamDefDict, saveOutput=True) return def __iter__(self): return self def next(self): row = self._reader.getNextRecordDict() if row == None: raise StopIteration return row def getDatasetFieldMetaData(self): return FieldMetaInfo.createListFromFileFieldList(self._reader.getFields()) class _BasicPredictionWriter(PredictionWriterIface): """ This class defines the basic (file-based) implementation of PredictionWriterIface, whose instances are returned by BasicPredictionWriterFactory """ def __init__(self, experimentDir, label, inferenceType, fields, metricNames=None, checkpointSource=None): """ Constructor experimentDir: experiment directory path that contains description.py label: A label string to incorporate into the filename. inferenceElements: inferenceType: An constant from opf_utils.InferenceType for the requested prediction writer fields: a non-empty sequence of nupic.data.fieldmeta.FieldMetaInfo representing fields that will be emitted to this prediction writer metricNames: OPTIONAL - A list of metric names that well be emiited by this prediction writer checkpointSource: If not None, a File-like object containing the previously-checkpointed predictions for setting the initial contents of this PredictionOutputStream. Will be copied before returning, if needed. 
""" #assert len(fields) > 0 self.__experimentDir = experimentDir # opf_utils.InferenceType kind value self.__inferenceType = inferenceType # A tuple of nupic.data.fieldmeta.FieldMetaInfo self.__inputFieldsMeta = tuple(copy.deepcopy(fields)) self.__numInputFields = len(self.__inputFieldsMeta) self.__label = label if metricNames is not None: metricNames.sort() self.__metricNames = metricNames # Define our output field meta info self.__outputFieldsMeta = [] # The list of inputs that we include in the prediction output self._rawInputNames = [] # Output dataset self.__datasetPath = None self.__dataset = None # Save checkpoint data until we're ready to create the output dataset self.__checkpointCache = None if checkpointSource is not None: checkpointSource.seek(0) self.__checkpointCache = StringIO.StringIO() shutil.copyfileobj(checkpointSource, self.__checkpointCache) return def __openDatafile(self, modelResult): """Open the data file and write the header row""" # Write reset bit resetFieldMeta = FieldMetaInfo( name="reset", type=FieldMetaType.integer, special = FieldMetaSpecial.reset) self.__outputFieldsMeta.append(resetFieldMeta) # ----------------------------------------------------------------------- # Write each of the raw inputs that go into the encoders rawInput = modelResult.rawInput rawFields = rawInput.keys() rawFields.sort() for field in rawFields: if field.startswith('_') or field == 'reset': continue value = rawInput[field] meta = FieldMetaInfo(name=field, type=FieldMetaType.string, special=FieldMetaSpecial.none) self.__outputFieldsMeta.append(meta) self._rawInputNames.append(field) # ----------------------------------------------------------------------- # Handle each of the inference elements for inferenceElement, value in modelResult.inferences.iteritems(): inferenceLabel = InferenceElement.getLabel(inferenceElement) # TODO: Right now we assume list inferences are associated with # The input field metadata if type(value) in (list, tuple): # Append input 
and prediction field meta-info self.__outputFieldsMeta.extend(self.__getListMetaInfo(inferenceElement)) elif isinstance(value, dict): self.__outputFieldsMeta.extend(self.__getDictMetaInfo(inferenceElement, value)) else: if InferenceElement.getInputElement(inferenceElement): self.__outputFieldsMeta.append(FieldMetaInfo(name=inferenceLabel+".actual", type=FieldMetaType.string, special = '')) self.__outputFieldsMeta.append(FieldMetaInfo(name=inferenceLabel, type=FieldMetaType.string, special = '')) if self.__metricNames: for metricName in self.__metricNames: metricField = FieldMetaInfo( name = metricName, type = FieldMetaType.float, special = FieldMetaSpecial.none) self.__outputFieldsMeta.append(metricField) # Create the inference directory for our experiment inferenceDir = _FileUtils.createExperimentInferenceDir(self.__experimentDir) # Consctruct the prediction dataset file path filename = (self.__label + "." + opf_utils.InferenceType.getLabel(self.__inferenceType) + ".predictionLog.csv") self.__datasetPath = os.path.join(inferenceDir, filename) # Create the output dataset print "OPENING OUTPUT FOR PREDICTION WRITER AT: %r" % self.__datasetPath print "Prediction field-meta: %r" % ([tuple(i) for i in self.__outputFieldsMeta],) self.__dataset = FileRecordStream(streamID=self.__datasetPath, write=True, fields=self.__outputFieldsMeta) # Copy data from checkpoint cache if self.__checkpointCache is not None: self.__checkpointCache.seek(0) reader = csv.reader(self.__checkpointCache, dialect='excel') # Skip header row try: header = reader.next() except StopIteration: print "Empty record checkpoint initializer for %r" % (self.__datasetPath,) else: assert tuple(self.__dataset.getFieldNames()) == tuple(header), \ "dataset.getFieldNames(): %r; predictionCheckpointFieldNames: %r" % ( tuple(self.__dataset.getFieldNames()), tuple(header)) # Copy the rows from checkpoint numRowsCopied = 0 while True: try: row = reader.next() except StopIteration: break #print "DEBUG: restoring row 
from checkpoint: %r" % (row,) self.__dataset.appendRecord(row) numRowsCopied += 1 self.__dataset.flush() print "Restored %d rows from checkpoint for %r" % ( numRowsCopied, self.__datasetPath) # Dispose of our checkpoint cache self.__checkpointCache.close() self.__checkpointCache = None return def setLoggedMetrics(self, metricNames): """ Tell the writer which metrics should be written Parameters: ----------------------------------------------------------------------- metricsNames: A list of metric lables to be written """ if metricNames is None: self.__metricNames = set([]) else: self.__metricNames = set(metricNames) def close(self): """ [virtual method override] Closes the writer (e.g., close the underlying file) """ if self.__dataset: self.__dataset.close() self.__dataset = None return def __getListMetaInfo(self, inferenceElement): """ Get field metadata information for inferences that are of list type TODO: Right now we assume list inferences are associated with the input field metadata """ fieldMetaInfo = [] inferenceLabel = InferenceElement.getLabel(inferenceElement) for inputFieldMeta in self.__inputFieldsMeta: if InferenceElement.getInputElement(inferenceElement): outputFieldMeta = FieldMetaInfo( name=inputFieldMeta.name + ".actual", type=inputFieldMeta.type, special=inputFieldMeta.special ) predictionField = FieldMetaInfo( name=inputFieldMeta.name + "." 
+ inferenceLabel, type=inputFieldMeta.type, special=inputFieldMeta.special ) fieldMetaInfo.append(outputFieldMeta) fieldMetaInfo.append(predictionField) return fieldMetaInfo def __getDictMetaInfo(self, inferenceElement, inferenceDict): """Get field metadate information for inferences that are of dict type""" fieldMetaInfo = [] inferenceLabel = InferenceElement.getLabel(inferenceElement) if InferenceElement.getInputElement(inferenceElement): fieldMetaInfo.append(FieldMetaInfo(name=inferenceLabel+".actual", type=FieldMetaType.string, special = '')) keys = sorted(inferenceDict.keys()) for key in keys: fieldMetaInfo.append(FieldMetaInfo(name=inferenceLabel+"."+str(key), type=FieldMetaType.string, special='')) return fieldMetaInfo def append(self, modelResult): """ [virtual method override] Emits a single prediction as input versus predicted. modelResult: An opf_utils.ModelResult object that contains the model input and output for the current timestep. """ #print "DEBUG: _BasicPredictionWriter: writing modelResult: %r" % (modelResult,) # If there are no inferences, don't write anything inferences = modelResult.inferences hasInferences = False if inferences is not None: for value in inferences.itervalues(): hasInferences = hasInferences or (value is not None) if not hasInferences: return if self.__dataset is None: self.__openDatafile(modelResult) inputData = modelResult.sensorInput sequenceReset = int(bool(inputData.sequenceReset)) outputRow = [sequenceReset] # ----------------------------------------------------------------------- # Write out the raw inputs rawInput = modelResult.rawInput for field in self._rawInputNames: outputRow.append(str(rawInput[field])) # ----------------------------------------------------------------------- # Write out the inference element info for inferenceElement, outputVal in inferences.iteritems(): inputElement = InferenceElement.getInputElement(inferenceElement) if inputElement: inputVal = getattr(inputData, inputElement) else: inputVal = 
None if type(outputVal) in (list, tuple): assert type(inputVal) in (list, tuple, None) for iv, ov in zip(inputVal, outputVal): # Write actual outputRow.append(str(iv)) # Write inferred outputRow.append(str(ov)) elif isinstance(outputVal, dict): if inputVal is not None: # If we have a predicted field, include only that in the actuals if modelResult.predictedFieldName is not None: outputRow.append(str(inputVal[modelResult.predictedFieldName])) else: outputRow.append(str(inputVal)) for key in sorted(outputVal.keys()): outputRow.append(str(outputVal[key])) else: if inputVal is not None: outputRow.append(str(inputVal)) outputRow.append(str(outputVal)) metrics = modelResult.metrics for metricName in self.__metricNames: outputRow.append(metrics.get(metricName, 0.0)) #print "DEBUG: _BasicPredictionWriter: writing outputRow: %r" % (outputRow,) self.__dataset.appendRecord(outputRow) self.__dataset.flush() return def checkpoint(self, checkpointSink, maxRows): """ [virtual method override] Save a checkpoint of the prediction output stream. The checkpoint comprises up to maxRows of the most recent inference records. Parameters: ---------------------------------------------------------------------- checkpointSink: A File-like object where predictions checkpoint data, if any, will be stored. maxRows: Maximum number of most recent inference rows to checkpoint. 
""" checkpointSink.truncate() if self.__dataset is None: if self.__checkpointCache is not None: self.__checkpointCache.seek(0) shutil.copyfileobj(self.__checkpointCache, checkpointSink) checkpointSink.flush() return else: # Nothing to checkpoint return self.__dataset.flush() totalDataRows = self.__dataset.getDataRowCount() if totalDataRows == 0: # Nothing to checkpoint return # Open reader of prediction file (suppress missingValues conversion) reader = FileRecordStream(self.__datasetPath, missingValues=[]) # Create CSV writer for writing checkpoint rows writer = csv.writer(checkpointSink) # Write the header row to checkpoint sink -- just field names writer.writerow(reader.getFieldNames()) # Determine number of rows to checkpoint numToWrite = min(maxRows, totalDataRows) # Skip initial rows to get to the rows that we actually need to checkpoint numRowsToSkip = totalDataRows - numToWrite for i in xrange(numRowsToSkip): reader.next() # Write the data rows to checkpoint sink numWritten = 0 while True: row = reader.getNextRecord() if row is None: break; row = [str(element) for element in row] #print "DEBUG: _BasicPredictionWriter: checkpointing row: %r" % (row,) writer.writerow(row) numWritten +=1 assert numWritten == numToWrite, \ "numWritten (%s) != numToWrite (%s)" % (numWritten, numToWrite) checkpointSink.flush() return ############################################################################### # Prediction Log adapters ############################################################################### class NonTemporalPredictionLogAdapter(object): """ This class serves as an adapter for a client-instantiated Non-temporal log writer. :param writer: (:class:`PredictionWriterIface`) Non-temporal prediction log writer """ def __init__(self, writer): self.__writer = writer return def close(self): self.__writer.close() self.__writer = None return def update(self, modelResult): """ Emit a input/prediction pair, if possible. 
modelResult: An opf_utils.ModelResult object that contains the model input and output for the current timestep. """ self.__writer.append(modelResult) return class TemporalPredictionLogAdapter(object): """This class serves as an adapter for a client-instantiated Temporal log writer. It maintains a prediction FIFO for matching T(i+1) input record with T(i=1) prediction for outputting to the log writer. TODO: Right now this is broken """ def __init__(self, writer): """ writer: Non-temporal prediction log writer conforming to PredictionWriterIface interface. """ self.__logger = logging.getLogger(".".join( ['com.numenta', self.__class__.__module__, self.__class__.__name__])) self.__writer = writer self.__inferenceShifter = InferenceShifter() return def close(self): self.__writer.close() self.__writer = None return def update(self, modelResult): """ Queue up the T(i+1) prediction value and emit a T(i) input/prediction pair, if possible. E.g., if the previous T(i-1) iteration was learn-only, then we would not have a T(i) prediction in our FIFO and would not be able to emit a meaningful input/prediction pair. modelResult: An opf_utils.ModelResult object that contains the model input and output for the current timestep. """ self.__writer.append(self.__inferenceShifter.shift(modelResult)) class BasicPredictionLogger(opfenv.PredictionLoggerIface): """ This class implements logging of predictions to files as actual vs predicted values. :param fields: (list) of :class:`nupic.data.field_meta.FieldMetaInfo` objects representing the encoder-mapped data row field value sequences that will be emitted to this prediction logger. :param experimentDir: (string) experiment directory path that contains description.py :param label: (string) to incorporate into the filename. :param checkpointSource: If not None, a File-like object containing the previously-checkpointed predictions for setting the initial contents of this output stream. Will be copied before returning, if needed. 
""" def __init__(self, fields, experimentDir, label, inferenceType, checkpointSource=None): #assert len(fields) > 0 self.__reprString = ( "%s(fields=%r)" % ( self.__class__.__name__, fields)) self.__inputFieldsMeta = tuple(copy.deepcopy(fields)) self.__experimentDir = experimentDir self.__label = label self.__inferenceType = inferenceType self.__writer = None self.__logAdapter = None self.__loggedMetricNames = None # Save checkpoint data until we're ready to create the output writer self.__checkpointCache = None if checkpointSource is not None: checkpointSource.seek(0) self.__checkpointCache = StringIO.StringIO() shutil.copyfileobj(checkpointSource, self.__checkpointCache) return def __repr__(self): return self.__reprString def close(self): if self.__logAdapter: self.__logAdapter.close() self.__logAdapter = None return def writeRecord(self, modelResult): self.writeRecords([modelResult]) return def writeRecords(self, modelResults, progressCB=None): # Instantiate the logger if it doesn't exist yet if self.__logAdapter is None and modelResults: self.__writer = _BasicPredictionWriter( experimentDir=self.__experimentDir, label=self.__label, inferenceType=self.__inferenceType, fields=self.__inputFieldsMeta, metricNames=self.__loggedMetricNames, checkpointSource=self.__checkpointCache) # Dispose of our checkpoint cache now if self.__checkpointCache is not None: self.__checkpointCache.close() self.__checkpointCache = None if InferenceType.isTemporal(self.__inferenceType): logAdapterClass = TemporalPredictionLogAdapter else: logAdapterClass = NonTemporalPredictionLogAdapter self.__logAdapter = logAdapterClass(self.__writer) self.__writer.setLoggedMetrics(self.__loggedMetricNames) for modelResult in modelResults: if modelResult.inferences is not None: # ----------------------------------------------------------------------- # Update the prediction log self.__logAdapter.update(modelResult) else: # Handle the learn-only scenario: pass input to existing logAdapters 
self.__logAdapter.update(modelResult) return def setLoggedMetrics(self, metricNames): self.__loggedMetricNames = metricNames if self.__writer is not None: self.__writer.setLoggedMetrics(metricNames) def checkpoint(self, checkpointSink, maxRows): checkpointSink.truncate() if self.__writer is None: if self.__checkpointCache is not None: self.__checkpointCache.seek(0) shutil.copyfileobj(self.__checkpointCache, checkpointSink) checkpointSink.flush() return else: # Nothing to checkpoint return self.__writer.checkpoint(checkpointSink, maxRows) return class _FileUtils(object): @staticmethod def getExperimentInferenceDirPath(experimentDir): """ experimentDir: experiment directory path that contains description.py Returns: experiment inference directory path string (the path may not yet exist - see createExperimentInferenceDir()) """ return os.path.abspath(os.path.join(experimentDir, "inference")) @classmethod def createExperimentInferenceDir(cls, experimentDir): """ Creates the inference output directory for the given experiment experimentDir: experiment directory path that contains description.py Returns: path of the inference output directory """ path = cls.getExperimentInferenceDirPath(experimentDir) cls.makeDirectory(path) return path @staticmethod def makeDirectory(path): """ Makes directory for the given directory path if it doesn't already exist in the filesystem. Creates all requested directory segments as needed. path: path of the directory to create. Returns: nothing """ # Create the experiment directory # TODO Is default mode (0777) appropriate? try: os.makedirs(path) except OSError as e: if e.errno == os.errno.EEXIST: #print "Experiment directory already exists (that's okay)." pass else: raise return def test(): #testLogging() return #def testLogging(): # dir = os.path.expanduser('~/nupic/trunk/examples/opf/experiments/opfrunexperiment_test/base') # outfile = "test.log" # message = "This is a test message." 
# filepath = "%s/%s" % (dir,outfile) # # if os.path.exists(filepath): # os.remove(filepath) # # logOutputDesc = dict( # outputDestination = [outfile], # level = "DEBUG", # format = '%(levelname)10s: %(asctime)s - %(name)s. %(message)s' # ) # logHandlerFactory = BasicLoggingHandlerFactory(dir) # logHandlerList = logHandlerFactory(logOutputDesc) # for handler in logHandlerList: # logging.root.addHandler(handler) # # logger = logging.getLogger("test.logger") # logger.setLevel(logging.DEBUG) # # logger.debug(message) # logger.info(message) # # f = open(filepath) # fcontents = f.read() # import string # c = string.count(fcontents, message) # assert(c == 2) # os.remove(filepath) # # print "Logging test passed." # return if __name__ == "__main__": test()
agpl-3.0
nck0405/MyOwn
modules/s3/sync_adapter/mcb.py
8
9252
# -*- coding: utf-8 -*-

""" S3 Synchronization: Peer Repository Adapter

    @copyright: 2014 (c) Sahana Software Foundation
    @license: MIT

    Permission is hereby granted, free of charge, to any person
    obtaining a copy of this software and associated documentation
    files (the "Software"), to deal in the Software without
    restriction, including without limitation the rights to use,
    copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the
    Software is furnished to do so, subject to the following
    conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
    OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
    HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
    WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
    OTHER DEALINGS IN THE SOFTWARE.
"""

# NOTE: Python 2 module (urllib2, "except X, e" syntax below).
import sys

import urllib, urllib2

# "current" (thread-local request environment) comes from the
# web2py/Gluon star-import below.
from gluon import *

from ..s3sync import S3SyncBaseAdapter

# =============================================================================
class S3SyncAdapter(S3SyncBaseAdapter):
    """ Mariner CommandBridge Synchronization Adapter

        Push-only adapter: data is exported as S3XML, transformed with
        the "mcb" export stylesheet and POSTed to the CommandBridge
        "BulkStream" endpoint. Pull is not implemented.

        @status: experimental
    """

    # -------------------------------------------------------------------------
    def register(self):
        """
            Register at the repository (does nothing in CommandBridge)

            @return: True if successful, otherwise False
        """

        # No registration procedure in the CommandBridge API
        return True

    # -------------------------------------------------------------------------
    def login(self):
        """
            Login to the repository (does nothing in CommandBridge)

            @return: None if successful, otherwise error message
        """

        # Authentication happens per-request via the site key header
        # (see _send_request), so there is no login step.
        return None

    # -------------------------------------------------------------------------
    def pull(self, task, onconflict=None):
        """
            Pull updates from this repository

            @param task: the task Row
            @param onconflict: synchronization conflict resolver

            @return: tuple (error, mtime), with error=None if successful,
                     else error=message, and mtime=modification timestamp
                     of the youngest record received
        """

        # Not supported by this adapter - report it as an error rather
        # than failing silently, so the scheduler logs the attempt.
        error = "CommandBridge API pull not implemented"
        current.log.error(error)

        return (error, None)

    # -------------------------------------------------------------------------
    def push(self, task):
        """
            Push data for a task

            @param task: the task Row

            @return: tuple (error, mtime), with error=None if successful,
                     else error=message, and mtime=modification timestamp
                     of the youngest record sent
        """

        xml = current.xml

        repository = self.repository
        resource_name = task.resource_name

        current.log.debug("S3SyncCommandBridge.push(%s, %s)" %
                          (repository.url, resource_name))

        # Define the resource
        # include_deleted=True so that deletions are propagated too
        resource = current.s3db.resource(resource_name,
                                         include_deleted=True)

        # Export stylesheet (S3XML => CommandBridge format)
        folder = current.request.folder
        import os
        stylesheet = os.path.join(folder,
                                  "static",
                                  "formats",
                                  "mcb",
                                  "export.xsl")

        # Last push (only records modified since then are exported)
        last_push = task.last_push

        # Apply sync filters for this task
        filters = current.sync.get_filters(task.id)

        # Resource/domain identifiers are rendered as "[key:value]" runs
        # and passed to the stylesheet as parameters
        settings = current.deployment_settings
        identifiers = settings.get_sync_mcb_resource_identifiers()
        resources = "".join("[%s:%s]" % (k, v)
                            for k, v in identifiers.items())
        identifiers = settings.get_sync_mcb_domain_identifiers()
        domains = "".join("[%s:%s]" % (k, v)
                          for k, v in identifiers.items())

        # Export the resource as S3XML
        data = resource.export_xml(filters = filters,
                                   msince = last_push,
                                   stylesheet = stylesheet,
                                   pretty_print = True,
                                   resources = resources,
                                   domains = domains,
                                   )
        count = resource.results or 0
        # muntil = modification time of the youngest exported record;
        # becomes the new "last_push" marker on success
        mtime = resource.muntil

        # Transmit the data via HTTP
        remote = False
        output = None
        log = repository.log
        if data and count:
            #print data
            #response, message = None, None
            response, message = self._send_request(method = "POST",
                                                   path = "BulkStream",
                                                   data = data,
                                                   )
            if response is None:
                # Transmission failed on the remote side
                result = log.FATAL
                remote = True
                if not message:
                    message = "unknown error"
                output = message
            else:
                result = log.SUCCESS
                message = "Data sent successfully (%s records)" % count
        else:
            # No data to send
            result = log.WARNING
            message = "No data to send"

        # Log the operation
        log.write(repository_id = repository.id,
                  resource_name = resource_name,
                  transmission = log.OUT,
                  mode = log.PUSH,
                  action = "send",
                  remote = remote,
                  result = result,
                  message = message)

        if output is not None:
            # On error, suppress mtime so last_push is not advanced
            mtime = None

        return (output, mtime)

    # -------------------------------------------------------------------------
    # Internal methods:
    # -------------------------------------------------------------------------
    def _send_request(self,
                      method="GET",
                      path=None,
                      args=None,
                      data=None,
                      auth=False):
        """
            Send a request to the CommandBridge API

            @param method: the HTTP method ("GET" or "POST")
            @param path: the path relative to the repository URL
            @param args: dict of query string parameters
            @param data: the data to send (request body, XML string)
            @param auth: this is an authorization request
                         (currently unused by this adapter)

            @return: tuple (response, message): response is the parsed
                     XML tree, True for a POST with empty response body,
                     or None on error; message is the error message or
                     None
        """

        xml = current.xml
        repository = self.repository

        # Request URL
        url = repository.url.rstrip("/")
        if path:
            url = "/".join((url, path.lstrip("/")))
        if args:
            url = "?".join((url, urllib.urlencode(args)))

        # Create the request
        req = urllib2.Request(url=url)
        handlers = []

        # All requests are authorized via the repository's site key
        site_key = repository.site_key
        if not site_key:
            message = "CommandBridge Authorization failed: no access token (site key)"
            current.log.error(message)
            return None, message
        req.add_header("Authorization-Token", "%s" % site_key)

        # Request Data
        request_data = data if data is not None else ""
        if request_data:
            req.add_header("Content-Type", "application/xml")

        # Indicate that we expect XML response
        req.add_header("Accept", "application/xml")

        # Proxy handling (task/repository proxy overrides global config)
        config = repository.config
        proxy = repository.proxy or config.proxy or None
        if proxy:
            current.log.debug("using proxy=%s" % proxy)
            proxy_handler = urllib2.ProxyHandler({"https": proxy})
            handlers.append(proxy_handler)

        # Install all handlers
        # NOTE(review): install_opener mutates global urllib2 state and
        # is never reset - subsequent requests in this process inherit
        # the proxy; verify this is intended.
        if handlers:
            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)

        # Execute the request
        response = None
        message = None

        try:
            if method == "POST":
                #print >> sys.stderr, request_data
                f = urllib2.urlopen(req, data=request_data)
            else:
                f = urllib2.urlopen(req)
        except urllib2.HTTPError, e:
            # NOTE(review): on some Python 2 versions HTTPError has no
            # "reason" attribute (only "msg") - confirm target runtime.
            message = "HTTP %s: %s" % (e.code, e.reason)
            # More details may be in the response body
            # (HTTPError is file-like, so it can be parsed directly)
            error_response = xml.parse(e)
            if error_response:
                error_messages = error_response.findall("Message")
                details = " / ".join(item.text for item in error_messages)
                message = "%s (%s)" % (message, details)
        else:
            response = xml.parse(f)
            if response is None:
                if method == "POST":
                    # Empty response body on POST counts as success
                    response = True
                elif xml.error:
                    message = xml.error

        return response, message

# End =========================================================================
mit
JulienMcJay/eclock
windows/Python27/Lib/encodings/cp862.py
593
33626
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP862.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_map) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_map)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp862', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Map decoding_map = codecs.make_identity_dict(range(256)) decoding_map.update({ 0x0080: 0x05d0, # HEBREW LETTER ALEF 0x0081: 0x05d1, # HEBREW LETTER BET 0x0082: 0x05d2, # HEBREW LETTER GIMEL 0x0083: 0x05d3, # HEBREW LETTER DALET 0x0084: 0x05d4, # HEBREW LETTER HE 0x0085: 0x05d5, # HEBREW LETTER VAV 0x0086: 0x05d6, # HEBREW LETTER ZAYIN 0x0087: 0x05d7, # HEBREW LETTER HET 0x0088: 0x05d8, # HEBREW LETTER TET 0x0089: 0x05d9, # HEBREW LETTER YOD 0x008a: 0x05da, # HEBREW LETTER FINAL KAF 0x008b: 0x05db, # HEBREW LETTER KAF 0x008c: 0x05dc, # HEBREW LETTER LAMED 0x008d: 0x05dd, # HEBREW LETTER FINAL MEM 0x008e: 0x05de, # HEBREW LETTER MEM 0x008f: 0x05df, # HEBREW LETTER FINAL NUN 0x0090: 0x05e0, # HEBREW LETTER NUN 0x0091: 0x05e1, # HEBREW LETTER SAMEKH 0x0092: 0x05e2, # HEBREW LETTER AYIN 0x0093: 0x05e3, # HEBREW LETTER FINAL PE 0x0094: 0x05e4, # HEBREW LETTER PE 0x0095: 0x05e5, # HEBREW LETTER FINAL TSADI 0x0096: 0x05e6, # HEBREW LETTER TSADI 
0x0097: 0x05e7, # HEBREW LETTER QOF 0x0098: 0x05e8, # HEBREW LETTER RESH 0x0099: 0x05e9, # HEBREW LETTER SHIN 0x009a: 0x05ea, # HEBREW LETTER TAV 0x009b: 0x00a2, # CENT SIGN 0x009c: 0x00a3, # POUND SIGN 0x009d: 0x00a5, # YEN SIGN 0x009e: 0x20a7, # PESETA SIGN 0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE 0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE 0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE 0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR 0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR 0x00a8: 0x00bf, # INVERTED QUESTION MARK 0x00a9: 0x2310, # REVERSED NOT SIGN 0x00aa: 0x00ac, # NOT SIGN 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00b0: 0x2591, # LIGHT SHADE 0x00b1: 0x2592, # MEDIUM SHADE 0x00b2: 0x2593, # DARK SHADE 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL 0x00c2: 0x252c, # BOX 
DRAWINGS LIGHT DOWN AND HORIZONTAL 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT 0x00db: 0x2588, # FULL BLOCK 0x00dc: 0x2584, # LOWER HALF BLOCK 0x00dd: 0x258c, # LEFT HALF BLOCK 0x00de: 0x2590, # RIGHT HALF BLOCK 0x00df: 0x2580, # UPPER HALF BLOCK 0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN) 0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA 0x00e3: 0x03c0, # GREEK SMALL LETTER PI 0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA 0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA 0x00e6: 0x00b5, # MICRO SIGN 0x00e7: 0x03c4, # GREEK SMALL LETTER TAU 0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI 
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA 0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA 0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA 0x00ec: 0x221e, # INFINITY 0x00ed: 0x03c6, # GREEK SMALL LETTER PHI 0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON 0x00ef: 0x2229, # INTERSECTION 0x00f0: 0x2261, # IDENTICAL TO 0x00f1: 0x00b1, # PLUS-MINUS SIGN 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO 0x00f4: 0x2320, # TOP HALF INTEGRAL 0x00f5: 0x2321, # BOTTOM HALF INTEGRAL 0x00f6: 0x00f7, # DIVISION SIGN 0x00f7: 0x2248, # ALMOST EQUAL TO 0x00f8: 0x00b0, # DEGREE SIGN 0x00f9: 0x2219, # BULLET OPERATOR 0x00fa: 0x00b7, # MIDDLE DOT 0x00fb: 0x221a, # SQUARE ROOT 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N 0x00fd: 0x00b2, # SUPERSCRIPT TWO 0x00fe: 0x25a0, # BLACK SQUARE 0x00ff: 0x00a0, # NO-BREAK SPACE }) ### Decoding Table decoding_table = ( u'\x00' # 0x0000 -> NULL u'\x01' # 0x0001 -> START OF HEADING u'\x02' # 0x0002 -> START OF TEXT u'\x03' # 0x0003 -> END OF TEXT u'\x04' # 0x0004 -> END OF TRANSMISSION u'\x05' # 0x0005 -> ENQUIRY u'\x06' # 0x0006 -> ACKNOWLEDGE u'\x07' # 0x0007 -> BELL u'\x08' # 0x0008 -> BACKSPACE u'\t' # 0x0009 -> HORIZONTAL TABULATION u'\n' # 0x000a -> LINE FEED u'\x0b' # 0x000b -> VERTICAL TABULATION u'\x0c' # 0x000c -> FORM FEED u'\r' # 0x000d -> CARRIAGE RETURN u'\x0e' # 0x000e -> SHIFT OUT u'\x0f' # 0x000f -> SHIFT IN u'\x10' # 0x0010 -> DATA LINK ESCAPE u'\x11' # 0x0011 -> DEVICE CONTROL ONE u'\x12' # 0x0012 -> DEVICE CONTROL TWO u'\x13' # 0x0013 -> DEVICE CONTROL THREE u'\x14' # 0x0014 -> DEVICE CONTROL FOUR u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE u'\x16' # 0x0016 -> SYNCHRONOUS IDLE u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK u'\x18' # 0x0018 -> CANCEL u'\x19' # 0x0019 -> END OF MEDIUM u'\x1a' # 0x001a -> SUBSTITUTE u'\x1b' # 0x001b -> ESCAPE u'\x1c' # 0x001c -> FILE SEPARATOR u'\x1d' # 0x001d -> GROUP SEPARATOR u'\x1e' # 0x001e -> RECORD SEPARATOR u'\x1f' # 0x001f -> UNIT SEPARATOR u' ' # 0x0020 -> SPACE 
u'!' # 0x0021 -> EXCLAMATION MARK u'"' # 0x0022 -> QUOTATION MARK u'#' # 0x0023 -> NUMBER SIGN u'$' # 0x0024 -> DOLLAR SIGN u'%' # 0x0025 -> PERCENT SIGN u'&' # 0x0026 -> AMPERSAND u"'" # 0x0027 -> APOSTROPHE u'(' # 0x0028 -> LEFT PARENTHESIS u')' # 0x0029 -> RIGHT PARENTHESIS u'*' # 0x002a -> ASTERISK u'+' # 0x002b -> PLUS SIGN u',' # 0x002c -> COMMA u'-' # 0x002d -> HYPHEN-MINUS u'.' # 0x002e -> FULL STOP u'/' # 0x002f -> SOLIDUS u'0' # 0x0030 -> DIGIT ZERO u'1' # 0x0031 -> DIGIT ONE u'2' # 0x0032 -> DIGIT TWO u'3' # 0x0033 -> DIGIT THREE u'4' # 0x0034 -> DIGIT FOUR u'5' # 0x0035 -> DIGIT FIVE u'6' # 0x0036 -> DIGIT SIX u'7' # 0x0037 -> DIGIT SEVEN u'8' # 0x0038 -> DIGIT EIGHT u'9' # 0x0039 -> DIGIT NINE u':' # 0x003a -> COLON u';' # 0x003b -> SEMICOLON u'<' # 0x003c -> LESS-THAN SIGN u'=' # 0x003d -> EQUALS SIGN u'>' # 0x003e -> GREATER-THAN SIGN u'?' # 0x003f -> QUESTION MARK u'@' # 0x0040 -> COMMERCIAL AT u'A' # 0x0041 -> LATIN CAPITAL LETTER A u'B' # 0x0042 -> LATIN CAPITAL LETTER B u'C' # 0x0043 -> LATIN CAPITAL LETTER C u'D' # 0x0044 -> LATIN CAPITAL LETTER D u'E' # 0x0045 -> LATIN CAPITAL LETTER E u'F' # 0x0046 -> LATIN CAPITAL LETTER F u'G' # 0x0047 -> LATIN CAPITAL LETTER G u'H' # 0x0048 -> LATIN CAPITAL LETTER H u'I' # 0x0049 -> LATIN CAPITAL LETTER I u'J' # 0x004a -> LATIN CAPITAL LETTER J u'K' # 0x004b -> LATIN CAPITAL LETTER K u'L' # 0x004c -> LATIN CAPITAL LETTER L u'M' # 0x004d -> LATIN CAPITAL LETTER M u'N' # 0x004e -> LATIN CAPITAL LETTER N u'O' # 0x004f -> LATIN CAPITAL LETTER O u'P' # 0x0050 -> LATIN CAPITAL LETTER P u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q u'R' # 0x0052 -> LATIN CAPITAL LETTER R u'S' # 0x0053 -> LATIN CAPITAL LETTER S u'T' # 0x0054 -> LATIN CAPITAL LETTER T u'U' # 0x0055 -> LATIN CAPITAL LETTER U u'V' # 0x0056 -> LATIN CAPITAL LETTER V u'W' # 0x0057 -> LATIN CAPITAL LETTER W u'X' # 0x0058 -> LATIN CAPITAL LETTER X u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y u'Z' # 0x005a -> LATIN CAPITAL LETTER Z u'[' # 0x005b -> LEFT SQUARE 
BRACKET u'\\' # 0x005c -> REVERSE SOLIDUS u']' # 0x005d -> RIGHT SQUARE BRACKET u'^' # 0x005e -> CIRCUMFLEX ACCENT u'_' # 0x005f -> LOW LINE u'`' # 0x0060 -> GRAVE ACCENT u'a' # 0x0061 -> LATIN SMALL LETTER A u'b' # 0x0062 -> LATIN SMALL LETTER B u'c' # 0x0063 -> LATIN SMALL LETTER C u'd' # 0x0064 -> LATIN SMALL LETTER D u'e' # 0x0065 -> LATIN SMALL LETTER E u'f' # 0x0066 -> LATIN SMALL LETTER F u'g' # 0x0067 -> LATIN SMALL LETTER G u'h' # 0x0068 -> LATIN SMALL LETTER H u'i' # 0x0069 -> LATIN SMALL LETTER I u'j' # 0x006a -> LATIN SMALL LETTER J u'k' # 0x006b -> LATIN SMALL LETTER K u'l' # 0x006c -> LATIN SMALL LETTER L u'm' # 0x006d -> LATIN SMALL LETTER M u'n' # 0x006e -> LATIN SMALL LETTER N u'o' # 0x006f -> LATIN SMALL LETTER O u'p' # 0x0070 -> LATIN SMALL LETTER P u'q' # 0x0071 -> LATIN SMALL LETTER Q u'r' # 0x0072 -> LATIN SMALL LETTER R u's' # 0x0073 -> LATIN SMALL LETTER S u't' # 0x0074 -> LATIN SMALL LETTER T u'u' # 0x0075 -> LATIN SMALL LETTER U u'v' # 0x0076 -> LATIN SMALL LETTER V u'w' # 0x0077 -> LATIN SMALL LETTER W u'x' # 0x0078 -> LATIN SMALL LETTER X u'y' # 0x0079 -> LATIN SMALL LETTER Y u'z' # 0x007a -> LATIN SMALL LETTER Z u'{' # 0x007b -> LEFT CURLY BRACKET u'|' # 0x007c -> VERTICAL LINE u'}' # 0x007d -> RIGHT CURLY BRACKET u'~' # 0x007e -> TILDE u'\x7f' # 0x007f -> DELETE u'\u05d0' # 0x0080 -> HEBREW LETTER ALEF u'\u05d1' # 0x0081 -> HEBREW LETTER BET u'\u05d2' # 0x0082 -> HEBREW LETTER GIMEL u'\u05d3' # 0x0083 -> HEBREW LETTER DALET u'\u05d4' # 0x0084 -> HEBREW LETTER HE u'\u05d5' # 0x0085 -> HEBREW LETTER VAV u'\u05d6' # 0x0086 -> HEBREW LETTER ZAYIN u'\u05d7' # 0x0087 -> HEBREW LETTER HET u'\u05d8' # 0x0088 -> HEBREW LETTER TET u'\u05d9' # 0x0089 -> HEBREW LETTER YOD u'\u05da' # 0x008a -> HEBREW LETTER FINAL KAF u'\u05db' # 0x008b -> HEBREW LETTER KAF u'\u05dc' # 0x008c -> HEBREW LETTER LAMED u'\u05dd' # 0x008d -> HEBREW LETTER FINAL MEM u'\u05de' # 0x008e -> HEBREW LETTER MEM u'\u05df' # 0x008f -> HEBREW LETTER FINAL NUN u'\u05e0' # 0x0090 
-> HEBREW LETTER NUN u'\u05e1' # 0x0091 -> HEBREW LETTER SAMEKH u'\u05e2' # 0x0092 -> HEBREW LETTER AYIN u'\u05e3' # 0x0093 -> HEBREW LETTER FINAL PE u'\u05e4' # 0x0094 -> HEBREW LETTER PE u'\u05e5' # 0x0095 -> HEBREW LETTER FINAL TSADI u'\u05e6' # 0x0096 -> HEBREW LETTER TSADI u'\u05e7' # 0x0097 -> HEBREW LETTER QOF u'\u05e8' # 0x0098 -> HEBREW LETTER RESH u'\u05e9' # 0x0099 -> HEBREW LETTER SHIN u'\u05ea' # 0x009a -> HEBREW LETTER TAV u'\xa2' # 0x009b -> CENT SIGN u'\xa3' # 0x009c -> POUND SIGN u'\xa5' # 0x009d -> YEN SIGN u'\u20a7' # 0x009e -> PESETA SIGN u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE u'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR u'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK u'\u2310' # 0x00a9 -> REVERSED NOT SIGN u'\xac' # 0x00aa -> NOT SIGN u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK u'\u2591' # 0x00b0 -> LIGHT SHADE u'\u2592' # 0x00b1 -> MEDIUM SHADE u'\u2593' # 0x00b2 -> DARK SHADE u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL u'\u2557' # 
0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT u'\u2588' # 0x00db -> FULL BLOCK u'\u2584' # 
0x00dc -> LOWER HALF BLOCK u'\u258c' # 0x00dd -> LEFT HALF BLOCK u'\u2590' # 0x00de -> RIGHT HALF BLOCK u'\u2580' # 0x00df -> UPPER HALF BLOCK u'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S (GERMAN) u'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA u'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI u'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA u'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA u'\xb5' # 0x00e6 -> MICRO SIGN u'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU u'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI u'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA u'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA u'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA u'\u221e' # 0x00ec -> INFINITY u'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI u'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON u'\u2229' # 0x00ef -> INTERSECTION u'\u2261' # 0x00f0 -> IDENTICAL TO u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO u'\u2320' # 0x00f4 -> TOP HALF INTEGRAL u'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL u'\xf7' # 0x00f6 -> DIVISION SIGN u'\u2248' # 0x00f7 -> ALMOST EQUAL TO u'\xb0' # 0x00f8 -> DEGREE SIGN u'\u2219' # 0x00f9 -> BULLET OPERATOR u'\xb7' # 0x00fa -> MIDDLE DOT u'\u221a' # 0x00fb -> SQUARE ROOT u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N u'\xb2' # 0x00fd -> SUPERSCRIPT TWO u'\u25a0' # 0x00fe -> BLACK SQUARE u'\xa0' # 0x00ff -> NO-BREAK SPACE ) ### Encoding Map encoding_map = { 0x0000: 0x0000, # NULL 0x0001: 0x0001, # START OF HEADING 0x0002: 0x0002, # START OF TEXT 0x0003: 0x0003, # END OF TEXT 0x0004: 0x0004, # END OF TRANSMISSION 0x0005: 0x0005, # ENQUIRY 0x0006: 0x0006, # ACKNOWLEDGE 0x0007: 0x0007, # BELL 0x0008: 0x0008, # BACKSPACE 0x0009: 0x0009, # HORIZONTAL TABULATION 0x000a: 0x000a, # LINE FEED 0x000b: 0x000b, # VERTICAL TABULATION 0x000c: 0x000c, # FORM FEED 0x000d: 0x000d, # CARRIAGE RETURN 0x000e: 0x000e, # SHIFT OUT 0x000f: 0x000f, # SHIFT IN 
0x0010: 0x0010, # DATA LINK ESCAPE 0x0011: 0x0011, # DEVICE CONTROL ONE 0x0012: 0x0012, # DEVICE CONTROL TWO 0x0013: 0x0013, # DEVICE CONTROL THREE 0x0014: 0x0014, # DEVICE CONTROL FOUR 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE 0x0016: 0x0016, # SYNCHRONOUS IDLE 0x0017: 0x0017, # END OF TRANSMISSION BLOCK 0x0018: 0x0018, # CANCEL 0x0019: 0x0019, # END OF MEDIUM 0x001a: 0x001a, # SUBSTITUTE 0x001b: 0x001b, # ESCAPE 0x001c: 0x001c, # FILE SEPARATOR 0x001d: 0x001d, # GROUP SEPARATOR 0x001e: 0x001e, # RECORD SEPARATOR 0x001f: 0x001f, # UNIT SEPARATOR 0x0020: 0x0020, # SPACE 0x0021: 0x0021, # EXCLAMATION MARK 0x0022: 0x0022, # QUOTATION MARK 0x0023: 0x0023, # NUMBER SIGN 0x0024: 0x0024, # DOLLAR SIGN 0x0025: 0x0025, # PERCENT SIGN 0x0026: 0x0026, # AMPERSAND 0x0027: 0x0027, # APOSTROPHE 0x0028: 0x0028, # LEFT PARENTHESIS 0x0029: 0x0029, # RIGHT PARENTHESIS 0x002a: 0x002a, # ASTERISK 0x002b: 0x002b, # PLUS SIGN 0x002c: 0x002c, # COMMA 0x002d: 0x002d, # HYPHEN-MINUS 0x002e: 0x002e, # FULL STOP 0x002f: 0x002f, # SOLIDUS 0x0030: 0x0030, # DIGIT ZERO 0x0031: 0x0031, # DIGIT ONE 0x0032: 0x0032, # DIGIT TWO 0x0033: 0x0033, # DIGIT THREE 0x0034: 0x0034, # DIGIT FOUR 0x0035: 0x0035, # DIGIT FIVE 0x0036: 0x0036, # DIGIT SIX 0x0037: 0x0037, # DIGIT SEVEN 0x0038: 0x0038, # DIGIT EIGHT 0x0039: 0x0039, # DIGIT NINE 0x003a: 0x003a, # COLON 0x003b: 0x003b, # SEMICOLON 0x003c: 0x003c, # LESS-THAN SIGN 0x003d: 0x003d, # EQUALS SIGN 0x003e: 0x003e, # GREATER-THAN SIGN 0x003f: 0x003f, # QUESTION MARK 0x0040: 0x0040, # COMMERCIAL AT 0x0041: 0x0041, # LATIN CAPITAL LETTER A 0x0042: 0x0042, # LATIN CAPITAL LETTER B 0x0043: 0x0043, # LATIN CAPITAL LETTER C 0x0044: 0x0044, # LATIN CAPITAL LETTER D 0x0045: 0x0045, # LATIN CAPITAL LETTER E 0x0046: 0x0046, # LATIN CAPITAL LETTER F 0x0047: 0x0047, # LATIN CAPITAL LETTER G 0x0048: 0x0048, # LATIN CAPITAL LETTER H 0x0049: 0x0049, # LATIN CAPITAL LETTER I 0x004a: 0x004a, # LATIN CAPITAL LETTER J 0x004b: 0x004b, # LATIN CAPITAL LETTER K 0x004c: 0x004c, # 
LATIN CAPITAL LETTER L 0x004d: 0x004d, # LATIN CAPITAL LETTER M 0x004e: 0x004e, # LATIN CAPITAL LETTER N 0x004f: 0x004f, # LATIN CAPITAL LETTER O 0x0050: 0x0050, # LATIN CAPITAL LETTER P 0x0051: 0x0051, # LATIN CAPITAL LETTER Q 0x0052: 0x0052, # LATIN CAPITAL LETTER R 0x0053: 0x0053, # LATIN CAPITAL LETTER S 0x0054: 0x0054, # LATIN CAPITAL LETTER T 0x0055: 0x0055, # LATIN CAPITAL LETTER U 0x0056: 0x0056, # LATIN CAPITAL LETTER V 0x0057: 0x0057, # LATIN CAPITAL LETTER W 0x0058: 0x0058, # LATIN CAPITAL LETTER X 0x0059: 0x0059, # LATIN CAPITAL LETTER Y 0x005a: 0x005a, # LATIN CAPITAL LETTER Z 0x005b: 0x005b, # LEFT SQUARE BRACKET 0x005c: 0x005c, # REVERSE SOLIDUS 0x005d: 0x005d, # RIGHT SQUARE BRACKET 0x005e: 0x005e, # CIRCUMFLEX ACCENT 0x005f: 0x005f, # LOW LINE 0x0060: 0x0060, # GRAVE ACCENT 0x0061: 0x0061, # LATIN SMALL LETTER A 0x0062: 0x0062, # LATIN SMALL LETTER B 0x0063: 0x0063, # LATIN SMALL LETTER C 0x0064: 0x0064, # LATIN SMALL LETTER D 0x0065: 0x0065, # LATIN SMALL LETTER E 0x0066: 0x0066, # LATIN SMALL LETTER F 0x0067: 0x0067, # LATIN SMALL LETTER G 0x0068: 0x0068, # LATIN SMALL LETTER H 0x0069: 0x0069, # LATIN SMALL LETTER I 0x006a: 0x006a, # LATIN SMALL LETTER J 0x006b: 0x006b, # LATIN SMALL LETTER K 0x006c: 0x006c, # LATIN SMALL LETTER L 0x006d: 0x006d, # LATIN SMALL LETTER M 0x006e: 0x006e, # LATIN SMALL LETTER N 0x006f: 0x006f, # LATIN SMALL LETTER O 0x0070: 0x0070, # LATIN SMALL LETTER P 0x0071: 0x0071, # LATIN SMALL LETTER Q 0x0072: 0x0072, # LATIN SMALL LETTER R 0x0073: 0x0073, # LATIN SMALL LETTER S 0x0074: 0x0074, # LATIN SMALL LETTER T 0x0075: 0x0075, # LATIN SMALL LETTER U 0x0076: 0x0076, # LATIN SMALL LETTER V 0x0077: 0x0077, # LATIN SMALL LETTER W 0x0078: 0x0078, # LATIN SMALL LETTER X 0x0079: 0x0079, # LATIN SMALL LETTER Y 0x007a: 0x007a, # LATIN SMALL LETTER Z 0x007b: 0x007b, # LEFT CURLY BRACKET 0x007c: 0x007c, # VERTICAL LINE 0x007d: 0x007d, # RIGHT CURLY BRACKET 0x007e: 0x007e, # TILDE 0x007f: 0x007f, # DELETE 0x00a0: 0x00ff, # NO-BREAK 
SPACE 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK 0x00a2: 0x009b, # CENT SIGN 0x00a3: 0x009c, # POUND SIGN 0x00a5: 0x009d, # YEN SIGN 0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00ac: 0x00aa, # NOT SIGN 0x00b0: 0x00f8, # DEGREE SIGN 0x00b1: 0x00f1, # PLUS-MINUS SIGN 0x00b2: 0x00fd, # SUPERSCRIPT TWO 0x00b5: 0x00e6, # MICRO SIGN 0x00b7: 0x00fa, # MIDDLE DOT 0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF 0x00bf: 0x00a8, # INVERTED QUESTION MARK 0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S (GERMAN) 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE 0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE 0x00f7: 0x00f6, # DIVISION SIGN 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE 0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK 0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA 0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA 0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA 0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI 0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA 0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA 0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA 0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON 0x03c0: 0x00e3, # GREEK SMALL LETTER PI 0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA 0x03c4: 0x00e7, # GREEK SMALL LETTER TAU 0x03c6: 0x00ed, # GREEK SMALL LETTER PHI 0x05d0: 0x0080, # HEBREW LETTER ALEF 0x05d1: 0x0081, # HEBREW LETTER BET 0x05d2: 0x0082, # HEBREW LETTER GIMEL 0x05d3: 0x0083, # HEBREW LETTER DALET 0x05d4: 0x0084, # HEBREW LETTER HE 0x05d5: 0x0085, # HEBREW LETTER VAV 0x05d6: 0x0086, # HEBREW LETTER ZAYIN 0x05d7: 0x0087, # HEBREW LETTER HET 0x05d8: 0x0088, # HEBREW LETTER TET 0x05d9: 0x0089, # HEBREW LETTER YOD 0x05da: 0x008a, # 
HEBREW LETTER FINAL KAF 0x05db: 0x008b, # HEBREW LETTER KAF 0x05dc: 0x008c, # HEBREW LETTER LAMED 0x05dd: 0x008d, # HEBREW LETTER FINAL MEM 0x05de: 0x008e, # HEBREW LETTER MEM 0x05df: 0x008f, # HEBREW LETTER FINAL NUN 0x05e0: 0x0090, # HEBREW LETTER NUN 0x05e1: 0x0091, # HEBREW LETTER SAMEKH 0x05e2: 0x0092, # HEBREW LETTER AYIN 0x05e3: 0x0093, # HEBREW LETTER FINAL PE 0x05e4: 0x0094, # HEBREW LETTER PE 0x05e5: 0x0095, # HEBREW LETTER FINAL TSADI 0x05e6: 0x0096, # HEBREW LETTER TSADI 0x05e7: 0x0097, # HEBREW LETTER QOF 0x05e8: 0x0098, # HEBREW LETTER RESH 0x05e9: 0x0099, # HEBREW LETTER SHIN 0x05ea: 0x009a, # HEBREW LETTER TAV 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N 0x20a7: 0x009e, # PESETA SIGN 0x2219: 0x00f9, # BULLET OPERATOR 0x221a: 0x00fb, # SQUARE ROOT 0x221e: 0x00ec, # INFINITY 0x2229: 0x00ef, # INTERSECTION 0x2248: 0x00f7, # ALMOST EQUAL TO 0x2261: 0x00f0, # IDENTICAL TO 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO 0x2310: 0x00a9, # REVERSED NOT SIGN 0x2320: 0x00f4, # TOP HALF INTEGRAL 0x2321: 0x00f5, # BOTTOM HALF INTEGRAL 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT 
DOUBLE 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL 0x2580: 0x00df, # UPPER HALF BLOCK 0x2584: 0x00dc, # LOWER HALF BLOCK 0x2588: 0x00db, # FULL BLOCK 0x258c: 0x00dd, # LEFT HALF BLOCK 0x2590: 0x00de, # RIGHT HALF BLOCK 0x2591: 0x00b0, # LIGHT SHADE 0x2592: 0x00b1, # MEDIUM SHADE 0x2593: 0x00b2, # DARK SHADE 0x25a0: 0x00fe, # BLACK SQUARE }
gpl-2.0
esikachev/scenario
sahara/utils/openstack/base.py
4
2712
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Helpers for resolving OpenStack service endpoints from a keystone
service catalog (supports both the v2 "publicURL"-style and the v3
"interface"-style endpoint formats).
"""

import json

from oslo_config import cfg
from six.moves.urllib import parse as urlparse

from sahara import context
from sahara import exceptions as ex
from sahara.i18n import _


CONF = cfg.CONF


def url_for(service_catalog, service_type, admin=False, endpoint_type=None):
    """Return the endpoint URL for a service from the service catalog.

    :param service_catalog: JSON-encoded keystone service catalog
    :param service_type: catalog service type (e.g. "compute")
    :param admin: if True, force the admin endpoint (overrides
                  endpoint_type)
    :param endpoint_type: endpoint type, defaults to "publicURL"
    :raises SystemError: if the service or a matching endpoint is not
                         found
    """
    if not endpoint_type:
        endpoint_type = 'publicURL'
    if admin:
        endpoint_type = 'adminURL'
    service = _get_service_from_catalog(service_catalog, service_type)

    if service:
        endpoints = service['endpoints']
        # Restrict to the configured region, if any
        if CONF.os_region_name:
            endpoints = [e for e in endpoints
                         if e['region'] == CONF.os_region_name]
        try:
            return _get_endpoint_url(endpoints, endpoint_type)
        except Exception:
            # The helpers may raise KeyError/IndexError depending on the
            # catalog format; normalize any failure to a SystemError
            raise ex.SystemError(
                _("Endpoint with type %(type)s is not found for service "
                  "%(service)s")
                % {'type': endpoint_type,
                   'service': service_type})
    else:
        raise ex.SystemError(
            _('Service "%s" not found in service catalog') % service_type)


def _get_service_from_catalog(catalog, service_type):
    """Return the catalog entry matching service_type, or None."""
    if catalog:
        catalog = json.loads(catalog)
        for service in catalog:
            if service['type'] == service_type:
                return service

    return None


def _get_endpoint_url(endpoints, endpoint_type):
    """Pick the endpoint URL matching endpoint_type.

    Keystone v3 catalogs describe endpoints with an "interface" key
    ("public"/"admin"/"internal"); v2 catalogs use keys like
    "publicURL" directly on the endpoint dict.
    """
    if 'interface' in endpoints[0]:
        # v3-style: strip the "URL" suffix ("publicURL" -> "public")
        endpoint_type = endpoint_type[0:-3]
        for endpoint in endpoints:
            if endpoint['interface'] == endpoint_type:
                return endpoint['url']
    # v2-style (or no interface match): look the type up as a key on
    # the first endpoint
    return _get_case_insensitive(endpoints[0], endpoint_type)


def _get_case_insensitive(dictionary, key):
    """Return the value for key, matching the key case-insensitively."""
    for k, v in dictionary.items():
        if str(k).lower() == str(key).lower():
            return v

    # this will raise an exception as usual if key was not found
    return dictionary[key]


def retrieve_auth_url():
    """Rebuild the identity (auth) URL for the current context.

    Keeps scheme, host and port of the context's auth_uri and appends
    the identity API version selected by config.
    NOTE(review): any path prefix of auth_uri is discarded - confirm
    deployments never mount keystone under a sub-path.
    """
    info = urlparse.urlparse(context.current().auth_uri)

    version = 'v3' if CONF.use_identity_api_v3 else 'v2.0'

    return "%s://%s:%s/%s/" % (info.scheme, info.hostname, info.port, version)
apache-2.0
aerickson/ansible
lib/ansible/modules/network/lenovo/cnos_bgp.py
59
19250
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (C) 2017 Lenovo, Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # # Module to send BGP commands to Lenovo Switches # Lenovo Networking # ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: cnos_bgp author: "Dave Kasberg (@dkasberg)" short_description: Manage BGP resources and attributes on devices running Lenovo CNOS description: - This module allows you to work with Border Gateway Protocol (BGP) related configurations. The operators used are overloaded to ensure control over switch BGP configurations. This module is invoked using method with asNumber as one of its arguments. The first level of the BGP configuration allows to set up an AS number, with the following attributes going into various configuration operations under the context of BGP. After passing this level, there are eight BGP arguments that will perform further configurations. They are bgpArg1, bgpArg2, bgpArg3, bgpArg4, bgpArg5, bgpArg6, bgpArg7, and bgpArg8. For more details on how to use these arguments, see [Overloaded Variables]. This module uses SSH to manage network device configuration. The results of the operation will be placed in a directory named 'results' that must be created by the user in their local directory to where the playbook is run. 
For more information about this module from Lenovo and customizing it usage for your use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_bgp.html) version_added: "2.3" extends_documentation_fragment: cnos options: asNum: description: - AS number required: Yes default: Null bgpArg1: description: - This is an overloaded bgp first argument. Usage of this argument can be found is the User Guide referenced above. required: Yes default: Null choices: [address-family,bestpath,bgp,cluster-id,confederation,enforce-first-as,fast-external-failover, graceful-restart,graceful-restart-helper,log-neighbor-changes,maxas-limit,neighbor,router-id,shutdown, synchronization,timers,vrf] bgpArg2: description: - This is an overloaded bgp second argument. Usage of this argument can be found is the User Guide referenced above. required: No default: Null choices: [ipv4 or ipv6, always-compare-med,compare-confed-aspath,compare-routerid,dont-compare-originator-id,tie-break-on-age, as-path,med,identifier,peers] bgpArg3: description: - This is an overloaded bgp third argument. Usage of this argument can be found is the User Guide referenced above. required: No default: Null choices: [aggregate-address,client-to-client,dampening,distance,maximum-paths,network,nexthop,redistribute,save,synchronization, ignore or multipath-relax, confed or missing-as-worst or non-deterministic or remove-recv-med or remove-send-med] bgpArg4: description: - This is an overloaded bgp fourth argument. Usage of this argument can be found is the User Guide referenced above. required: No default: Null choices: [Aggregate prefix, Reachability Half-life time,route-map, Distance for routes external,ebgp or ibgp, IP prefix <network>,IP prefix <network>/<length>, synchronization, Delay value, direct, ospf, static, memory] bgpArg5: description: - This is an overloaded bgp fifth argument. Usage of this argument can be found is the User Guide referenced above. 
required: No default: Null choices: [as-set, summary-only, Value to start reusing a route, Distance for routes internal, Supported multipath numbers, backdoor, map, route-map ] bgpArg6: description: - This is an overloaded bgp sixth argument. Usage of this argument can be found is the User Guide referenced above. required: No default: Null choices: [summary-only,as-set, route-map name, Value to start suppressing a route, Distance for local routes, Network mask, Pointer to route-map entries] bgpArg7: description: - This is an overloaded bgp seventh argument. Usage of this argument can be found is the User Guide referenced above. required: No default: Null choices: [ Maximum duration to suppress a stable route(minutes), backdoor,route-map, Name of the route map ] bgpArg8: description: - This is an overloaded bgp eigth argument. Usage of this argument can be found is the User Guide referenced above. required: No default: Null choices: [ Un-reachability Half-life time for the penalty(minutes), backdoor] ''' EXAMPLES = ''' Tasks: The following are examples of using the module cnos_bgp. These are written in the main.yml file of the tasks directory. 
--- - name: Test BGP - neighbor cnos_bgp: host: "{{ inventory_hostname }}" username: "{{ hostvars[inventory_hostname]['username'] }}" password: "{{ hostvars[inventory_hostname]['password'] }}" deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}" outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" asNum: 33 bgpArg1: "neighbor" bgpArg2: "10.241.107.40" bgpArg3: 13 bgpArg4: "address-family" bgpArg5: "ipv4" bgpArg6: "next-hop-self" - name: Test BGP - BFD cnos_bgp: host: "{{ inventory_hostname }}" username: "{{ hostvars[inventory_hostname]['username'] }}" password: "{{ hostvars[inventory_hostname]['password'] }}" deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}" outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" asNum: 33 bgpArg1: "neighbor" bgpArg2: "10.241.107.40" bgpArg3: 13 bgpArg4: "bfd" - name: Test BGP - address-family - dampening cnos_bgp: host: "{{ inventory_hostname }}" username: "{{ hostvars[inventory_hostname]['username'] }}" password: "{{ hostvars[inventory_hostname]['password'] }}" deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}" outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" asNum: 33 bgpArg1: "address-family" bgpArg2: "ipv4" bgpArg3: "dampening" bgpArg4: 13 bgpArg5: 233 bgpArg6: 333 bgpArg7: 15 bgpArg8: 33 - name: Test BGP - address-family - network cnos_bgp: host: "{{ inventory_hostname }}" username: "{{ hostvars[inventory_hostname]['username'] }}" password: "{{ hostvars[inventory_hostname]['password'] }}" deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}" outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" asNum: 33 bgpArg1: "address-family" bgpArg2: 
"ipv4" bgpArg3: "network" bgpArg4: "1.2.3.4/5" bgpArg5: "backdoor" - name: Test BGP - bestpath - always-compare-med cnos_bgp: host: "{{ inventory_hostname }}" username: "{{ hostvars[inventory_hostname]['username'] }}" password: "{{ hostvars[inventory_hostname]['password'] }}" deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}" outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" asNum: 33 bgpArg1: "bestpath" bgpArg2: "always-compare-med" - name: Test BGP - bestpath-compare-confed-aspat cnos_bgp: host: "{{ inventory_hostname }}" username: "{{ hostvars[inventory_hostname]['username'] }}" password: "{{ hostvars[inventory_hostname]['password'] }}" deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}" outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" asNum: 33 bgpArg1: "bestpath" bgpArg2: "compare-confed-aspath" - name: Test BGP - bgp cnos_bgp: host: "{{ inventory_hostname }}" username: "{{ hostvars[inventory_hostname]['username'] }}" password: "{{ hostvars[inventory_hostname]['password'] }}" deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}" outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" asNum: 33 bgpArg1: "bgp" bgpArg2: 33 - name: Test BGP - cluster-id cnos_bgp: host: "{{ inventory_hostname }}" username: "{{ hostvars[inventory_hostname]['username'] }}" password: "{{ hostvars[inventory_hostname]['password'] }}" deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}" outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" asNum: 33 bgpArg1: "cluster-id" bgpArg2: "1.2.3.4" - name: Test BGP - confederation-identifier cnos_bgp: host: "{{ inventory_hostname }}" username: "{{ 
hostvars[inventory_hostname]['username'] }}" password: "{{ hostvars[inventory_hostname]['password'] }}" deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}" outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" asNum: 33 bgpArg1: "confederation" bgpArg2: "identifier" bgpArg3: 333 - name: Test BGP - enforce-first-as cnos_bgp: host: "{{ inventory_hostname }}" username: "{{ hostvars[inventory_hostname]['username'] }}" password: "{{ hostvars[inventory_hostname]['password'] }}" deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}" outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" asNum: 33 bgpArg1: "enforce-first-as" - name: Test BGP - fast-external-failover cnos_bgp: host: "{{ inventory_hostname }}" username: "{{ hostvars[inventory_hostname]['username'] }}" password: "{{ hostvars[inventory_hostname]['password'] }}" deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}" outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" asNum: 33 bgpArg1: "fast-external-failover" - name: Test BGP - graceful-restart cnos_bgp: host: "{{ inventory_hostname }}" username: "{{ hostvars[inventory_hostname]['username'] }}" password: "{{ hostvars[inventory_hostname]['password'] }}" deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}" outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" asNum: 33 bgpArg1: "graceful-restart" bgpArg2: 333 - name: Test BGP - graceful-restart-helper cnos_bgp: host: "{{ inventory_hostname }}" username: "{{ hostvars[inventory_hostname]['username'] }}" password: "{{ hostvars[inventory_hostname]['password'] }}" deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" enablePassword: 
"{{ hostvars[inventory_hostname]['enablePassword'] }}" outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" asNum: 33 bgpArg1: "graceful-restart-helper" - name: Test BGP - maxas-limit cnos_bgp: host: "{{ inventory_hostname }}" username: "{{ hostvars[inventory_hostname]['username'] }}" password: "{{ hostvars[inventory_hostname]['password'] }}" deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}" outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" asNum: 33 bgpArg1: "maxas-limit" bgpArg2: 333 - name: Test BGP - neighbor cnos_bgp: host: "{{ inventory_hostname }}" username: "{{ hostvars[inventory_hostname]['username'] }}" password: "{{ hostvars[inventory_hostname]['password'] }}" deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}" outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" asNum: 33 bgpArg1: "neighbor" bgpArg2: "10.241.107.40" bgpArg3: 13 bgpArg4: "address-family" bgpArg5: "ipv4" bgpArg6: "next-hop-self" - name: Test BGP - router-id cnos_bgp: host: "{{ inventory_hostname }}" username: "{{ hostvars[inventory_hostname]['username'] }}" password: "{{ hostvars[inventory_hostname]['password'] }}" deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}" outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" asNum: 33 bgpArg1: "router-id" bgpArg2: "1.2.3.4" - name: Test BGP - synchronization cnos_bgp: host: "{{ inventory_hostname }}" username: "{{ hostvars[inventory_hostname]['username'] }}" password: "{{ hostvars[inventory_hostname]['password'] }}" deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}" outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" asNum: 33 bgpArg1: 
"synchronization" - name: Test BGP - timers cnos_bgp: host: "{{ inventory_hostname }}" username: "{{ hostvars[inventory_hostname]['username'] }}" password: "{{ hostvars[inventory_hostname]['password'] }}" deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}" outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" asNum: 33 bgpArg1: "timers" bgpArg2: 333 bgpArg3: 3333 - name: Test BGP - vrf cnos_bgp: host: "{{ inventory_hostname }}" username: "{{ hostvars[inventory_hostname]['username'] }}" password: "{{ hostvars[inventory_hostname]['password'] }}" deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}" enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}" outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt" asNum: 33 bgpArg1: "vrf" ''' RETURN = ''' msg: description: Success or failure message. Upon any failure, the method returns an error display string. returned: always type: string ''' import sys import paramiko import time import argparse import socket import array import json import time import re try: from ansible.module_utils import cnos HAS_LIB = True except: HAS_LIB = False from ansible.module_utils.basic import AnsibleModule from collections import defaultdict def main(): module = AnsibleModule( argument_spec=dict( outputfile=dict(required=True), host=dict(required=True), username=dict(required=True), password=dict(required=True, no_log=True), enablePassword=dict(required=False, no_log=True), deviceType=dict(required=True), bgpArg1=dict(required=True), bgpArg2=dict(required=False), bgpArg3=dict(required=False), bgpArg4=dict(required=False), bgpArg5=dict(required=False), bgpArg6=dict(required=False), bgpArg7=dict(required=False), bgpArg8=dict(required=False), asNum=dict(required=True),), supports_check_mode=False) username = module.params['username'] password = module.params['password'] enablePassword = 
module.params['enablePassword'] bgpArg1 = module.params['bgpArg1'] bgpArg2 = module.params['bgpArg2'] bgpArg3 = module.params['bgpArg3'] bgpArg4 = module.params['bgpArg4'] bgpArg5 = module.params['bgpArg5'] bgpArg6 = module.params['bgpArg6'] bgpArg7 = module.params['bgpArg7'] bgpArg8 = module.params['bgpArg8'] asNum = module.params['asNum'] outputfile = module.params['outputfile'] hostIP = module.params['host'] deviceType = module.params['deviceType'] output = "" # Create instance of SSHClient object remote_conn_pre = paramiko.SSHClient() # Automatically add untrusted hosts (make sure okay for security policy in your environment) remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # initiate SSH connection with the switch remote_conn_pre.connect(hostIP, username=username, password=password) time.sleep(2) # Use invoke_shell to establish an 'interactive session' remote_conn = remote_conn_pre.invoke_shell() time.sleep(2) # Enable and enter configure terminal then send command output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn) output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn) # Make terminal length = 0 output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn) # Go to config mode output = output + cnos.waitForDeviceResponse("configure d\n", "(config)#", 2, remote_conn) # Send the CLi command output = output + cnos.routerConfig(remote_conn, deviceType, "(config)#", 2, "bgp", asNum, bgpArg1, bgpArg2, bgpArg3, bgpArg4, bgpArg5, bgpArg6, bgpArg7, bgpArg8) # Save it into the file file = open(outputfile, "a") file.write(output) file.close() # Logic to check when changes occur or not errorMsg = cnos.checkOutputForError(output) if(errorMsg is None): module.exit_json(changed=True, msg="BGP configurations accomplished") else: module.fail_json(msg=errorMsg) if __name__ == '__main__': main()
gpl-3.0
kifcaliph/odoo
addons/subscription/__init__.py
441
1076
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import subscription # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
rishikksh20/scikit-learn
examples/ensemble/plot_feature_transformation.py
115
4327
""" =============================================== Feature transformations with ensembles of trees =============================================== Transform your features into a higher dimensional, sparse space. Then train a linear model on these features. First fit an ensemble of trees (totally random trees, a random forest, or gradient boosted trees) on the training set. Then each leaf of each tree in the ensemble is assigned a fixed arbitrary feature index in a new feature space. These leaf indices are then encoded in a one-hot fashion. Each sample goes through the decisions of each tree of the ensemble and ends up in one leaf per tree. The sample is encoded by setting feature values for these leaves to 1 and the other feature values to 0. The resulting transformer has then learned a supervised, sparse, high-dimensional categorical embedding of the data. """ # Author: Tim Head <betatim@gmail.com> # # License: BSD 3 clause import numpy as np np.random.seed(10) import matplotlib.pyplot as plt from sklearn.datasets import make_classification from sklearn.linear_model import LogisticRegression from sklearn.ensemble import (RandomTreesEmbedding, RandomForestClassifier, GradientBoostingClassifier) from sklearn.preprocessing import OneHotEncoder from sklearn.model_selection import train_test_split from sklearn.metrics import roc_curve from sklearn.pipeline import make_pipeline n_estimator = 10 X, y = make_classification(n_samples=80000) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5) # It is important to train the ensemble of trees on a different subset # of the training data than the linear regression model to avoid # overfitting, in particular if the total number of leaves is # similar to the number of training samples X_train, X_train_lr, y_train, y_train_lr = train_test_split(X_train, y_train, test_size=0.5) # Unsupervised transformation based on totally random trees rt = RandomTreesEmbedding(max_depth=3, n_estimators=n_estimator, 
random_state=0) rt_lm = LogisticRegression() pipeline = make_pipeline(rt, rt_lm) pipeline.fit(X_train, y_train) y_pred_rt = pipeline.predict_proba(X_test)[:, 1] fpr_rt_lm, tpr_rt_lm, _ = roc_curve(y_test, y_pred_rt) # Supervised transformation based on random forests rf = RandomForestClassifier(max_depth=3, n_estimators=n_estimator) rf_enc = OneHotEncoder() rf_lm = LogisticRegression() rf.fit(X_train, y_train) rf_enc.fit(rf.apply(X_train)) rf_lm.fit(rf_enc.transform(rf.apply(X_train_lr)), y_train_lr) y_pred_rf_lm = rf_lm.predict_proba(rf_enc.transform(rf.apply(X_test)))[:, 1] fpr_rf_lm, tpr_rf_lm, _ = roc_curve(y_test, y_pred_rf_lm) grd = GradientBoostingClassifier(n_estimators=n_estimator) grd_enc = OneHotEncoder() grd_lm = LogisticRegression() grd.fit(X_train, y_train) grd_enc.fit(grd.apply(X_train)[:, :, 0]) grd_lm.fit(grd_enc.transform(grd.apply(X_train_lr)[:, :, 0]), y_train_lr) y_pred_grd_lm = grd_lm.predict_proba( grd_enc.transform(grd.apply(X_test)[:, :, 0]))[:, 1] fpr_grd_lm, tpr_grd_lm, _ = roc_curve(y_test, y_pred_grd_lm) # The gradient boosted model by itself y_pred_grd = grd.predict_proba(X_test)[:, 1] fpr_grd, tpr_grd, _ = roc_curve(y_test, y_pred_grd) # The random forest model by itself y_pred_rf = rf.predict_proba(X_test)[:, 1] fpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf) plt.figure(1) plt.plot([0, 1], [0, 1], 'k--') plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR') plt.plot(fpr_rf, tpr_rf, label='RF') plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR') plt.plot(fpr_grd, tpr_grd, label='GBT') plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR') plt.xlabel('False positive rate') plt.ylabel('True positive rate') plt.title('ROC curve') plt.legend(loc='best') plt.show() plt.figure(2) plt.xlim(0, 0.2) plt.ylim(0.8, 1) plt.plot([0, 1], [0, 1], 'k--') plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR') plt.plot(fpr_rf, tpr_rf, label='RF') plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR') plt.plot(fpr_grd, tpr_grd, label='GBT') plt.plot(fpr_grd_lm, tpr_grd_lm, 
label='GBT + LR') plt.xlabel('False positive rate') plt.ylabel('True positive rate') plt.title('ROC curve (zoomed in at top left)') plt.legend(loc='best') plt.show()
bsd-3-clause
zerkrx/zerkbox
lib/websockets/client.py
11
6563
""" The :mod:`websockets.client` module defines a simple WebSocket client API. """ import asyncio import collections.abc import email.message from .exceptions import InvalidHandshake from .handshake import build_request, check_response from .http import USER_AGENT, read_response from .protocol import CONNECTING, OPEN, WebSocketCommonProtocol from .uri import parse_uri __all__ = ['connect', 'WebSocketClientProtocol'] class WebSocketClientProtocol(WebSocketCommonProtocol): """ Complete WebSocket client implementation as an :class:`asyncio.Protocol`. This class inherits most of its methods from :class:`~websockets.protocol.WebSocketCommonProtocol`. """ is_client = True state = CONNECTING @asyncio.coroutine def handshake(self, wsuri, origin=None, subprotocols=None, extra_headers=None): """ Perform the client side of the opening handshake. If provided, ``origin`` sets the Origin HTTP header. If provided, ``subprotocols`` is a list of supported subprotocols in order of decreasing preference. If provided, ``extra_headers`` sets additional HTTP request headers. It must be a mapping or an iterable of (name, value) pairs. """ headers = [] set_header = lambda k, v: headers.append((k, v)) if wsuri.port == (443 if wsuri.secure else 80): # pragma: no cover set_header('Host', wsuri.host) else: set_header('Host', '{}:{}'.format(wsuri.host, wsuri.port)) if origin is not None: set_header('Origin', origin) if subprotocols is not None: set_header('Sec-WebSocket-Protocol', ', '.join(subprotocols)) if extra_headers is not None: if isinstance(extra_headers, collections.abc.Mapping): extra_headers = extra_headers.items() for name, value in extra_headers: set_header(name, value) set_header('User-Agent', USER_AGENT) key = build_request(set_header) self.request_headers = email.message.Message() for name, value in headers: self.request_headers[name] = value self.raw_request_headers = headers # Send handshake request. 
Since the URI and the headers only contain # ASCII characters, we can keep this simple. request = ['GET %s HTTP/1.1' % wsuri.resource_name] request.extend('{}: {}'.format(k, v) for k, v in headers) request.append('\r\n') request = '\r\n'.join(request).encode() self.writer.write(request) # Read handshake response. try: status_code, headers = yield from read_response(self.reader) except ValueError as exc: raise InvalidHandshake("Malformed HTTP message") from exc if status_code != 101: raise InvalidHandshake("Bad status code: {}".format(status_code)) self.response_headers = headers self.raw_response_headers = list(headers.raw_items()) get_header = lambda k: headers.get(k, '') check_response(get_header, key) self.subprotocol = headers.get('Sec-WebSocket-Protocol', None) if (self.subprotocol is not None and self.subprotocol not in subprotocols): raise InvalidHandshake( "Unknown subprotocol: {}".format(self.subprotocol)) assert self.state == CONNECTING self.state = OPEN self.opening_handshake.set_result(True) @asyncio.coroutine def connect(uri, *, klass=WebSocketClientProtocol, timeout=10, max_size=2 ** 20, max_queue=2 ** 5, loop=None, legacy_recv=False, origin=None, subprotocols=None, extra_headers=None, **kwds): """ This coroutine connects to a WebSocket server at a given ``uri``. It yields a :class:`WebSocketClientProtocol` which can then be used to send and receive messages. :func:`connect` is a wrapper around the event loop's :meth:`~asyncio.BaseEventLoop.create_connection` method. Extra keyword arguments are passed to :meth:`~asyncio.BaseEventLoop.create_connection`. For example, you can set the ``ssl`` keyword argument to a :class:`~ssl.SSLContext` to enforce some TLS settings. When connecting to a ``wss://`` URI, if this argument isn't provided explicitly, it's set to ``True``, which means Python's default :class:`~ssl.SSLContext` is used. 
The behavior of the ``timeout``, ``max_size``, and ``max_queue`` optional arguments is described the documentation of :class:`~websockets.protocol.WebSocketCommonProtocol`. :func:`connect` also accepts the following optional arguments: * ``origin`` sets the Origin HTTP header * ``subprotocols`` is a list of supported subprotocols in order of decreasing preference * ``extra_headers`` sets additional HTTP request headers – it can be a mapping or an iterable of (name, value) pairs :func:`connect` raises :exc:`~websockets.uri.InvalidURI` if ``uri`` is invalid and :exc:`~websockets.handshake.InvalidHandshake` if the opening handshake fails. On Python 3.5, :func:`connect` can be used as a asynchronous context manager. In that case, the connection is closed when exiting the context. """ if loop is None: loop = asyncio.get_event_loop() wsuri = parse_uri(uri) if wsuri.secure: kwds.setdefault('ssl', True) elif 'ssl' in kwds: raise ValueError("connect() received a SSL context for a ws:// URI. " "Use a wss:// URI to enable TLS.") factory = lambda: klass( host=wsuri.host, port=wsuri.port, secure=wsuri.secure, timeout=timeout, max_size=max_size, max_queue=max_queue, loop=loop, legacy_recv=legacy_recv, ) transport, protocol = yield from loop.create_connection( factory, wsuri.host, wsuri.port, **kwds) try: yield from protocol.handshake( wsuri, origin=origin, subprotocols=subprotocols, extra_headers=extra_headers) except Exception: yield from protocol.close_connection(force=True) raise return protocol try: from .py35.client import Connect except (SyntaxError, ImportError): # pragma: no cover pass else: Connect.__wrapped__ = connect # Copy over docstring to support building documentation on Python 3.5. Connect.__doc__ = connect.__doc__ connect = Connect
gpl-3.0
jemekite/youtube-dl
youtube_dl/extractor/bbc.py
52
32832
# coding: utf-8 from __future__ import unicode_literals import re import xml.etree.ElementTree from .common import InfoExtractor from ..utils import ( ExtractorError, float_or_none, int_or_none, parse_duration, parse_iso8601, ) from ..compat import compat_HTTPError class BBCCoUkIE(InfoExtractor): IE_NAME = 'bbc.co.uk' IE_DESC = 'BBC iPlayer' _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:(?:(?:programmes|iplayer(?:/[^/]+)?/(?:episode|playlist))/)|music/clips[/#])(?P<id>[\da-z]{8})' _MEDIASELECTOR_URLS = [ 'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s', ] _TESTS = [ { 'url': 'http://www.bbc.co.uk/programmes/b039g8p7', 'info_dict': { 'id': 'b039d07m', 'ext': 'flv', 'title': 'Kaleidoscope, Leonard Cohen', 'description': 'The Canadian poet and songwriter reflects on his musical career.', 'duration': 1740, }, 'params': { # rtmp download 'skip_download': True, } }, { 'url': 'http://www.bbc.co.uk/iplayer/episode/b00yng5w/The_Man_in_Black_Series_3_The_Printed_Name/', 'info_dict': { 'id': 'b00yng1d', 'ext': 'flv', 'title': 'The Man in Black: Series 3: The Printed Name', 'description': "Mark Gatiss introduces Nicholas Pierpan's chilling tale of a writer's devilish pact with a mysterious man. 
Stars Ewan Bailey.", 'duration': 1800, }, 'params': { # rtmp download 'skip_download': True, }, 'skip': 'Episode is no longer available on BBC iPlayer Radio', }, { 'url': 'http://www.bbc.co.uk/iplayer/episode/b03vhd1f/The_Voice_UK_Series_3_Blind_Auditions_5/', 'info_dict': { 'id': 'b00yng1d', 'ext': 'flv', 'title': 'The Voice UK: Series 3: Blind Auditions 5', 'description': "Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.", 'duration': 5100, }, 'params': { # rtmp download 'skip_download': True, }, 'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only', }, { 'url': 'http://www.bbc.co.uk/iplayer/episode/p026c7jt/tomorrows-worlds-the-unearthly-history-of-science-fiction-2-invasion', 'info_dict': { 'id': 'b03k3pb7', 'ext': 'flv', 'title': "Tomorrow's Worlds: The Unearthly History of Science Fiction", 'description': '2. Invasion', 'duration': 3600, }, 'params': { # rtmp download 'skip_download': True, }, 'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only', }, { 'url': 'http://www.bbc.co.uk/programmes/b04v20dw', 'info_dict': { 'id': 'b04v209v', 'ext': 'flv', 'title': 'Pete Tong, The Essential New Tune Special', 'description': "Pete has a very special mix - all of 2014's Essential New Tunes!", 'duration': 10800, }, 'params': { # rtmp download 'skip_download': True, } }, { 'url': 'http://www.bbc.co.uk/music/clips/p02frcc3', 'note': 'Audio', 'info_dict': { 'id': 'p02frcch', 'ext': 'flv', 'title': 'Pete Tong, Past, Present and Future Special, Madeon - After Hours mix', 'description': 'French house superstar Madeon takes us out of the club and onto the after party.', 'duration': 3507, }, 'params': { # rtmp download 'skip_download': True, } }, { 'url': 'http://www.bbc.co.uk/music/clips/p025c0zz', 'note': 'Video', 'info_dict': { 'id': 'p025c103', 'ext': 'flv', 'title': 'Reading and Leeds Festival, 2014, 
Rae Morris - Closer (Live on BBC Three)', 'description': 'Rae Morris performs Closer for BBC Three at Reading 2014', 'duration': 226, }, 'params': { # rtmp download 'skip_download': True, } }, { 'url': 'http://www.bbc.co.uk/iplayer/episode/b054fn09/ad/natural-world-20152016-2-super-powered-owls', 'info_dict': { 'id': 'p02n76xf', 'ext': 'flv', 'title': 'Natural World, 2015-2016: 2. Super Powered Owls', 'description': 'md5:e4db5c937d0e95a7c6b5e654d429183d', 'duration': 3540, }, 'params': { # rtmp download 'skip_download': True, }, 'skip': 'geolocation', }, { 'url': 'http://www.bbc.co.uk/iplayer/episode/b05zmgwn/royal-academy-summer-exhibition', 'info_dict': { 'id': 'b05zmgw1', 'ext': 'flv', 'description': 'Kirsty Wark and Morgan Quaintance visit the Royal Academy as it prepares for its annual artistic extravaganza, meeting people who have come together to make the show unique.', 'title': 'Royal Academy Summer Exhibition', 'duration': 3540, }, 'params': { # rtmp download 'skip_download': True, }, 'skip': 'geolocation', }, { 'url': 'http://www.bbc.co.uk/iplayer/playlist/p01dvks4', 'only_matching': True, }, { 'url': 'http://www.bbc.co.uk/music/clips#p02frcc3', 'only_matching': True, }, { 'url': 'http://www.bbc.co.uk/iplayer/cbeebies/episode/b0480276/bing-14-atchoo', 'only_matching': True, } ] class MediaSelectionError(Exception): def __init__(self, id): self.id = id def _extract_asx_playlist(self, connection, programme_id): asx = self._download_xml(connection.get('href'), programme_id, 'Downloading ASX playlist') return [ref.get('href') for ref in asx.findall('./Entry/ref')] def _extract_connection(self, connection, programme_id): formats = [] protocol = connection.get('protocol') supplier = connection.get('supplier') if protocol == 'http': href = connection.get('href') transfer_format = connection.get('transferFormat') # ASX playlist if supplier == 'asx': for i, ref in enumerate(self._extract_asx_playlist(connection, programme_id)): formats.append({ 'url': ref, 
'format_id': 'ref%s_%s' % (i, supplier), }) # Skip DASH until supported elif transfer_format == 'dash': pass # Direct link else: formats.append({ 'url': href, 'format_id': supplier, }) elif protocol == 'rtmp': application = connection.get('application', 'ondemand') auth_string = connection.get('authString') identifier = connection.get('identifier') server = connection.get('server') formats.append({ 'url': '%s://%s/%s?%s' % (protocol, server, application, auth_string), 'play_path': identifier, 'app': '%s?%s' % (application, auth_string), 'page_url': 'http://www.bbc.co.uk', 'player_url': 'http://www.bbc.co.uk/emp/releases/iplayer/revisions/617463_618125_4/617463_618125_4_emp.swf', 'rtmp_live': False, 'ext': 'flv', 'format_id': supplier, }) return formats def _extract_items(self, playlist): return playlist.findall('./{http://bbc.co.uk/2008/emp/playlist}item') def _extract_medias(self, media_selection): error = media_selection.find('./{http://bbc.co.uk/2008/mp/mediaselection}error') if error is not None: raise BBCCoUkIE.MediaSelectionError(error.get('id')) return media_selection.findall('./{http://bbc.co.uk/2008/mp/mediaselection}media') def _extract_connections(self, media): return media.findall('./{http://bbc.co.uk/2008/mp/mediaselection}connection') def _extract_video(self, media, programme_id): formats = [] vbr = int_or_none(media.get('bitrate')) vcodec = media.get('encoding') service = media.get('service') width = int_or_none(media.get('width')) height = int_or_none(media.get('height')) file_size = int_or_none(media.get('media_file_size')) for connection in self._extract_connections(media): conn_formats = self._extract_connection(connection, programme_id) for format in conn_formats: format.update({ 'format_id': '%s_%s' % (service, format['format_id']), 'width': width, 'height': height, 'vbr': vbr, 'vcodec': vcodec, 'filesize': file_size, }) formats.extend(conn_formats) return formats def _extract_audio(self, media, programme_id): formats = [] abr = 
int_or_none(media.get('bitrate')) acodec = media.get('encoding') service = media.get('service') for connection in self._extract_connections(media): conn_formats = self._extract_connection(connection, programme_id) for format in conn_formats: format.update({ 'format_id': '%s_%s' % (service, format['format_id']), 'abr': abr, 'acodec': acodec, }) formats.extend(conn_formats) return formats def _get_subtitles(self, media, programme_id): subtitles = {} for connection in self._extract_connections(media): captions = self._download_xml(connection.get('href'), programme_id, 'Downloading captions') lang = captions.get('{http://www.w3.org/XML/1998/namespace}lang', 'en') subtitles[lang] = [ { 'url': connection.get('href'), 'ext': 'ttml', }, ] return subtitles def _raise_extractor_error(self, media_selection_error): raise ExtractorError( '%s returned error: %s' % (self.IE_NAME, media_selection_error.id), expected=True) def _download_media_selector(self, programme_id): last_exception = None for mediaselector_url in self._MEDIASELECTOR_URLS: try: return self._download_media_selector_url( mediaselector_url % programme_id, programme_id) except BBCCoUkIE.MediaSelectionError as e: if e.id == 'notukerror': last_exception = e continue self._raise_extractor_error(e) self._raise_extractor_error(last_exception) def _download_media_selector_url(self, url, programme_id=None): try: media_selection = self._download_xml( url, programme_id, 'Downloading media selection XML') except ExtractorError as ee: if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403: media_selection = xml.etree.ElementTree.fromstring(ee.cause.read().decode('utf-8')) else: raise return self._process_media_selector(media_selection, programme_id) def _process_media_selector(self, media_selection, programme_id): formats = [] subtitles = None for media in self._extract_medias(media_selection): kind = media.get('kind') if kind == 'audio': formats.extend(self._extract_audio(media, programme_id)) elif kind == 
'video': formats.extend(self._extract_video(media, programme_id)) elif kind == 'captions': subtitles = self.extract_subtitles(media, programme_id) return formats, subtitles def _download_playlist(self, playlist_id): try: playlist = self._download_json( 'http://www.bbc.co.uk/programmes/%s/playlist.json' % playlist_id, playlist_id, 'Downloading playlist JSON') version = playlist.get('defaultAvailableVersion') if version: smp_config = version['smpConfig'] title = smp_config['title'] description = smp_config['summary'] for item in smp_config['items']: kind = item['kind'] if kind != 'programme' and kind != 'radioProgramme': continue programme_id = item.get('vpid') duration = int_or_none(item.get('duration')) formats, subtitles = self._download_media_selector(programme_id) return programme_id, title, description, duration, formats, subtitles except ExtractorError as ee: if not (isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404): raise # fallback to legacy playlist return self._process_legacy_playlist(playlist_id) def _process_legacy_playlist_url(self, url, display_id): playlist = self._download_legacy_playlist_url(url, display_id) return self._extract_from_legacy_playlist(playlist, display_id) def _process_legacy_playlist(self, playlist_id): return self._process_legacy_playlist_url( 'http://www.bbc.co.uk/iplayer/playlist/%s' % playlist_id, playlist_id) def _download_legacy_playlist_url(self, url, playlist_id=None): return self._download_xml( url, playlist_id, 'Downloading legacy playlist XML') def _extract_from_legacy_playlist(self, playlist, playlist_id): no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems') if no_items is not None: reason = no_items.get('reason') if reason == 'preAvailability': msg = 'Episode %s is not yet available' % playlist_id elif reason == 'postAvailability': msg = 'Episode %s is no longer available' % playlist_id elif reason == 'noMedia': msg = 'Episode %s is not currently available' % playlist_id else: msg = 
'Episode %s is not available: %s' % (playlist_id, reason) raise ExtractorError(msg, expected=True) for item in self._extract_items(playlist): kind = item.get('kind') if kind != 'programme' and kind != 'radioProgramme': continue title = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}title').text description = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}summary').text def get_programme_id(item): def get_from_attributes(item): for p in('identifier', 'group'): value = item.get(p) if value and re.match(r'^[pb][\da-z]{7}$', value): return value get_from_attributes(item) mediator = item.find('./{http://bbc.co.uk/2008/emp/playlist}mediator') if mediator is not None: return get_from_attributes(mediator) programme_id = get_programme_id(item) duration = int_or_none(item.get('duration')) # TODO: programme_id can be None and media items can be incorporated right inside # playlist's item (e.g. http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu) # as f4m and m3u8 formats, subtitles = self._download_media_selector(programme_id) return programme_id, title, description, duration, formats, subtitles def _real_extract(self, url): group_id = self._match_id(url) webpage = self._download_webpage(url, group_id, 'Downloading video page') programme_id = None tviplayer = self._search_regex( r'mediator\.bind\(({.+?})\s*,\s*document\.getElementById', webpage, 'player', default=None) if tviplayer: player = self._parse_json(tviplayer, group_id).get('player', {}) duration = int_or_none(player.get('duration')) programme_id = player.get('vpid') if not programme_id: programme_id = self._search_regex( r'"vpid"\s*:\s*"([\da-z]{8})"', webpage, 'vpid', fatal=False, default=None) if programme_id: formats, subtitles = self._download_media_selector(programme_id) title = self._og_search_title(webpage) description = self._search_regex( r'<p class="[^"]*medium-description[^"]*">([^<]+)</p>', webpage, 'description', fatal=False) else: programme_id, title, description, 
duration, formats, subtitles = self._download_playlist(group_id) self._sort_formats(formats) return { 'id': programme_id, 'title': title, 'description': description, 'thumbnail': self._og_search_thumbnail(webpage, default=None), 'duration': duration, 'formats': formats, 'subtitles': subtitles, } class BBCIE(BBCCoUkIE): IE_NAME = 'bbc' IE_DESC = 'BBC' _VALID_URL = r'https?://(?:www\.)?bbc\.(?:com|co\.uk)/(?:[^/]+/)+(?P<id>[^/#?]+)' _MEDIASELECTOR_URLS = [ # Provides more formats, namely direct mp4 links, but fails on some videos with # notukerror for non UK (?) users (e.g. # http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret) 'http://open.live.bbc.co.uk/mediaselector/4/mtis/stream/%s', # Provides fewer formats, but works everywhere for everybody (hopefully) 'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/journalism-pc/vpid/%s', ] _TESTS = [{ # article with multiple videos embedded with data-media-meta containing # playlist.sxml, externalId and no direct video links 'url': 'http://www.bbc.com/news/world-europe-32668511', 'info_dict': { 'id': 'world-europe-32668511', 'title': 'Russia stages massive WW2 parade despite Western boycott', 'description': 'md5:00ff61976f6081841f759a08bf78cc9c', }, 'playlist_count': 2, }, { # article with multiple videos embedded with data-media-meta (more videos) 'url': 'http://www.bbc.com/news/business-28299555', 'info_dict': { 'id': 'business-28299555', 'title': 'Farnborough Airshow: Video highlights', 'description': 'BBC reports and video highlights at the Farnborough Airshow.', }, 'playlist_count': 9, 'skip': 'Save time', }, { # article with multiple videos embedded with `new SMP()` 'url': 'http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460', 'info_dict': { 'id': '3662a707-0af9-3149-963f-47bea720b460', 'title': 'BBC Blogs - Adam Curtis - BUGGER', }, 'playlist_count': 18, }, { # single video embedded with mediaAssetPage.init() 'url': 
'http://www.bbc.com/news/world-europe-32041533', 'info_dict': { 'id': 'p02mprgb', 'ext': 'mp4', 'title': 'Aerial footage showed the site of the crash in the Alps - courtesy BFM TV', 'duration': 47, 'timestamp': 1427219242, 'upload_date': '20150324', }, 'params': { # rtmp download 'skip_download': True, } }, { # article with single video embedded with data-media-meta containing # direct video links (for now these are extracted) and playlist.xml (with # media items as f4m and m3u8 - currently unsupported) 'url': 'http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu', 'info_dict': { 'id': '150615_telabyad_kentin_cogu', 'ext': 'mp4', 'title': "YPG: Tel Abyad'ın tamamı kontrolümüzde", 'duration': 47, 'timestamp': 1434397334, 'upload_date': '20150615', }, 'params': { 'skip_download': True, } }, { # single video embedded with mediaAssetPage.init() (regional section) 'url': 'http://www.bbc.com/mundo/video_fotos/2015/06/150619_video_honduras_militares_hospitales_corrupcion_aw', 'info_dict': { 'id': '150619_video_honduras_militares_hospitales_corrupcion_aw', 'ext': 'mp4', 'title': 'Honduras militariza sus hospitales por nuevo escándalo de corrupción', 'duration': 87, 'timestamp': 1434713142, 'upload_date': '20150619', }, 'params': { 'skip_download': True, } }, { # single video from video playlist embedded with vxp-playlist-data JSON 'url': 'http://www.bbc.com/news/video_and_audio/must_see/33376376', 'info_dict': { 'id': 'p02w6qjc', 'ext': 'mp4', 'title': '''Judge Mindy Glazer: "I'm sorry to see you here... 
I always wondered what happened to you"''', 'duration': 56, }, 'params': { 'skip_download': True, } }, { # single video story with digitalData 'url': 'http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret', 'info_dict': { 'id': 'p02q6gc4', 'ext': 'flv', 'title': 'Sri Lanka’s spicy secret', 'description': 'As a new train line to Jaffna opens up the country’s north, travellers can experience a truly distinct slice of Tamil culture.', 'timestamp': 1437674293, 'upload_date': '20150723', }, 'params': { # rtmp download 'skip_download': True, } }, { # single video story without digitalData 'url': 'http://www.bbc.com/autos/story/20130513-hyundais-rock-star', 'info_dict': { 'id': 'p018zqqg', 'ext': 'mp4', 'title': 'Hyundai Santa Fe Sport: Rock star', 'description': 'md5:b042a26142c4154a6e472933cf20793d', 'timestamp': 1368473503, 'upload_date': '20130513', }, 'params': { # rtmp download 'skip_download': True, } }, { # single video with playlist.sxml URL 'url': 'http://www.bbc.com/sport/0/football/33653409', 'info_dict': { 'id': 'p02xycnp', 'ext': 'mp4', 'title': 'Transfers: Cristiano Ronaldo to Man Utd, Arsenal to spend?', 'description': 'md5:398fca0e2e701c609d726e034fa1fc89', 'duration': 140, }, 'params': { # rtmp download 'skip_download': True, } }, { # single video with playlist URL from weather section 'url': 'http://www.bbc.com/weather/features/33601775', 'only_matching': True, }, { # custom redirection to www.bbc.com 'url': 'http://www.bbc.co.uk/news/science-environment-33661876', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if BBCCoUkIE.suitable(url) else super(BBCIE, cls).suitable(url) def _extract_from_media_meta(self, media_meta, video_id): # Direct links to media in media metadata (e.g. 
# http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu) # TODO: there are also f4m and m3u8 streams incorporated in playlist.sxml source_files = media_meta.get('sourceFiles') if source_files: return [{ 'url': f['url'], 'format_id': format_id, 'ext': f.get('encoding'), 'tbr': float_or_none(f.get('bitrate'), 1000), 'filesize': int_or_none(f.get('filesize')), } for format_id, f in source_files.items() if f.get('url')], [] programme_id = media_meta.get('externalId') if programme_id: return self._download_media_selector(programme_id) # Process playlist.sxml as legacy playlist href = media_meta.get('href') if href: playlist = self._download_legacy_playlist_url(href) _, _, _, _, formats, subtitles = self._extract_from_legacy_playlist(playlist, video_id) return formats, subtitles return [], [] def _real_extract(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) timestamp = parse_iso8601(self._search_regex( [r'"datePublished":\s*"([^"]+)', r'<meta[^>]+property="article:published_time"[^>]+content="([^"]+)"', r'itemprop="datePublished"[^>]+datetime="([^"]+)"'], webpage, 'date', default=None)) # single video with playlist.sxml URL (e.g. http://www.bbc.com/sport/0/football/3365340ng) playlist = self._search_regex( r'<param[^>]+name="playlist"[^>]+value="([^"]+)"', webpage, 'playlist', default=None) if playlist: programme_id, title, description, duration, formats, subtitles = \ self._process_legacy_playlist_url(playlist, playlist_id) self._sort_formats(formats) return { 'id': programme_id, 'title': title, 'description': description, 'duration': duration, 'timestamp': timestamp, 'formats': formats, 'subtitles': subtitles, } # single video story (e.g. 
http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret) programme_id = self._search_regex( [r'data-video-player-vpid="([\da-z]{8})"', r'<param[^>]+name="externalIdentifier"[^>]+value="([\da-z]{8})"'], webpage, 'vpid', default=None) if programme_id: formats, subtitles = self._download_media_selector(programme_id) self._sort_formats(formats) # digitalData may be missing (e.g. http://www.bbc.com/autos/story/20130513-hyundais-rock-star) digital_data = self._parse_json( self._search_regex( r'var\s+digitalData\s*=\s*({.+?});?\n', webpage, 'digital data', default='{}'), programme_id, fatal=False) page_info = digital_data.get('page', {}).get('pageInfo', {}) title = page_info.get('pageName') or self._og_search_title(webpage) description = page_info.get('description') or self._og_search_description(webpage) timestamp = parse_iso8601(page_info.get('publicationDate')) or timestamp return { 'id': programme_id, 'title': title, 'description': description, 'timestamp': timestamp, 'formats': formats, 'subtitles': subtitles, } playlist_title = self._html_search_regex( r'<title>(.*?)(?:\s*-\s*BBC [^ ]+)?</title>', webpage, 'playlist title') playlist_description = self._og_search_description(webpage, default=None) def extract_all(pattern): return list(filter(None, map( lambda s: self._parse_json(s, playlist_id, fatal=False), re.findall(pattern, webpage)))) # Multiple video article (e.g. # http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460) EMBED_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:[^/]+/)+[\da-z]{8}(?:\b[^"]+)?' 
entries = [] for match in extract_all(r'new\s+SMP\(({.+?})\)'): embed_url = match.get('playerSettings', {}).get('externalEmbedUrl') if embed_url and re.match(EMBED_URL, embed_url): entries.append(embed_url) entries.extend(re.findall( r'setPlaylist\("(%s)"\)' % EMBED_URL, webpage)) if entries: return self.playlist_result( [self.url_result(entry, 'BBCCoUk') for entry in entries], playlist_id, playlist_title, playlist_description) # Multiple video article (e.g. http://www.bbc.com/news/world-europe-32668511) medias = extract_all(r"data-media-meta='({[^']+})'") if not medias: # Single video article (e.g. http://www.bbc.com/news/video_and_audio/international) media_asset = self._search_regex( r'mediaAssetPage\.init\(\s*({.+?}), "/', webpage, 'media asset', default=None) if media_asset: media_asset_page = self._parse_json(media_asset, playlist_id, fatal=False) medias = [] for video in media_asset_page.get('videos', {}).values(): medias.extend(video.values()) if not medias: # Multiple video playlist with single `now playing` entry (e.g. 
# http://www.bbc.com/news/video_and_audio/must_see/33767813) vxp_playlist = self._parse_json( self._search_regex( r'<script[^>]+class="vxp-playlist-data"[^>]+type="application/json"[^>]*>([^<]+)</script>', webpage, 'playlist data'), playlist_id) playlist_medias = [] for item in vxp_playlist: media = item.get('media') if not media: continue playlist_medias.append(media) # Download single video if found media with asset id matching the video id from URL if item.get('advert', {}).get('assetId') == playlist_id: medias = [media] break # Fallback to the whole playlist if not medias: medias = playlist_medias entries = [] for num, media_meta in enumerate(medias, start=1): formats, subtitles = self._extract_from_media_meta(media_meta, playlist_id) if not formats: continue self._sort_formats(formats) video_id = media_meta.get('externalId') if not video_id: video_id = playlist_id if len(medias) == 1 else '%s-%s' % (playlist_id, num) title = media_meta.get('caption') if not title: title = playlist_title if len(medias) == 1 else '%s - Video %s' % (playlist_title, num) duration = int_or_none(media_meta.get('durationInSeconds')) or parse_duration(media_meta.get('duration')) images = [] for image in media_meta.get('images', {}).values(): images.extend(image.values()) if 'image' in media_meta: images.append(media_meta['image']) thumbnails = [{ 'url': image.get('href'), 'width': int_or_none(image.get('width')), 'height': int_or_none(image.get('height')), } for image in images] entries.append({ 'id': video_id, 'title': title, 'thumbnails': thumbnails, 'duration': duration, 'timestamp': timestamp, 'formats': formats, 'subtitles': subtitles, }) return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)
unlicense
DistributedOpenUnifiedGovernmentNetwork/mapwarper
public/cgi/tilecache/setup.py
7
1542
#!/usr/bin/env python
"""Packaging script for TileCache.

Pass ``--debian`` on the command line to install ``tilecache.cfg`` into
``/etc`` instead of the ``TileCache`` data directory.
"""

import sys

try:
    from setuptools import setup
except ImportError:
    # setuptools is not installed: bootstrap it via ez_setup, then retry.
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup

# Read the long description.  Binary mode matches the original behaviour
# (no newline translation); the context manager guarantees the handle is
# closed (the old code used the Py2-only `file()` builtin and leaked it).
with open('docs/README.txt', 'rb') as readme_file:
    readme = readme_file.read()

classifiers = [
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'Intended Audience :: Science/Research',
    'License :: OSI Approved :: BSD License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Topic :: Scientific/Engineering :: GIS',
]

# We'd like to let debian install the /etc/tilecache.cfg,
# but put them in tilecache/tilecache.cfg using setuptools
# otherwise.
extra = {}
if "--debian" in sys.argv:
    extra['data_files'] = [('/etc', ['tilecache.cfg'])]
    sys.argv.remove("--debian")
else:
    extra['data_files'] = [('TileCache', ['tilecache.cfg'])]

setup(
    name='TileCache',
    version='2.10',
    description='a web map tile caching system',
    author='MetaCarta Labs',
    author_email='tilecache@openlayers.org',
    url='http://tilecache.org/',
    long_description=readme,
    packages=['TileCache', 'TileCache.Caches', 'TileCache.Services',
              'TileCache.Layers'],
    scripts=['tilecache.cgi', 'tilecache.fcgi', 'tilecache_seed.py',
             'tilecache_install_config.py', 'tilecache_clean.py',
             'tilecache_http_server.py'],
    zip_safe=False,
    test_suite='tests.run_doc_tests',
    license="BSD",
    classifiers=classifiers,
    **extra
)
mit
mdhaman/superdesk-core
tests/privilege_test.py
7
1144
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license

from nose.tools import raises
from superdesk.tests import TestCase
from superdesk.privilege import privilege, get_privilege_list, _privileges


class PrivilegeTestCase(TestCase):
    """Tests for the privilege registration helpers."""

    def setUp(self):
        # Snapshot the global registry and give each test a clean slate;
        # the snapshot is put back once the test (pass or fail) finishes.
        snapshot = _privileges.copy()
        _privileges.clear()

        def restore():
            _privileges.clear()
            _privileges.update(snapshot)

        self.addCleanup(restore)

    def test_privilege_registration(self):
        # Registering privileges records them and exposes them via the list API.
        _privileges.clear()
        for name, label in (('ingest', 'Ingest'), ('archive', 'Archive')):
            privilege(name=name, label=label)
        self.assertIn('ingest', _privileges)
        self.assertIn('archive', _privileges)
        self.assertEqual(2, len(get_privilege_list()))

    @raises(Exception)
    def test_privilege_name_has_no_dots(self):
        # A trailing dot in the privilege name must be rejected.
        privilege(name='test.')
agpl-3.0
ssbarnea/ansible
test/support/integration/plugins/module_utils/docker/swarm.py
61
10842
# (c) 2019 Piotr Wojciechowski (@wojciechowskipiotr) <piotr@it-playground.pl>
# (c) Thierry Bouvet (@tbouvet)

# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json
from time import sleep

try:
    from docker.errors import APIError, NotFound
except ImportError:
    # missing Docker SDK for Python handled in ansible.module_utils.docker.common
    pass

from ansible.module_utils._text import to_native
from ansible.module_utils.docker.common import (
    AnsibleDockerClient,
    LooseVersion,
)


class AnsibleDockerSwarmClient(AnsibleDockerClient):
    """Docker client with Swarm-specific helpers (node/service inspection)."""

    def __init__(self, **kwargs):
        super(AnsibleDockerSwarmClient, self).__init__(**kwargs)

    def get_swarm_node_id(self):
        """
        Get the 'NodeID' of the Swarm node the module is executed on.

        :return: NodeID of host or 'None' if not part of Swarm
        """
        try:
            info = self.info()
        except APIError as exc:
            self.fail("Failed to get node information for %s" % to_native(exc))

        if info:
            # Round-trip through JSON to get a plain, deep-copied dict.
            json_str = json.dumps(info, ensure_ascii=False)
            swarm_info = json.loads(json_str)
            if swarm_info['Swarm']['NodeID']:
                return swarm_info['Swarm']['NodeID']
        return None

    def check_if_swarm_node(self, node_id=None):
        """
        Check whether a host is part of Docker Swarm.

        If 'node_id' is not provided, the Docker host's own system information
        is examined.  If 'node_id' is provided, node information is read,
        assuming execution on a Swarm manager; get_node_inspect() handles the
        case where it is not.

        :param node_id: Node identifier
        :return: bool: True if node is part of Swarm, False otherwise
        """
        if node_id is None:
            try:
                info = self.info()
            except APIError:
                self.fail("Failed to get host information.")

            if info:
                json_str = json.dumps(info, ensure_ascii=False)
                swarm_info = json.loads(json_str)
                if swarm_info['Swarm']['NodeID']:
                    return True
                if swarm_info['Swarm']['LocalNodeState'] in ('active', 'pending', 'locked'):
                    return True
            return False
        else:
            try:
                node_info = self.get_node_inspect(node_id=node_id)
            except APIError:
                # Fix: return False explicitly instead of an implicit None,
                # matching the documented bool contract (both are falsy, so
                # truthiness-based callers are unaffected).
                return False

            if node_info['ID'] is not None:
                return True
            return False

    def check_if_swarm_manager(self):
        """
        Check whether this node's role is Manager.

        inspect_swarm() fails on non-managers, which is used as the signal.

        :return: True if node is Swarm Manager, False otherwise
        """
        try:
            self.inspect_swarm()
            return True
        except APIError:
            return False

    def fail_task_if_not_swarm_manager(self):
        """
        Fail the Ansible task when this host is not a swarm manager.
        """
        if not self.check_if_swarm_manager():
            self.fail("Error running docker swarm module: must run on swarm manager node")

    def check_if_swarm_worker(self):
        """
        Check whether this node's role is Worker (in Swarm but not a manager).

        Fails if run on a host that is not part of Swarm (via check_if_swarm_node()).

        :return: True if node is Swarm Worker, False otherwise
        """
        if self.check_if_swarm_node() and not self.check_if_swarm_manager():
            return True
        return False

    def check_if_swarm_node_is_down(self, node_id=None, repeat_check=1):
        """
        Check whether a node's status on the Swarm manager is 'down'.

        Fails the playbook when run on a worker or a host outside Swarm.

        :param repeat_check: number of check attempts with 5 seconds delay
            between them, by default check only once
        :param node_id: node ID or name; if None, the host's own node ID is used
        :return: True if node is part of swarm but its state is down, False otherwise
        """
        if repeat_check < 1:
            repeat_check = 1

        if node_id is None:
            node_id = self.get_swarm_node_id()

        for retry in range(0, repeat_check):
            if retry > 0:
                sleep(5)
            node_info = self.get_node_inspect(node_id=node_id)
            if node_info['Status']['State'] == 'down':
                return True
        return False

    def get_node_inspect(self, node_id=None, skip_missing=False):
        """
        Return Swarm node info as in the 'docker node inspect' command.

        :param skip_missing: if True, return None for a missing node instead
            of failing the task
        :param node_id: node ID or name; if None, the host's own node ID is used
        :return: Single node information structure
        """
        if node_id is None:
            node_id = self.get_swarm_node_id()

        if node_id is None:
            self.fail("Failed to get node information.")

        try:
            node_info = self.inspect_node(node_id=node_id)
        except APIError as exc:
            if exc.status_code == 503:
                self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
            if exc.status_code == 404:
                if skip_missing:
                    return None
            self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
        except Exception as exc:
            self.fail("Error inspecting swarm node: %s" % exc)

        json_str = json.dumps(node_info, ensure_ascii=False)
        node_info = json.loads(json_str)

        if 'ManagerStatus' in node_info:
            if node_info['ManagerStatus'].get('Leader'):
                # This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0
                # Check moby/moby#35437 for details
                count_colons = node_info['ManagerStatus']['Addr'].count(":")
                if count_colons == 1:
                    swarm_leader_ip = node_info['ManagerStatus']['Addr'].split(":", 1)[0] or node_info['Status']['Addr']
                else:
                    swarm_leader_ip = node_info['Status']['Addr']
                node_info['Status']['Addr'] = swarm_leader_ip
        return node_info

    def get_all_nodes_inspect(self):
        """
        Return info about all registered nodes, as in 'docker node inspect'.

        :return: Structure with information about all nodes
        """
        try:
            node_info = self.nodes()
        except APIError as exc:
            if exc.status_code == 503:
                self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
            self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
        except Exception as exc:
            self.fail("Error inspecting swarm node: %s" % exc)

        json_str = json.dumps(node_info, ensure_ascii=False)
        node_info = json.loads(json_str)
        return node_info

    def get_all_nodes_list(self, output='short'):
        """
        Return the list of nodes registered in Swarm.

        :param output: 'short' returns a list of node hostnames; 'long'
            returns a list of dicts with the attributes shown by
            'docker node ls'
        :return: list per 'output', or None for an unknown 'output' value
        """
        nodes_list = []

        nodes_inspect = self.get_all_nodes_inspect()
        if nodes_inspect is None:
            return None

        if output == 'short':
            for node in nodes_inspect:
                nodes_list.append(node['Description']['Hostname'])
        elif output == 'long':
            for node in nodes_inspect:
                node_property = {}

                node_property.update({'ID': node['ID']})
                node_property.update({'Hostname': node['Description']['Hostname']})
                node_property.update({'Status': node['Status']['State']})
                node_property.update({'Availability': node['Spec']['Availability']})
                if 'ManagerStatus' in node:
                    # Fix: use .get() like get_node_inspect() does for the same
                    # field — the API may omit 'Leader' for non-leader managers,
                    # and direct indexing would raise KeyError here.
                    if node['ManagerStatus'].get('Leader') is True:
                        node_property.update({'Leader': True})
                    node_property.update({'ManagerStatus': node['ManagerStatus']['Reachability']})
                node_property.update({'EngineVersion': node['Description']['Engine']['EngineVersion']})

                nodes_list.append(node_property)
        else:
            return None

        return nodes_list

    def get_node_name_by_id(self, nodeid):
        """Return the hostname of the node identified by `nodeid`."""
        return self.get_node_inspect(nodeid)['Description']['Hostname']

    def get_unlock_key(self):
        """Return the swarm unlock key, or None on Docker SDK < 2.7.0."""
        if self.docker_py_version < LooseVersion('2.7.0'):
            return None
        return super(AnsibleDockerSwarmClient, self).get_unlock_key()

    def get_service_inspect(self, service_id, skip_missing=False):
        """
        Return Swarm service info as in 'docker service inspect'.

        :param service_id: service ID or name
        :param skip_missing: if True, return None for a missing service
            instead of failing the task
        :return: Single service information structure
        """
        try:
            service_info = self.inspect_service(service_id)
        except NotFound as exc:
            if skip_missing is False:
                self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
            else:
                return None
        except APIError as exc:
            if exc.status_code == 503:
                self.fail("Cannot inspect service: To inspect service execute module on Swarm Manager")
            self.fail("Error inspecting swarm service: %s" % exc)
        except Exception as exc:
            self.fail("Error inspecting swarm service: %s" % exc)

        json_str = json.dumps(service_info, ensure_ascii=False)
        service_info = json.loads(json_str)
        return service_info
gpl-3.0
xbed/Mixly_Arduino
mixly_arduino/mixpyBuild/aip/base.py
1
7658
# -*- coding: utf-8 -*-

"""
AipBase: shared HTTP / authentication plumbing for the Baidu AIP SDK clients.
"""
import hmac
import json
import hashlib
import datetime
import base64
import time
import sys
import requests
# NOTE(review): warnings are silenced because every request below passes
# verify=False (TLS certificate validation disabled) — flagging, not changing.
requests.packages.urllib3.disable_warnings()
if sys.version_info.major == 2:
    from urllib import urlencode
    from urllib import quote
    from urlparse import urlparse
else:
    from urllib.parse import urlencode
    from urllib.parse import quote
    from urllib.parse import urlparse

class AipBase(object):
    """
    Base client: token caching, request signing and POST dispatch.
    """

    # OAuth token endpoint, feedback endpoint and the scope that marks a
    # self-service (non-cloud) user.
    __accessTokenUrl = 'https://aip.baidubce.com/oauth/2.0/token'
    __reportUrl = 'https://aip.baidubce.com/rpc/2.0/feedback/v1/report'
    __scope = 'brain_all_scope'

    def __init__(self, appId, apiKey, secretKey):
        """
        AipBase(appId, apiKey, secretKey): store stripped credentials and
        default timeouts; auth state starts empty and is filled lazily.
        """
        self._appId = appId.strip()
        self._apiKey = apiKey.strip()
        self._secretKey = secretKey.strip()
        self._authObj = {}
        self._isCloudUser = None
        self.__client = requests
        self.__connectTimeout = 60.0
        self.__socketTimeout = 60.0
        self._proxies = {}
        self.__version = '2_2_16'

    def getVersion(self):
        """
        Return the SDK version string.
        """
        return self.__version

    def setConnectionTimeoutInMillis(self, ms):
        """
        Set the connection timeout (milliseconds).
        """
        self.__connectTimeout = ms / 1000.0

    def setSocketTimeoutInMillis(self, ms):
        """
        Set the socket (read) timeout (milliseconds).
        """
        self.__socketTimeout = ms / 1000.0

    def setProxies(self, proxies):
        """
        Set the proxies dict passed to requests.
        """
        self._proxies = proxies

    def _request(self, url, data, headers=None):
        """
        POST `data` to `url` with auth params/headers; returns the parsed
        JSON response dict, or an SDK error dict on timeout.
        """
        try:
            result = self._validate(url, data)
            if result != True:
                return result

            authObj = self._auth()
            params = self._getParams(authObj)

            data = self._proccessRequest(url, params, data, headers)
            headers = self._getAuthHeaders('POST', url, params, headers)
            response = self.__client.post(url, data=data, params=params,
                headers=headers, verify=False, timeout=(
                    self.__connectTimeout,
                    self.__socketTimeout,
                ), proxies=self._proxies
            )
            obj = self._proccessResult(response.content)

            # error_code 110 — token rejected; force one re-auth and retry
            # (presumably "invalid access token" per Baidu AIP docs — verify).
            if not self._isCloudUser and obj.get('error_code', '') == 110:
                authObj = self._auth(True)
                params = self._getParams(authObj)
                response = self.__client.post(url, data=data, params=params,
                    headers=headers, verify=False, timeout=(
                        self.__connectTimeout,
                        self.__socketTimeout,
                    ), proxies=self._proxies
                )
                obj = self._proccessResult(response.content)
        except (requests.exceptions.ReadTimeout, requests.exceptions.ConnectTimeout) as e:
            return {
                'error_code': 'SDK108',
                'error_msg': 'connection or read data timeout',
            }

        return obj

    def _validate(self, url, data):
        """
        Validation hook; subclasses may override. Base implementation accepts all.
        """
        return True

    def _proccessRequest(self, url, params, data, headers):
        """
        Process request parameters: tag the query string with SDK name/version.
        """
        params['aipSdk'] = 'python'
        params['aipVersion'] = self.__version

        return data

    def _proccessResult(self, content):
        """
        Parse a raw response body into a dict (empty dict if falsy).
        """
        if sys.version_info.major == 2:
            return json.loads(content) or {}
        else:
            return json.loads(content.decode()) or {}

    def _auth(self, refresh=False):
        """
        Fetch (or reuse) the OAuth access token for API access.
        """
        # Reuse the cached token while still valid (30-second safety margin).
        if not refresh:
            tm = self._authObj.get('time', 0) + int(self._authObj.get('expires_in', 0)) - 30
            if tm > int(time.time()):
                return self._authObj

        obj = self.__client.get(self.__accessTokenUrl, verify=False, params={
            'grant_type': 'client_credentials',
            'client_id': self._apiKey,
            'client_secret': self._secretKey,
        }, timeout=(
            self.__connectTimeout,
            self.__socketTimeout,
        ), proxies=self._proxies).json()

        self._isCloudUser = not self._isPermission(obj)
        obj['time'] = int(time.time())
        self._authObj = obj

        return obj

    def _isPermission(self, authObj):
        """
        Check whether the granted scopes include the required AIP scope.
        """
        scopes = authObj.get('scope', '')

        return self.__scope in scopes.split(' ')

    def _getParams(self, authObj):
        """
        Build the HTTP URL query params for an API request
        (access_token only for non-cloud users).
        """
        params = {}

        if self._isCloudUser == False:
            params['access_token'] = authObj['access_token']

        return params

    def _getAuthHeaders(self, method, url, params=None, headers=None):
        """
        Build BCE-style signed request headers (cloud users only;
        non-cloud users authenticate via access_token query param instead).
        """
        headers = headers or {}
        params = params or {}

        if self._isCloudUser == False:
            return headers

        urlResult = urlparse(url)
        # Merge any query params already present in the URL.
        for kv in urlResult.query.strip().split('&'):
            if kv:
                k, v = kv.split('=')
                params[k] = v

        # UTC timestamp
        timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
        headers['Host'] = urlResult.hostname
        headers['x-bce-date'] = timestamp
        version, expire = '1', '1800'

        # 1 Generate SigningKey
        val = "bce-auth-v%s/%s/%s/%s" % (version, self._apiKey, timestamp, expire)
        signingKey = hmac.new(self._secretKey.encode('utf-8'),
                        val.encode('utf-8'), hashlib.sha256
                    ).hexdigest()

        # 2 Generate CanonicalRequest
        # 2.1 Genrate CanonicalURI
        canonicalUri = quote(urlResult.path)
        # 2.2 Generate CanonicalURI: not used here
        # 2.3 Generate CanonicalHeaders: only include host here
        canonicalHeaders = []
        for header, val in headers.items():
            canonicalHeaders.append(
                '%s:%s' % (
                    quote(header.strip(), '').lower(),
                    quote(val.strip(), '')
                )
            )
        canonicalHeaders = '\n'.join(sorted(canonicalHeaders))

        # 2.4 Generate CanonicalRequest
        canonicalRequest = '%s\n%s\n%s\n%s' % (
            method.upper(),
            canonicalUri,
            '&'.join(sorted(urlencode(params).split('&'))),
            canonicalHeaders
        )

        # 3 Generate Final Signature
        signature = hmac.new(signingKey.encode('utf-8'),
                        canonicalRequest.encode('utf-8'), hashlib.sha256
                    ).hexdigest()

        headers['authorization'] = 'bce-auth-v%s/%s/%s/%s/%s/%s' % (
            version,
            self._apiKey,
            timestamp,
            expire,
            ';'.join(headers.keys()).lower(),
            signature
        )

        return headers

    def report(self, feedback):
        """
        Submit feedback data to the report endpoint.
        """
        data = {}
        data['feedback'] = feedback

        return self._request(self.__reportUrl, data)

    def post(self, url, data, headers=None):
        """
        self.post('', {}): public POST wrapper around _request().
        """
        return self._request(url, data, headers)
apache-2.0
akshaynathr/mailman
src/mailman/rest/users.py
2
6209
# Copyright (C) 2011-2012 by the Free Software Foundation, Inc. # # This file is part of GNU Mailman. # # GNU Mailman is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # GNU Mailman is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # GNU Mailman. If not, see <http://www.gnu.org/licenses/>. """REST for users.""" from __future__ import absolute_import, unicode_literals __metaclass__ = type __all__ = [ 'AUser', 'AllUsers', ] from flufl.password import lookup, make_secret, generate from restish import http, resource from uuid import UUID from zope.component import getUtility from mailman.config import config from mailman.interfaces.address import ExistingAddressError from mailman.interfaces.usermanager import IUserManager from mailman.rest.addresses import UserAddresses from mailman.rest.helpers import CollectionMixin, etag, no_content, path_to from mailman.rest.preferences import Preferences from mailman.rest.validator import Validator class _UserBase(resource.Resource, CollectionMixin): """Shared base class for user representations.""" def _resource_as_dict(self, user): """See `CollectionMixin`.""" # The canonical URL for a user is their unique user id, although we # can always look up a user based on any registered and validated # email address associated with their account. The user id is a UUID, # but we serialize its integer equivalent. 
user_id = user.user_id.int resource = dict( user_id=user_id, created_on=user.created_on, self_link=path_to('users/{0}'.format(user_id)), ) # Add the password attribute, only if the user has a password. Same # with the real name. These could be None or the empty string. if user.password: resource['password'] = user.password if user.display_name: resource['display_name'] = user.display_name return resource def _get_collection(self, request): """See `CollectionMixin`.""" return list(getUtility(IUserManager).users) class AllUsers(_UserBase): """The users.""" @resource.GET() def collection(self, request): """/users""" resource = self._make_collection(request) return http.ok([], etag(resource)) @resource.POST() def create(self, request): """Create a new user.""" try: validator = Validator(email=unicode, display_name=unicode, password=unicode, _optional=('display_name', 'password')) arguments = validator(request) except ValueError as error: return http.bad_request([], str(error)) # We can't pass the 'password' argument to the user creation method, # so strip that out (if it exists), then create the user, adding the # password after the fact if successful. password = arguments.pop('password', None) try: user = getUtility(IUserManager).create_user(**arguments) except ExistingAddressError as error: return http.bad_request([], b'Address already exists {0}'.format( error.email)) if password is None: # This will have to be reset since it cannot be retrieved. password = generate(int(config.passwords.password_length)) scheme = lookup(config.passwords.password_scheme.upper()) user.password = make_secret(password, scheme) location = path_to('users/{0}'.format(user.user_id.int)) return http.created(location, [], None) class AUser(_UserBase): """A user.""" def __init__(self, user_identifier): """Get a user by various type of identifiers. :param user_identifier: The identifier used to retrieve the user. 
The identifier may either be an integer user-id, or an email address controlled by the user. The type of identifier is auto-detected by looking for an `@` symbol, in which case it's taken as an email address, otherwise it's assumed to be an integer. :type user_identifier: string """ user_manager = getUtility(IUserManager) if '@' in user_identifier: self._user = user_manager.get_user(user_identifier) else: # The identifier is the string representation of an integer that # must be converted to a UUID. try: user_id = UUID(int=int(user_identifier)) except ValueError: self._user = None else: self._user = user_manager.get_user_by_id(user_id) @resource.GET() def user(self, request): """Return a single user end-point.""" if self._user is None: return http.not_found() return http.ok([], self._resource_as_json(self._user)) @resource.child() def addresses(self, request, segments): """/users/<uid>/addresses""" return UserAddresses(self._user) @resource.DELETE() def delete_user(self, request): """Delete the named user.""" if self._user is None: return http.not_found() getUtility(IUserManager).delete_user(self._user) return no_content() @resource.child() def preferences(self, request, segments): """/addresses/<email>/preferences""" if len(segments) != 0: return http.bad_request() if self._user is None: return http.not_found() child = Preferences( self._user.preferences, 'users/{0}'.format(self._user.user_id.int)) return child, []
gpl-3.0
GbalsaC/bitnamiP
venv/lib/python2.7/site-packages/firebase_token_generator.py
2
4985
from array import array from base64 import urlsafe_b64encode import hashlib import hmac import sys try: import json except ImportError: import simplejson as json import time import datetime __all__ = ['create_token'] TOKEN_VERSION = 0 TOKEN_SEP = '.' CLAIMS_MAP = { 'expires': 'exp', 'notBefore': 'nbf', 'admin': 'admin', 'debug': 'debug', 'simulate': 'simulate' } def create_token(secret, data, options=None): """ Generates a secure authentication token. Our token format follows the JSON Web Token (JWT) standard: header.claims.signature Where: 1) "header" is a stringified, base64-encoded JSON object containing version and algorithm information. 2) "claims" is a stringified, base64-encoded JSON object containing a set of claims: Library-generated claims: "iat" -> The issued at time in seconds since the epoch as a number "d" -> The arbitrary JSON object supplied by the user. User-supplied claims (these are all optional): "exp" (optional) -> The expiration time of this token, as a number of seconds since the epoch. "nbf" (optional) -> The "not before" time before which the token should be rejected (seconds since the epoch) "admin" (optional) -> If set to true, this client will bypass all security rules (use this to authenticate servers) "debug" (optional) -> "set to true to make this client receive debug information about security rule execution. "simulate" (optional, internal-only for now) -> Set to true to neuter all API operations (listens / puts will run security rules but not actually write or return data). 3) A signature that proves the validity of this token (see: http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-07) For base64-encoding we use URL-safe base64 encoding. This ensures that the entire token is URL-safe and could, for instance, be placed as a query argument without any encoding (and this is what the JWT spec requires). 
Args: secret - the Firebase Application secret data - a json serializable object of data to be included in the token options - An optional dictionary of additional claims for the token. Possible keys include: a) "expires" -- A datetime or timestamp (as a number of seconds since the epoch) denoting a time after which this token should no longer be valid. b) "notBefore" -- A datetime or timestamp (as a number of seconds since the epoch) denoting a time before which this token should be rejected by the server. c) "admin" -- Set to true to bypass all security rules (use this for your trusted servers). d) "debug" -- Set to true to enable debug mode (so you can see the results of Rules API operations) e) "simulate" -- (internal-only for now) Set to true to neuter all API operations (listens / puts will run security rules but not actually write or return data) Returns: A signed Firebase Authentication Token Raises: ValueError: if an invalid key is specified in options """ if not options and not data: raise ValueError("firebase_token_generator.create_token: data is empty and no options are set. 
This token will have no effect on Firebase."); if not options: options = {} claims = _create_options_claims(options) claims['v'] = TOKEN_VERSION claims['iat'] = int(time.mktime(time.gmtime())) claims['d'] = data return _encode_token(secret, claims) def _create_options_claims(opts): claims = {} for k in opts: if (isinstance(opts[k], datetime.datetime)): opts[k] = int(time.mktime(opts[k].timetuple())) if k in CLAIMS_MAP: claims[CLAIMS_MAP[k]] = opts[k] else: raise ValueError('Unrecognized Option: %s' % k) return claims if sys.version_info < (2, 7): def _encode(bytes_data): # Python 2.6 has problems with bytearrays in b64 encoded = urlsafe_b64encode(bytes(bytes_data)) return encoded.decode('utf-8').replace('=', '') else: def _encode(bytes): encoded = urlsafe_b64encode(bytes) return encoded.decode('utf-8').replace('=', '') def _encode_json(obj): return _encode(bytearray(json.dumps(obj), 'utf-8')) def _sign(secret, to_sign): def portable_bytes(s): try: return bytes(s, 'utf-8') except TypeError: return bytes(s) return _encode(hmac.new(portable_bytes(secret), portable_bytes(to_sign), hashlib.sha256).digest()) def _encode_token(secret, claims): encoded_header = _encode_json({'typ': 'JWT', 'alg': 'HS256'}) encoded_claims = _encode_json(claims) secure_bits = '%s%s%s' % (encoded_header, TOKEN_SEP, encoded_claims) sig = _sign(secret, secure_bits) return '%s%s%s' % (secure_bits, TOKEN_SEP, sig)
agpl-3.0
vegetableman/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/models/test_failures_unittest.py
124
3148
# Copyright (C) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import unittest2 as unittest from webkitpy.layout_tests.models.test_failures import * class TestFailuresTest(unittest.TestCase): def assert_loads(self, cls): failure_obj = cls() s = failure_obj.dumps() new_failure_obj = TestFailure.loads(s) self.assertIsInstance(new_failure_obj, cls) self.assertEqual(failure_obj, new_failure_obj) # Also test that != is implemented. 
self.assertFalse(failure_obj != new_failure_obj) def test_unknown_failure_type(self): class UnknownFailure(TestFailure): def message(self): return '' failure_obj = UnknownFailure() self.assertRaises(ValueError, determine_result_type, [failure_obj]) def test_message_is_virtual(self): failure_obj = TestFailure() self.assertRaises(NotImplementedError, failure_obj.message) def test_loads(self): for c in ALL_FAILURE_CLASSES: self.assert_loads(c) def test_equals(self): self.assertEqual(FailureCrash(), FailureCrash()) self.assertNotEqual(FailureCrash(), FailureTimeout()) crash_set = set([FailureCrash(), FailureCrash()]) self.assertEqual(len(crash_set), 1) # The hash happens to be the name of the class, but sets still work: crash_set = set([FailureCrash(), "FailureCrash"]) self.assertEqual(len(crash_set), 2) def test_crashes(self): self.assertEqual(FailureCrash().message(), 'DumpRenderTree crashed') self.assertEqual(FailureCrash(process_name='foo', pid=1234).message(), 'foo crashed [pid=1234]')
bsd-3-clause
dcnoren/PHPinjectable
webapp/include/adodb/scripts/updateversion.py
23
10700
#!/usr/bin/python -u ''' ADOdb version update script Updates the version number, and release date in all php and html files ''' from datetime import date import getopt import os from os import path import re import subprocess import sys # ADOdb version validation regex # These are used by sed - they are not PCRE ! _version_dev = "dev" _version_regex = "[Vv]?([0-9]\.[0-9]+)(\.([0-9]+))?(-?%s)?" % _version_dev _release_date_regex = "[0-9?]+-.*-[0-9]+" _changelog_file = "docs/changelog.md" _tag_prefix = "v" # Command-line options options = "hct" long_options = ["help", "commit", "tag"] def usage(): print '''Usage: %s version Parameters: version ADOdb version, format: [v]X.YY[a-z|dev] Options: -c | --commit Automatically commit the changes -t | --tag Create a tag for the new release -h | --help Show this usage message ''' % ( path.basename(__file__) ) #end usage() def version_is_dev(version): ''' Returns true if version is a development release ''' return version.endswith(_version_dev) def version_is_patch(version): ''' Returns true if version is a patch release (i.e. X.Y.Z with Z > 0) ''' return not version.endswith('.0') def version_parse(version): ''' Breakdown the version into groups (Z and -dev are optional) 1:(X.Y), 2:(.Z), 3:(Z), 4:(-dev) ''' return re.match(r'^%s$' % _version_regex, version) def version_check(version): ''' Checks that the given version is valid, exits with error if not. Returns the SemVer-normalized version without the "v" prefix - add '.0' if missing patch bit - add '-' before dev release suffix if needed ''' vparse = version_parse(version) if not vparse: usage() print "ERROR: invalid version ! 
\n" sys.exit(1) vnorm = vparse.group(1) # Add .patch version component if vparse.group(2): vnorm += vparse.group(2) else: # None was specified, assume a .0 release vnorm += '.0' # Normalize version number if version_is_dev(version): vnorm += '-' + _version_dev return vnorm def get_release_date(version): ''' Returns the release date in DD-MMM-YYYY format For development releases, DD-MMM will be ??-??? ''' # Development release if version_is_dev(version): date_format = "??-???-%Y" else: date_format = "%d-%b-%Y" # Define release date return date.today().strftime(date_format) def sed_script(version): ''' Builds sed script to update version information in source files ''' # Version number and release date script = r"s/{}\s+(-?)\s+{}/v{} \5 {}/".format( _version_regex, _release_date_regex, version, get_release_date(version) ) return script def sed_filelist(): ''' Build list of files to update ''' dirlist = [] for root, dirs, files in os.walk(".", topdown=True): # Filter files by extensions files = [ f for f in files if re.search(r'\.(php|html?)$', f, re.IGNORECASE) ] for fname in files: dirlist.append(path.join(root, fname)) return dirlist def tag_name(version): return _tag_prefix + version def tag_check(version): ''' Checks if the tag for the specified version exists in the repository by attempting to check it out Throws exception if not ''' subprocess.check_call( "git checkout --quiet " + tag_name(version), stderr=subprocess.PIPE, shell=True) print "Tag '%s' already exists" % tag_name(version) def tag_delete(version): ''' Deletes the specified tag ''' subprocess.check_call( "git tag --delete " + tag_name(version), stderr=subprocess.PIPE, shell=True) def tag_create(version): ''' Creates the tag for the specified version Returns True if tag created ''' print "Creating release tag '%s'" % tag_name(version) result = subprocess.call( "git tag --sign --message '%s' %s" % ( "ADOdb version %s released %s" % ( version, get_release_date(version) ), tag_name(version) ), 
shell=True ) return result == 0 def section_exists(filename, version, print_message=True): ''' Checks given file for existing section with specified version ''' script = True for i, line in enumerate(open(filename)): if re.search(r'^## ' + version, line): if print_message: print " Existing section for v%s found," % version, return True return False def version_get_previous(version): ''' Returns the previous version number Don't decrease major versions (raises exception) ''' vprev = version.split('.') item = len(vprev) - 1 while item > 0: val = int(vprev[item]) if val > 0: vprev[item] = str(val - 1) break else: item -= 1 if item == 0: raise ValueError('Refusing to decrease major version number') return '.'.join(vprev) def update_changelog(version): ''' Updates the release date in the Change Log ''' print "Updating Changelog" vparse = version_parse(version) # Version number without '-dev' suffix version_release = vparse.group(1) + vparse.group(2) version_previous = version_get_previous(version_release) if not section_exists(_changelog_file, version_previous, False): raise ValueError( "ERROR: previous version %s does not exist in changelog" % version_previous ) # Check if version already exists in changelog version_exists = section_exists(_changelog_file, version_release) if (not version_exists and not version_is_patch(version) and not version_is_dev(version)): version += '-' + _version_dev release_date = get_release_date(version) # Development release # Insert a new section for next release before the most recent one if version_is_dev(version): # Check changelog file for existing section if version_exists: print "nothing to do" return # No existing section found, insert new one if version_is_patch(version_release): print " Inserting new section for hotfix release v%s" % version else: print " Inserting new section for v%s" % version_release # Adjust previous version number (remove patch component) version_previous = version_parse(version_previous).group(1) script = 
"1,/^## {0}/s/^## {0}.*$/## {1} - {2}\\n\\n\\0/".format( version_previous, version_release, release_date ) # Stable release (X.Y.0) # Replace the 1st occurence of markdown level 2 header matching version # and release date patterns elif not version_is_patch(version): print " Updating release date for v%s" % version script = r"s/^(## ){0}(\.0)? - {1}.*$/\1{2} - {3}/".format( vparse.group(1), _release_date_regex, version, release_date ) # Hotfix release (X.Y.[0-9]) # Insert a new section for the hotfix release before the most recent # section for version X.Y and display a warning message else: if version_exists: print 'updating release date' script = "s/^## {0}.*$/## {1} - {2}/".format( version.replace('.', '\.'), version, release_date ) else: print " Inserting new section for hotfix release v%s" % version script = "1,/^## {0}/s/^## {0}.*$/## {1} - {2}\\n\\n\\0/".format( version_previous, version, release_date ) print " WARNING: review '%s' to ensure added section is correct" % ( _changelog_file ) subprocess.call( "sed -r -i '%s' %s " % ( script, _changelog_file ), shell=True ) #end update_changelog def version_set(version, do_commit=True, do_tag=True): ''' Bump version number and set release date in source files ''' print "Preparing version bump commit" update_changelog(version) print "Updating version and date in source files" subprocess.call( "sed -r -i '%s' %s " % ( sed_script(version), " ".join(sed_filelist()) ), shell=True ) print "Version set to %s" % version if do_commit: # Commit changes print "Committing" commit_ok = subprocess.call( "git commit --all --message '%s'" % ( "Bump version to %s" % version ), shell=True ) if do_tag: tag_ok = tag_create(version) else: tag_ok = False if commit_ok == 0: print ''' NOTE: you should carefully review the new commit, making sure updates to the files are correct and no additional changes are required. 
If everything is fine, then the commit can be pushed upstream; otherwise: - Make the required corrections - Amend the commit ('git commit --all --amend' ) or create a new one''' if tag_ok: print ''' - Drop the tag ('git tag --delete %s') - run this script again ''' % ( tag_name(version) ) else: print "Note: changes have been staged but not committed." #end version_set() def main(): # Get command-line options try: opts, args = getopt.gnu_getopt(sys.argv[1:], options, long_options) except getopt.GetoptError, err: print str(err) usage() sys.exit(2) if len(args) < 1: usage() print "ERROR: please specify the version" sys.exit(1) do_commit = False do_tag = False for opt, val in opts: if opt in ("-h", "--help"): usage() sys.exit(0) elif opt in ("-c", "--commit"): do_commit = True elif opt in ("-t", "--tag"): do_tag = True # Mandatory parameters version = version_check(args[0]) # Let's do it os.chdir(subprocess.check_output('git root', shell=True).rstrip()) version_set(version, do_commit, do_tag) #end main() if __name__ == "__main__": main()
gpl-3.0
HarmonyEnterpriseSolutions/toolib
toolib/scripts/businesstime.py
2
3273
################################################################# # Program: Business time logger """ """ __author__ = "Oleg Noga" __date__ = "$Date: 2004/03/24 12:36:39 $" __version__ = "$Revision: 1.2 $" # $Source: D:/HOME/cvs/toolib/scripts/businesstime.py,v $ # # ################################################################# from mx import DateTime import sys, os from toolib.win32.excel import * FILE_PATH = r"\\PAVEL2\public\Abrisola\Hours Of Business\${USER_NAME}\log.xls" FROM = "start" TILL = "stop" TARGET_WORK = "work" TARGET_DINNER = "dinner" DATE_LOOKUP_RANGE = xrange(2, 100) class BusinessTimeDocumentTemplate: def __init__(self): self._columns = {} self._sheetName = 1 def setSheetName(self, sheetName): self._sheetName = sheetName def getSheetName(self): return self._sheetName def addColumn(self, target, index): self._columns[target] = index def getColumn(self, target): return self._columns[target] class BusinessTimeDocument: def __init__(self, template, path): self._path = path self._template = template self._xlSheet = None self._date = None self._dateIndex = None self._recognize() def _recognize(self): sheet = self.getExcelSheet() for i in DATE_LOOKUP_RANGE: value = sheet.getCell(i, self._template.getColumn("index")) if value is not None: try: self._date = DateTime.DateTimeFromCOMDate(value) self._dateIndex = i return except: print "* Error reading date:", value, type(value) continue raise "Bad document" def getRowForDate(self, date): return self._dateIndex + int(round((date - self._date).days)) def setTime(self, target, from_or_till): today = DateTime.today() col = self._template.getColumn((target, from_or_till)) row = self.getRowForDate(today) now = DateTime.now() value = now.abstime / 60 / 60 / 24 print "+ %s %s: Setting time at (%s, %s) to %s" % (from_or_till, target, row, col, now) self._xlSheet.setCell(row, col, value) def save(self): self.getExcelSheet().getBook().save() def getExcelSheet(self): if self._xlSheet is None: try: self._xlSheet = 
ExcelBook(self._path).getSheet(self._template.getSheetName()) except: raise "Excel file not found: " + self._path return self._xlSheet def __del__(self): if self._xlSheet: self.getExcelSheet().getBook().close() def getTemplate(): template = BusinessTimeDocumentTemplate() template.addColumn("index", 1) template.addColumn((TARGET_WORK, FROM), 3) template.addColumn((TARGET_WORK, TILL), 4) template.addColumn((TARGET_DINNER, FROM), 5) template.addColumn((TARGET_DINNER, TILL), 6) return template def usage(args): print " Usage: %s < %s | %s > < %s | %s >" % (args[0], FROM, TILL, TARGET_WORK, TARGET_DINNER) def main(args): try: from_or_till = args[1] target = args[2] except IndexError: print "! Wrong arguments" usage(args) return 1 file = FILE_PATH.replace("${USER_NAME}", os.getenv("USERNAME")) doc = BusinessTimeDocument(getTemplate(), file) doc.setTime(target, from_or_till) doc.save() return 0 if __name__ == "__main__": try: sys.exit(main(sys.argv)) except SystemExit: raise except: apply(sys.excepthook, sys.exc_info()) sys.exit(1)
gpl-2.0
lzw120/django
build/lib/django/contrib/gis/tests/relatedapp/tests.py
198
14731
from __future__ import absolute_import from datetime import date from django.contrib.gis.geos import GEOSGeometry, Point, MultiPoint from django.contrib.gis.db.models import Collect, Count, Extent, F, Union from django.contrib.gis.geometry.backend import Geometry from django.contrib.gis.tests.utils import mysql, oracle, no_mysql, no_oracle, no_spatialite from django.test import TestCase from .models import City, Location, DirectoryEntry, Parcel, Book, Author, Article class RelatedGeoModelTest(TestCase): def test02_select_related(self): "Testing `select_related` on geographic models (see #7126)." qs1 = City.objects.all() qs2 = City.objects.select_related() qs3 = City.objects.select_related('location') # Reference data for what's in the fixtures. cities = ( ('Aurora', 'TX', -97.516111, 33.058333), ('Roswell', 'NM', -104.528056, 33.387222), ('Kecksburg', 'PA', -79.460734, 40.18476), ) for qs in (qs1, qs2, qs3): for ref, c in zip(cities, qs): nm, st, lon, lat = ref self.assertEqual(nm, c.name) self.assertEqual(st, c.state) self.assertEqual(Point(lon, lat), c.location.point) @no_mysql def test03_transform_related(self): "Testing the `transform` GeoQuerySet method on related geographic models." # All the transformations are to state plane coordinate systems using # US Survey Feet (thus a tolerance of 0 implies error w/in 1 survey foot). tol = 0 def check_pnt(ref, pnt): self.assertAlmostEqual(ref.x, pnt.x, tol) self.assertAlmostEqual(ref.y, pnt.y, tol) self.assertEqual(ref.srid, pnt.srid) # Each city transformed to the SRID of their state plane coordinate system. transformed = (('Kecksburg', 2272, 'POINT(1490553.98959621 314792.131023984)'), ('Roswell', 2257, 'POINT(481902.189077221 868477.766629735)'), ('Aurora', 2276, 'POINT(2269923.2484839 7069381.28722222)'), ) for name, srid, wkt in transformed: # Doing this implicitly sets `select_related` select the location. # TODO: Fix why this breaks on Oracle. 
qs = list(City.objects.filter(name=name).transform(srid, field_name='location__point')) check_pnt(GEOSGeometry(wkt, srid), qs[0].location.point) @no_mysql @no_spatialite def test04a_related_extent_aggregate(self): "Testing the `extent` GeoQuerySet aggregates on related geographic models." # This combines the Extent and Union aggregates into one query aggs = City.objects.aggregate(Extent('location__point')) # One for all locations, one that excludes New Mexico (Roswell). all_extent = (-104.528056, 29.763374, -79.460734, 40.18476) txpa_extent = (-97.516111, 29.763374, -79.460734, 40.18476) e1 = City.objects.extent(field_name='location__point') e2 = City.objects.exclude(state='NM').extent(field_name='location__point') e3 = aggs['location__point__extent'] # The tolerance value is to four decimal places because of differences # between the Oracle and PostGIS spatial backends on the extent calculation. tol = 4 for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]: for ref_val, e_val in zip(ref, e): self.assertAlmostEqual(ref_val, e_val, tol) @no_mysql def test04b_related_union_aggregate(self): "Testing the `unionagg` GeoQuerySet aggregates on related geographic models." # This combines the Extent and Union aggregates into one query aggs = City.objects.aggregate(Union('location__point')) # These are the points that are components of the aggregate geographic # union that is returned. Each point # corresponds to City PK. p1 = Point(-104.528056, 33.387222) p2 = Point(-97.516111, 33.058333) p3 = Point(-79.460734, 40.18476) p4 = Point(-96.801611, 32.782057) p5 = Point(-95.363151, 29.763374) # Creating the reference union geometry depending on the spatial backend, # as Oracle will have a different internal ordering of the component # geometries than PostGIS. The second union aggregate is for a union # query that includes limiting information in the WHERE clause (in other # words a `.filter()` precedes the call to `.unionagg()`). 
if oracle: ref_u1 = MultiPoint(p4, p5, p3, p1, p2, srid=4326) ref_u2 = MultiPoint(p3, p2, srid=4326) else: # Looks like PostGIS points by longitude value. ref_u1 = MultiPoint(p1, p2, p4, p5, p3, srid=4326) ref_u2 = MultiPoint(p2, p3, srid=4326) u1 = City.objects.unionagg(field_name='location__point') u2 = City.objects.exclude(name__in=('Roswell', 'Houston', 'Dallas', 'Fort Worth')).unionagg(field_name='location__point') u3 = aggs['location__point__union'] self.assertEqual(ref_u1, u1) self.assertEqual(ref_u2, u2) self.assertEqual(ref_u1, u3) def test05_select_related_fk_to_subclass(self): "Testing that calling select_related on a query over a model with an FK to a model subclass works" # Regression test for #9752. l = list(DirectoryEntry.objects.all().select_related()) def test06_f_expressions(self): "Testing F() expressions on GeometryFields." # Constructing a dummy parcel border and getting the City instance for # assigning the FK. b1 = GEOSGeometry('POLYGON((-97.501205 33.052520,-97.501205 33.052576,-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))', srid=4326) pcity = City.objects.get(name='Aurora') # First parcel has incorrect center point that is equal to the City; # it also has a second border that is different from the first as a # 100ft buffer around the City. c1 = pcity.location.point c2 = c1.transform(2276, clone=True) b2 = c2.buffer(100) p1 = Parcel.objects.create(name='P1', city=pcity, center1=c1, center2=c2, border1=b1, border2=b2) # Now creating a second Parcel where the borders are the same, just # in different coordinate systems. The center points are also the # same (but in different coordinate systems), and this time they # actually correspond to the centroid of the border. c1 = b1.centroid c2 = c1.transform(2276, clone=True) p2 = Parcel.objects.create(name='P2', city=pcity, center1=c1, center2=c2, border1=b1, border2=b1) # Should return the second Parcel, which has the center within the # border. 
qs = Parcel.objects.filter(center1__within=F('border1')) self.assertEqual(1, len(qs)) self.assertEqual('P2', qs[0].name) if not mysql: # This time center2 is in a different coordinate system and needs # to be wrapped in transformation SQL. qs = Parcel.objects.filter(center2__within=F('border1')) self.assertEqual(1, len(qs)) self.assertEqual('P2', qs[0].name) # Should return the first Parcel, which has the center point equal # to the point in the City ForeignKey. qs = Parcel.objects.filter(center1=F('city__location__point')) self.assertEqual(1, len(qs)) self.assertEqual('P1', qs[0].name) if not mysql: # This time the city column should be wrapped in transformation SQL. qs = Parcel.objects.filter(border2__contains=F('city__location__point')) self.assertEqual(1, len(qs)) self.assertEqual('P1', qs[0].name) def test07_values(self): "Testing values() and values_list() and GeoQuerySets." # GeoQuerySet and GeoValuesQuerySet, and GeoValuesListQuerySet respectively. gqs = Location.objects.all() gvqs = Location.objects.values() gvlqs = Location.objects.values_list() # Incrementing through each of the models, dictionaries, and tuples # returned by the different types of GeoQuerySets. for m, d, t in zip(gqs, gvqs, gvlqs): # The values should be Geometry objects and not raw strings returned # by the spatial database. self.assertTrue(isinstance(d['point'], Geometry)) self.assertTrue(isinstance(t[1], Geometry)) self.assertEqual(m.point, d['point']) self.assertEqual(m.point, t[1]) def test08_defer_only(self): "Testing defer() and only() on Geographic models." qs = Location.objects.all() def_qs = Location.objects.defer('point') for loc, def_loc in zip(qs, def_qs): self.assertEqual(loc.point, def_loc.point) def test09_pk_relations(self): "Ensuring correct primary key column is selected across relations. See #10757." # The expected ID values -- notice the last two location IDs # are out of order. 
Dallas and Houston have location IDs that differ # from their PKs -- this is done to ensure that the related location # ID column is selected instead of ID column for the city. city_ids = (1, 2, 3, 4, 5) loc_ids = (1, 2, 3, 5, 4) ids_qs = City.objects.order_by('id').values('id', 'location__id') for val_dict, c_id, l_id in zip(ids_qs, city_ids, loc_ids): self.assertEqual(val_dict['id'], c_id) self.assertEqual(val_dict['location__id'], l_id) def test10_combine(self): "Testing the combination of two GeoQuerySets. See #10807." buf1 = City.objects.get(name='Aurora').location.point.buffer(0.1) buf2 = City.objects.get(name='Kecksburg').location.point.buffer(0.1) qs1 = City.objects.filter(location__point__within=buf1) qs2 = City.objects.filter(location__point__within=buf2) combined = qs1 | qs2 names = [c.name for c in combined] self.assertEqual(2, len(names)) self.assertTrue('Aurora' in names) self.assertTrue('Kecksburg' in names) def test11_geoquery_pickle(self): "Ensuring GeoQuery objects are unpickled correctly. See #10839." import pickle from django.contrib.gis.db.models.sql import GeoQuery qs = City.objects.all() q_str = pickle.dumps(qs.query) q = pickle.loads(q_str) self.assertEqual(GeoQuery, q.__class__) # TODO: fix on Oracle -- get the following error because the SQL is ordered # by a geometry object, which Oracle apparently doesn't like: # ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type @no_oracle def test12a_count(self): "Testing `Count` aggregate use with the `GeoManager` on geo-fields." # The City, 'Fort Worth' uses the same location as Dallas. dallas = City.objects.get(name='Dallas') # Count annotation should be 2 for the Dallas location now. loc = Location.objects.annotate(num_cities=Count('city')).get(id=dallas.location.id) self.assertEqual(2, loc.num_cities) def test12b_count(self): "Testing `Count` aggregate use with the `GeoManager` on non geo-fields. See #11087." 
# Should only be one author (Trevor Paglen) returned by this query, and # the annotation should have 3 for the number of books, see #11087. # Also testing with a `GeoValuesQuerySet`, see #11489. qs = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1) vqs = Author.objects.values('name').annotate(num_books=Count('books')).filter(num_books__gt=1) self.assertEqual(1, len(qs)) self.assertEqual(3, qs[0].num_books) self.assertEqual(1, len(vqs)) self.assertEqual(3, vqs[0]['num_books']) def test13c_count(self): "Testing `Count` aggregate with `.values()`. See #15305." qs = Location.objects.filter(id=5).annotate(num_cities=Count('city')).values('id', 'point', 'num_cities') self.assertEqual(1, len(qs)) self.assertEqual(2, qs[0]['num_cities']) self.assertTrue(isinstance(qs[0]['point'], GEOSGeometry)) # TODO: The phantom model does appear on Oracle. @no_oracle def test13_select_related_null_fk(self): "Testing `select_related` on a nullable ForeignKey via `GeoManager`. See #11381." no_author = Book.objects.create(title='Without Author') b = Book.objects.select_related('author').get(title='Without Author') # Should be `None`, and not a 'dummy' model. self.assertEqual(None, b.author) @no_mysql @no_oracle @no_spatialite def test14_collect(self): "Testing the `collect` GeoQuerySet method and `Collect` aggregate." # Reference query: # SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN # "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id") # WHERE "relatedapp_city"."state" = 'TX'; ref_geom = GEOSGeometry('MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,-95.363151 29.763374,-96.801611 32.782057)') c1 = City.objects.filter(state='TX').collect(field_name='location__point') c2 = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect'] for coll in (c1, c2): # Even though Dallas and Ft. 
Worth share same point, Collect doesn't # consolidate -- that's why 4 points in MultiPoint. self.assertEqual(4, len(coll)) self.assertEqual(ref_geom, coll) def test15_invalid_select_related(self): "Testing doing select_related on the related name manager of a unique FK. See #13934." qs = Article.objects.select_related('author__article') # This triggers TypeError when `get_default_columns` has no `local_only` # keyword. The TypeError is swallowed if QuerySet is actually # evaluated as list generation swallows TypeError in CPython. sql = str(qs.query) def test16_annotated_date_queryset(self): "Ensure annotated date querysets work if spatial backend is used. See #14648." birth_years = [dt.year for dt in list(Author.objects.annotate(num_books=Count('books')).dates('dob', 'year'))] birth_years.sort() self.assertEqual([1950, 1974], birth_years) # TODO: Related tests for KML, GML, and distance lookups.
bsd-3-clause
sgerhart/ansible
lib/ansible/module_utils/cloud.py
194
8653
#
# (c) 2016 Allen Sanabria, <asanabria@linuxdynasty.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#
"""
This module adds shared support for generic cloud modules

In order to use this module, include it as part of a custom
module as shown below.

from ansible.module_utils.cloud import CloudRetry

The 'cloud' module provides the following common classes:

    * CloudRetry
        - The base class to be used by other cloud providers, in order to
          provide a backoff/retry decorator based on status codes.

        - Example using the AWSRetry class which inherits from CloudRetry.

          @AWSRetry.exponential_backoff(retries=10, delay=3)
          get_ec2_security_group_ids_from_names()

          @AWSRetry.jittered_backoff()
          get_ec2_security_group_ids_from_names()

"""
import random
import syslog
import time

from functools import wraps


def _exponential_backoff(retries=10, delay=2, backoff=2, max_delay=60):
    """ Customizable exponential backoff strategy.
    Args:
        retries (int): Maximum number of times to retry a request.
        delay (float): Initial (base) delay.
        backoff (float): base of the exponent to use for exponential
            backoff.
        max_delay (int): Optional. If provided each delay generated is capped
            at this amount. Defaults to 60 seconds.
    Returns:
        Callable that returns a generator. This generator yields durations in
        seconds to be used as delays for an exponential backoff strategy.
    Usage:
        >>> backoff = _exponential_backoff()
        >>> backoff
        <function backoff_backoff at 0x7f0d939facf8>
        >>> list(backoff())
        [2, 4, 8, 16, 32, 60, 60, 60, 60, 60]
    """
    def backoff_gen():
        for retry in range(0, retries):
            sleep = delay * backoff ** retry
            # Cap each delay at max_delay when a ceiling was supplied.
            yield sleep if max_delay is None else min(sleep, max_delay)
    return backoff_gen


def _full_jitter_backoff(retries=10, delay=3, max_delay=60, _random=random):
    """ Implements the "Full Jitter" backoff strategy described here
    https://www.awsarchitectureblog.com/2015/03/backoff.html
    Args:
        retries (int): Maximum number of times to retry a request.
        delay (float): Approximate number of seconds to sleep for the first
            retry.
        max_delay (int): The maximum number of seconds to sleep for any retry.
        _random (random.Random or None): Makes this generator testable by
            allowing developers to explicitly pass in a seeded Random.
    Returns:
        Callable that returns a generator. This generator yields durations in
        seconds to be used as delays for a full jitter backoff strategy.
    Usage:
        >>> backoff = _full_jitter_backoff(retries=5)
        >>> backoff
        <function backoff_backoff at 0x7f0d939facf8>
        >>> list(backoff())
        [3, 6, 5, 23, 38]
        >>> list(backoff())
        [2, 1, 6, 6, 31]
    """
    def backoff_gen():
        for retry in range(0, retries):
            # Uniformly random delay in [0, min(max_delay, delay * 2**retry)].
            yield _random.randint(0, min(max_delay, delay * 2 ** retry))
    return backoff_gen


class CloudRetry(object):
    """ CloudRetry can be used by any cloud provider, in order to implement a
        backoff algorithm/retry effect based on Status Code from Exceptions.
    """
    # This is the base class of the exception.
    # AWS Example botocore.exceptions.ClientError
    base_class = None

    @staticmethod
    def status_code_from_exception(error):
        """ Return the status code from the exception object
        Args:
            error (object): The exception itself.
        """
        pass

    @staticmethod
    def found(response_code, catch_extra_error_codes=None):
        """ Return True if the Response Code to retry on was found.
        Args:
            response_code (str): This is the Response Code that is being
                matched against.
            catch_extra_error_codes (iterable or None): Optional extra codes
                (beyond the subclass defaults) that should also be retried.
        """
        pass

    @classmethod
    def _backoff(cls, backoff_strategy, catch_extra_error_codes=None):
        """ Retry calling the Cloud decorated function using the provided
        backoff strategy.
        Args:
            backoff_strategy (callable): Callable that returns a generator.
                The generator should yield sleep times for each retry of the
                decorated function.
        """
        def deco(f):
            @wraps(f)
            def retry_func(*args, **kwargs):
                for delay in backoff_strategy():
                    try:
                        return f(*args, **kwargs)
                    except Exception as e:
                        if isinstance(e, cls.base_class):
                            response_code = cls.status_code_from_exception(e)
                            if cls.found(response_code, catch_extra_error_codes):
                                msg = "{0}: Retrying in {1} seconds...".format(str(e), delay)
                                syslog.syslog(syslog.LOG_INFO, msg)
                                time.sleep(delay)
                            else:
                                # Status code is not one we retry on; re-raise
                                # the original exception.
                                raise e
                        else:
                            # Not the cloud SDK's exception type at all;
                            # re-raise the original exception.
                            raise e
                # The backoff strategy is exhausted: make one final attempt,
                # letting any exception propagate to the caller.
                return f(*args, **kwargs)
            return retry_func  # true decorator

        return deco

    @classmethod
    def exponential_backoff(cls, retries=10, delay=3, backoff=2, max_delay=60, catch_extra_error_codes=None):
        """ Retry calling the Cloud decorated function using an exponential backoff.
        Kwargs:
            retries (int): Number of times to retry a failed request before giving up
                default=10
            delay (int or float): Initial delay between retries in seconds
                default=3
            backoff (int or float): backoff multiplier e.g. value of 2 will
                double the delay each retry
                default=2
            max_delay (int or None): maximum amount of time to wait between retries.
                default=60
            catch_extra_error_codes (iterable or None): Optional extra response
                codes to retry on, passed through to ``found``.
        """
        return cls._backoff(_exponential_backoff(
            retries=retries, delay=delay, backoff=backoff, max_delay=max_delay), catch_extra_error_codes)

    @classmethod
    def jittered_backoff(cls, retries=10, delay=3, max_delay=60, catch_extra_error_codes=None):
        """ Retry calling the Cloud decorated function using a jittered backoff
        strategy. More on this strategy here:

        https://www.awsarchitectureblog.com/2015/03/backoff.html

        Kwargs:
            retries (int): Number of times to retry a failed request before giving up
                default=10
            delay (int): Initial delay between retries in seconds
                default=3
            max_delay (int): maximum amount of time to wait between retries.
                default=60
            catch_extra_error_codes (iterable or None): Optional extra response
                codes to retry on, passed through to ``found``.
        """
        return cls._backoff(_full_jitter_backoff(
            retries=retries, delay=delay, max_delay=max_delay), catch_extra_error_codes)

    @classmethod
    def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None):
        """ Retry calling the Cloud decorated function using an exponential backoff.

        Compatibility for the original implementation of CloudRetry.backoff that
        did not provide configurable backoff strategies. Developers should use
        CloudRetry.exponential_backoff instead.

        Kwargs:
            tries (int): Number of times to try (not retry) before giving up
                default=10
            delay (int or float): Initial delay between retries in seconds
                default=3
            backoff (int or float): backoff multiplier e.g. value of 2 will
                double the delay each retry
                default=1.1
        """
        # ``retries=tries - 1`` because _backoff makes one final attempt after
        # the delay generator is exhausted, for ``tries`` attempts in total.
        return cls.exponential_backoff(
            retries=tries - 1, delay=delay, backoff=backoff, max_delay=None,
            catch_extra_error_codes=catch_extra_error_codes)
mit
brennanblue/svgplotlib
svgplotlib/SVG/Transform.py
2
4259
#!/usr/bin/python # -*- coding: utf-8 -*- from svgplotlib.SVG.Parsers import Lexer, EOF class ParseTransformError(Exception): pass class Transform(Lexer): """ Break SVG transform into tokens. """ numfloat = object() numint = object() string = object() skip = object() numbers = frozenset((numfloat, numint)) lexicon = ( \ (numfloat , Lexer.Float), (numint , Lexer.Int), (string , r'\w+'), (skip , r'[\(\), \n]'), ) ignore = frozenset((skip,)) callbacks = { numfloat : lambda self,value: float(value), numint : lambda self,value: float(value) } def __init__(self): Lexer.__init__(self) def assertion(self, condition, msg = ''): if not condition: raise ParseTransformError(msg) def iterparse(self, text): """ Parse a string of SVG transform data. """ assertion = self.assertion next = self.lex(text).next numbers = self.numbers string = self.string token, value = next() while token != EOF: assertion(token is string, 'Expected string') transform = value if transform == 'matrix': token, a = next() assertion(token in numbers, 'Expected number') token, b = next() assertion(token in numbers, 'Expected number') token, c = next() assertion(token in numbers, 'Expected number') token, d = next() assertion(token in numbers, 'Expected number') token, e = next() assertion(token in numbers, 'Expected number') token, f = next() assertion(token in numbers, 'Expected number') yield (transform, (a,b,c,d,e,f)) elif transform == 'translate': token, tx = next() assertion(token in numbers, 'Expected number') token, value = next() ty = value if not token in numbers: ty = 0. 
yield (transform, (tx, ty)) if not token in numbers: continue elif transform == 'scale': token, sx = next() assertion(token in numbers, 'Expected number') token, value = next() sy = value if not token in numbers: sy = sx yield (transform, (sx, sy)) if not token in numbers: continue elif transform == 'rotate': token, angle = next() assertion(token in numbers, 'Expected number') token, value = next() cx = value if token in numbers: token, value = next() assertion(token in numbers, 'Expected number') cy = value yield (transform, (angle,(cx,cy))) else: yield (transform, (angle,None)) continue elif transform == 'skewX' or transform == 'skewY': token, value = next() angle = value assertion(token in numbers, 'Expected number') yield (transform, (angle,)) else: raise ParseTransformError("unknown transform '%s'" % transform) # fetch next token token, value = next() parseTransform = Transform() if __name__ == '__main__': print tuple(parseTransform.iterparse("scale(1.8) translate(0, -150)"))
bsd-3-clause
GenericStudent/home-assistant
homeassistant/components/blockchain/sensor.py
16
2276
"""Support for Blockchain.com sensors.""" from datetime import timedelta import logging from pyblockchain import get_balance, validate_address import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity _LOGGER = logging.getLogger(__name__) ATTRIBUTION = "Data provided by blockchain.com" CONF_ADDRESSES = "addresses" DEFAULT_NAME = "Bitcoin Balance" ICON = "mdi:currency-btc" SCAN_INTERVAL = timedelta(minutes=5) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_ADDRESSES): [cv.string], vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Blockchain.com sensors.""" addresses = config[CONF_ADDRESSES] name = config[CONF_NAME] for address in addresses: if not validate_address(address): _LOGGER.error("Bitcoin address is not valid: %s", address) return False add_entities([BlockchainSensor(name, addresses)], True) class BlockchainSensor(Entity): """Representation of a Blockchain.com sensor.""" def __init__(self, name, addresses): """Initialize the sensor.""" self._name = name self.addresses = addresses self._state = None self._unit_of_measurement = "BTC" @property def name(self): """Return the name of the sensor.""" return self._name @property def state(self): """Return the state of the sensor.""" return self._state @property def unit_of_measurement(self): """Return the unit of measurement this sensor expresses itself in.""" return self._unit_of_measurement @property def icon(self): """Return the icon to use in the frontend, if any.""" return ICON @property def device_state_attributes(self): """Return the state attributes of the sensor.""" return {ATTR_ATTRIBUTION: ATTRIBUTION} def update(self): """Get the latest state of the sensor.""" self._state = get_balance(self.addresses)
apache-2.0
alunarbeach/spark
python/pyspark/ml/__init__.py
32
1143
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ DataFrame-based machine learning APIs to let users quickly assemble and configure practical machine learning pipelines. """ from pyspark.ml.base import Estimator, Model, Transformer, UnaryTransformer from pyspark.ml.pipeline import Pipeline, PipelineModel __all__ = ["Transformer", "UnaryTransformer", "Estimator", "Model", "Pipeline", "PipelineModel"]
apache-2.0
beepee14/scikit-learn
examples/ensemble/plot_gradient_boosting_oob.py
230
4762
""" ====================================== Gradient Boosting Out-of-Bag estimates ====================================== Out-of-bag (OOB) estimates can be a useful heuristic to estimate the "optimal" number of boosting iterations. OOB estimates are almost identical to cross-validation estimates but they can be computed on-the-fly without the need for repeated model fitting. OOB estimates are only available for Stochastic Gradient Boosting (i.e. ``subsample < 1.0``), the estimates are derived from the improvement in loss based on the examples not included in the bootstrap sample (the so-called out-of-bag examples). The OOB estimator is a pessimistic estimator of the true test loss, but remains a fairly good approximation for a small number of trees. The figure shows the cumulative sum of the negative OOB improvements as a function of the boosting iteration. As you can see, it tracks the test loss for the first hundred iterations but then diverges in a pessimistic way. The figure also shows the performance of 3-fold cross validation which usually gives a better estimate of the test loss but is computationally more demanding. """ print(__doc__) # Author: Peter Prettenhofer <peter.prettenhofer@gmail.com> # # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn import ensemble from sklearn.cross_validation import KFold from sklearn.cross_validation import train_test_split # Generate data (adapted from G. 
Ridgeway's gbm example) n_samples = 1000 random_state = np.random.RandomState(13) x1 = random_state.uniform(size=n_samples) x2 = random_state.uniform(size=n_samples) x3 = random_state.randint(0, 4, size=n_samples) p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3))) y = random_state.binomial(1, p, size=n_samples) X = np.c_[x1, x2, x3] X = X.astype(np.float32) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=9) # Fit classifier with out-of-bag estimates params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5, 'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3} clf = ensemble.GradientBoostingClassifier(**params) clf.fit(X_train, y_train) acc = clf.score(X_test, y_test) print("Accuracy: {:.4f}".format(acc)) n_estimators = params['n_estimators'] x = np.arange(n_estimators) + 1 def heldout_score(clf, X_test, y_test): """compute deviance scores on ``X_test`` and ``y_test``. """ score = np.zeros((n_estimators,), dtype=np.float64) for i, y_pred in enumerate(clf.staged_decision_function(X_test)): score[i] = clf.loss_(y_test, y_pred) return score def cv_estimate(n_folds=3): cv = KFold(n=X_train.shape[0], n_folds=n_folds) cv_clf = ensemble.GradientBoostingClassifier(**params) val_scores = np.zeros((n_estimators,), dtype=np.float64) for train, test in cv: cv_clf.fit(X_train[train], y_train[train]) val_scores += heldout_score(cv_clf, X_train[test], y_train[test]) val_scores /= n_folds return val_scores # Estimate best n_estimator using cross-validation cv_score = cv_estimate(3) # Compute best n_estimator for test data test_score = heldout_score(clf, X_test, y_test) # negative cumulative sum of oob improvements cumsum = -np.cumsum(clf.oob_improvement_) # min loss according to OOB oob_best_iter = x[np.argmin(cumsum)] # min loss according to test (normalize such that first loss is 0) test_score -= test_score[0] test_best_iter = x[np.argmin(test_score)] # min loss according to cv (normalize such that first loss is 0) 
cv_score -= cv_score[0] cv_best_iter = x[np.argmin(cv_score)] # color brew for the three curves oob_color = list(map(lambda x: x / 256.0, (190, 174, 212))) test_color = list(map(lambda x: x / 256.0, (127, 201, 127))) cv_color = list(map(lambda x: x / 256.0, (253, 192, 134))) # plot curves and vertical lines for best iterations plt.plot(x, cumsum, label='OOB loss', color=oob_color) plt.plot(x, test_score, label='Test loss', color=test_color) plt.plot(x, cv_score, label='CV loss', color=cv_color) plt.axvline(x=oob_best_iter, color=oob_color) plt.axvline(x=test_best_iter, color=test_color) plt.axvline(x=cv_best_iter, color=cv_color) # add three vertical lines to xticks xticks = plt.xticks() xticks_pos = np.array(xticks[0].tolist() + [oob_best_iter, cv_best_iter, test_best_iter]) xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) + ['OOB', 'CV', 'Test']) ind = np.argsort(xticks_pos) xticks_pos = xticks_pos[ind] xticks_label = xticks_label[ind] plt.xticks(xticks_pos, xticks_label) plt.legend(loc='upper right') plt.ylabel('normalized loss') plt.xlabel('number of iterations') plt.show()
bsd-3-clause
SoftlySplinter/dotfiles
plugins/git-prompt/gitstatus.py
22
2291
#!/usr/bin/env python # -*- coding: UTF-8 -*- # change those symbols to whatever you prefer symbols = {'ahead of': '↑', 'behind': '↓', 'staged':'♦', 'changed':'‣', 'untracked':'…', 'clean':'⚡', 'unmerged':'≠', 'sha1':':'} from subprocess import Popen, PIPE output,error = Popen(['git','status'], stdout=PIPE, stderr=PIPE).communicate() if error: import sys sys.exit(0) lines = output.splitlines() import re behead_re = re.compile(r"^# Your branch is (ahead of|behind) '(.*)' by (\d+) commit") diverge_re = re.compile(r"^# and have (\d+) and (\d+) different") status = '' staged = re.compile(r'^# Changes to be committed:$', re.MULTILINE) changed = re.compile(r'^# Changed but not updated:$', re.MULTILINE) untracked = re.compile(r'^# Untracked files:$', re.MULTILINE) unmerged = re.compile(r'^# Unmerged paths:$', re.MULTILINE) def execute(*command): out, err = Popen(stdout=PIPE, stderr=PIPE, *command).communicate() if not err: nb = len(out.splitlines()) else: nb = '?' return nb if staged.search(output): nb = execute(['git','diff','--staged','--name-only','--diff-filter=ACDMRT']) status += '%s%s' % (symbols['staged'], nb) if unmerged.search(output): nb = execute(['git','diff', '--staged','--name-only', '--diff-filter=U']) status += '%s%s' % (symbols['unmerged'], nb) if changed.search(output): nb = execute(['git','diff','--name-only', '--diff-filter=ACDMRT']) status += '%s%s' % (symbols['changed'], nb) if untracked.search(output): ## nb = len(Popen(['git','ls-files','--others','--exclude-standard'],stdout=PIPE).communicate()[0].splitlines()) ## status += "%s" % (symbols['untracked']*(nb//3 + 1), ) status += symbols['untracked'] if status == '': status = symbols['clean'] remote = '' bline = lines[0] if bline.find('Not currently on any branch') != -1: branch = symbols['sha1']+ Popen(['git','rev-parse','--short','HEAD'], stdout=PIPE).communicate()[0][:-1] else: branch = bline.split(' ')[3] bstatusline = lines[1] match = behead_re.match(bstatusline) if match: remote = 
symbols[match.groups()[0]] remote += match.groups()[2] elif lines[2:]: div_match = diverge_re.match(lines[2]) if div_match: remote = "{behind}{1}{ahead of}{0}".format(*div_match.groups(), **symbols) print '\n'.join([branch,remote,status])
mit
nikolas/lettuce
tests/integration/lib/Django-1.3/django/contrib/gis/db/backends/base.py
258
10674
""" Base/mixin classes for the spatial backend database operations and the `SpatialRefSys` model the backend. """ import re from django.conf import settings from django.contrib.gis import gdal class BaseSpatialOperations(object): """ This module holds the base `BaseSpatialBackend` object, which is instantiated by each spatial database backend with the features it has. """ distance_functions = {} geometry_functions = {} geometry_operators = {} geography_operators = {} geography_functions = {} gis_terms = {} truncate_params = {} # Quick booleans for the type of this spatial backend, and # an attribute for the spatial database version tuple (if applicable) postgis = False spatialite = False mysql = False oracle = False spatial_version = None # How the geometry column should be selected. select = None # Does the spatial database have a geography type? geography = False area = False centroid = False difference = False distance = False distance_sphere = False distance_spheroid = False envelope = False force_rhr = False mem_size = False bounding_circle = False num_geom = False num_points = False perimeter = False perimeter3d = False point_on_surface = False polygonize = False reverse = False scale = False snap_to_grid = False sym_difference = False transform = False translate = False union = False # Aggregates collect = False extent = False extent3d = False make_line = False unionagg = False # Serialization geohash = False geojson = False gml = False kml = False svg = False # Constructors from_text = False from_wkb = False # Default conversion functions for aggregates; will be overridden if implemented # for the spatial backend. 
def convert_extent(self, box): raise NotImplementedError('Aggregate extent not implemented for this spatial backend.') def convert_extent3d(self, box): raise NotImplementedError('Aggregate 3D extent not implemented for this spatial backend.') def convert_geom(self, geom_val, geom_field): raise NotImplementedError('Aggregate method not implemented for this spatial backend.') # For quoting column values, rather than columns. def geo_quote_name(self, name): if isinstance(name, unicode): name = name.encode('ascii') return "'%s'" % name # GeometryField operations def geo_db_type(self, f): """ Returns the database column type for the geometry field on the spatial backend. """ raise NotImplementedError def get_distance(self, f, value, lookup_type): """ Returns the distance parameters for the given geometry field, lookup value, and lookup type. """ raise NotImplementedError('Distance operations not available on this spatial backend.') def get_geom_placeholder(self, f, value): """ Returns the placeholder for the given geometry field with the given value. Depending on the spatial backend, the placeholder may contain a stored procedure call to the transformation function of the spatial backend. """ raise NotImplementedError # Spatial SQL Construction def spatial_aggregate_sql(self, agg): raise NotImplementedError('Aggregate support not implemented for this spatial backend.') def spatial_lookup_sql(self, lvalue, lookup_type, value, field): raise NotImplmentedError # Routines for getting the OGC-compliant models. def geometry_columns(self): raise NotImplementedError def spatial_ref_sys(self): raise NotImplementedError class SpatialRefSysMixin(object): """ The SpatialRefSysMixin is a class used by the database-dependent SpatialRefSys objects to reduce redundnant code. """ # For pulling out the spheroid from the spatial reference string. This # regular expression is used only if the user does not have GDAL installed. 
# TODO: Flattening not used in all ellipsoids, could also be a minor axis, # or 'b' parameter. spheroid_regex = re.compile(r'.+SPHEROID\[\"(?P<name>.+)\",(?P<major>\d+(\.\d+)?),(?P<flattening>\d{3}\.\d+),') # For pulling out the units on platforms w/o GDAL installed. # TODO: Figure out how to pull out angular units of projected coordinate system and # fix for LOCAL_CS types. GDAL should be highly recommended for performing # distance queries. units_regex = re.compile(r'.+UNIT ?\["(?P<unit_name>[\w \'\(\)]+)", ?(?P<unit>[\d\.]+)(,AUTHORITY\["(?P<unit_auth_name>[\w \'\(\)]+)","(?P<unit_auth_val>\d+)"\])?\]([\w ]+)?(,AUTHORITY\["(?P<auth_name>[\w \'\(\)]+)","(?P<auth_val>\d+)"\])?\]$') @property def srs(self): """ Returns a GDAL SpatialReference object, if GDAL is installed. """ if gdal.HAS_GDAL: # TODO: Is caching really necessary here? Is complexity worth it? if hasattr(self, '_srs'): # Returning a clone of the cached SpatialReference object. return self._srs.clone() else: # Attempting to cache a SpatialReference object. # Trying to get from WKT first. try: self._srs = gdal.SpatialReference(self.wkt) return self.srs except Exception, msg: pass try: self._srs = gdal.SpatialReference(self.proj4text) return self.srs except Exception, msg: pass raise Exception('Could not get OSR SpatialReference from WKT: %s\nError:\n%s' % (self.wkt, msg)) else: raise Exception('GDAL is not installed.') @property def ellipsoid(self): """ Returns a tuple of the ellipsoid parameters: (semimajor axis, semiminor axis, and inverse flattening). """ if gdal.HAS_GDAL: return self.srs.ellipsoid else: m = self.spheroid_regex.match(self.wkt) if m: return (float(m.group('major')), float(m.group('flattening'))) else: return None @property def name(self): "Returns the projection name." return self.srs.name @property def spheroid(self): "Returns the spheroid name for this spatial reference." return self.srs['spheroid'] @property def datum(self): "Returns the datum for this spatial reference." 
return self.srs['datum'] @property def projected(self): "Is this Spatial Reference projected?" if gdal.HAS_GDAL: return self.srs.projected else: return self.wkt.startswith('PROJCS') @property def local(self): "Is this Spatial Reference local?" if gdal.HAS_GDAL: return self.srs.local else: return self.wkt.startswith('LOCAL_CS') @property def geographic(self): "Is this Spatial Reference geographic?" if gdal.HAS_GDAL: return self.srs.geographic else: return self.wkt.startswith('GEOGCS') @property def linear_name(self): "Returns the linear units name." if gdal.HAS_GDAL: return self.srs.linear_name elif self.geographic: return None else: m = self.units_regex.match(self.wkt) return m.group('unit_name') @property def linear_units(self): "Returns the linear units." if gdal.HAS_GDAL: return self.srs.linear_units elif self.geographic: return None else: m = self.units_regex.match(self.wkt) return m.group('unit') @property def angular_name(self): "Returns the name of the angular units." if gdal.HAS_GDAL: return self.srs.angular_name elif self.projected: return None else: m = self.units_regex.match(self.wkt) return m.group('unit_name') @property def angular_units(self): "Returns the angular units." if gdal.HAS_GDAL: return self.srs.angular_units elif self.projected: return None else: m = self.units_regex.match(self.wkt) return m.group('unit') @property def units(self): "Returns a tuple of the units and the name." if self.projected or self.local: return (self.linear_units, self.linear_name) elif self.geographic: return (self.angular_units, self.angular_name) else: return (None, None) @classmethod def get_units(cls, wkt): """ Class method used by GeometryField on initialization to retrive the units on the given WKT, without having to use any of the database fields. 
""" if gdal.HAS_GDAL: return gdal.SpatialReference(wkt).units else: m = cls.units_regex.match(wkt) return m.group('unit'), m.group('unit_name') @classmethod def get_spheroid(cls, wkt, string=True): """ Class method used by GeometryField on initialization to retrieve the `SPHEROID[..]` parameters from the given WKT. """ if gdal.HAS_GDAL: srs = gdal.SpatialReference(wkt) sphere_params = srs.ellipsoid sphere_name = srs['spheroid'] else: m = cls.spheroid_regex.match(wkt) if m: sphere_params = (float(m.group('major')), float(m.group('flattening'))) sphere_name = m.group('name') else: return None if not string: return sphere_name, sphere_params else: # `string` parameter used to place in format acceptable by PostGIS if len(sphere_params) == 3: radius, flattening = sphere_params[0], sphere_params[2] else: radius, flattening = sphere_params return 'SPHEROID["%s",%s,%s]' % (sphere_name, radius, flattening) def __unicode__(self): """ Returns the string representation. If GDAL is installed, it will be 'pretty' OGC WKT. """ try: return unicode(self.srs) except: return unicode(self.wkt)
gpl-3.0
CubicERP/odoo
addons/crm/__openerp__.py
258
4199
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'CRM', 'version': '1.0', 'category': 'Customer Relationship Management', 'sequence': 2, 'summary': 'Leads, Opportunities, Phone Calls', 'description': """ The generic OpenERP Customer Relationship Management ==================================================== This application enables a group of people to intelligently and efficiently manage leads, opportunities, meetings and phone calls. It manages key tasks such as communication, identification, prioritization, assignment, resolution and notification. OpenERP ensures that all cases are successfully tracked by users, customers and suppliers. It can automatically send reminders, escalate the request, trigger specific methods and many other actions based on your own enterprise rules. The greatest thing about this system is that users don't need to do anything special. The CRM module has an email gateway for the synchronization interface between mails and OpenERP. That way, users can just send emails to the request tracker. 
OpenERP will take care of thanking them for their message, automatically routing it to the appropriate staff and make sure all future correspondence gets to the right place. Dashboard for CRM will include: ------------------------------- * Planned Revenue by Stage and User (graph) * Opportunities by Stage (graph) """, 'author': 'OpenERP SA', 'website': 'https://www.odoo.com/page/crm', 'depends': [ 'base_action_rule', 'base_setup', 'sales_team', 'mail', 'email_template', 'calendar', 'resource', 'board', 'fetchmail', ], 'data': [ 'crm_data.xml', 'crm_lead_data.xml', 'crm_phonecall_data.xml', 'security/crm_security.xml', 'security/ir.model.access.csv', 'wizard/crm_lead_to_opportunity_view.xml', 'wizard/crm_phonecall_to_phonecall_view.xml', 'wizard/crm_merge_opportunities_view.xml', 'crm_view.xml', 'crm_phonecall_view.xml', 'crm_phonecall_menu.xml', 'crm_lead_view.xml', 'crm_lead_menu.xml', 'calendar_event_menu.xml', 'report/crm_lead_report_view.xml', 'report/crm_opportunity_report_view.xml', 'report/crm_phonecall_report_view.xml', 'res_partner_view.xml', 'res_config_view.xml', 'base_partner_merge_view.xml', 'sales_team_view.xml', ], 'demo': [ 'crm_demo.xml', 'crm_lead_demo.xml', 'crm_phonecall_demo.xml', 'crm_action_rule_demo.xml', ], 'test': [ 'test/crm_access_group_users.yml', 'test/crm_lead_message.yml', 'test/lead2opportunity2win.yml', 'test/lead2opportunity_assign_salesmen.yml', 'test/crm_lead_merge.yml', 'test/crm_lead_cancel.yml', 'test/segmentation.yml', 'test/phonecalls.yml', 'test/crm_lead_onchange.yml', 'test/crm_lead_copy.yml', 'test/crm_lead_unlink.yml', 'test/crm_lead_find_stage.yml', ], 'installable': True, 'application': True, 'auto_install': False, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
blink1073/pexpect
tests/test_log.py
20
3713
#!/usr/bin/env python
'''
PEXPECT LICENSE

    This license is approved by the OSI and FSF as GPL-compatible.
        http://opensource.org/licenses/isc-license.txt

    Copyright (c) 2012, Noah Spurrier <noah@noah.org>
    PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
    PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
    COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
import pexpect
import unittest
import os
import tempfile
from . import PexpectTestCase

# the program cat(1) may display ^D\x08\x08 when \x04 (EOF, Ctrl-D) is sent
_CAT_EOF = b'^D\x08\x08'


def _make_closed_tempfile():
    '''Create a temporary file and return its path with the handle closed.

    Replaces the deprecated, race-prone tempfile.mktemp(): mkstemp()
    actually creates the file (O_EXCL), so no other process can claim the
    name between name generation and open().  The later open(..., 'wb')
    simply truncates the already-existing file.
    '''
    fd, filename = tempfile.mkstemp()
    os.close(fd)
    return filename


class TestCaseLog(PexpectTestCase.PexpectTestCase):
    '''Tests for the spawn ``logfile``, ``logfile_read`` and ``logfile_send``
    hooks: each should capture the expected slice of the child's traffic.'''

    def test_log(self):
        '''logfile captures everything read from the child process.'''
        log_message = 'This is a test.'
        filename = _make_closed_tempfile()
        mylog = open(filename, 'wb')
        p = pexpect.spawn('echo', [log_message])
        p.logfile = mylog
        p.expect(pexpect.EOF)
        p.logfile = None
        mylog.close()
        with open(filename, 'rb') as f:
            lf = f.read()
        os.unlink(filename)
        self.assertEqual(lf.rstrip(), log_message.encode('ascii'))

    def test_log_logfile_read(self):
        '''logfile_read captures only the child's output: the echoed input
        plus cat's reply, i.e. the message twice.'''
        log_message = 'This is a test.'
        filename = _make_closed_tempfile()
        mylog = open(filename, 'wb')
        p = pexpect.spawn('cat')
        p.logfile_read = mylog
        p.sendline(log_message)
        p.sendeof()
        p.expect(pexpect.EOF)
        p.logfile = None
        mylog.close()
        with open(filename, 'rb') as f:
            lf = f.read()
        os.unlink(filename)
        # cat may render the Ctrl-D we sent as ^D plus backspaces; strip it.
        lf = lf.replace(_CAT_EOF, b'')
        self.assertEqual(lf, b'This is a test.\r\nThis is a test.\r\n')

    def test_log_logfile_send(self):
        '''logfile_send captures only what we wrote to the child.'''
        log_message = b'This is a test.'
        filename = _make_closed_tempfile()
        mylog = open(filename, 'wb')
        p = pexpect.spawn('cat')
        p.logfile_send = mylog
        p.sendline(log_message)
        p.sendeof()
        p.expect(pexpect.EOF)
        p.logfile = None
        mylog.close()
        with open(filename, 'rb') as f:
            lf = f.read()
        os.unlink(filename)
        # Remove the raw EOF byte (\x04) sent by sendeof().
        lf = lf.replace(b'\x04', b'')
        self.assertEqual(lf.rstrip(), log_message)

    def test_log_send_and_received(self):
        '''The logfile should have the test message three time -- once for
        the data we sent.  Once for the data that cat echos back as
        characters are typed.  And once for the data that cat prints after
        we send a linefeed (sent by sendline).
        '''
        log_message = 'This is a test.'
        filename = _make_closed_tempfile()
        mylog = open(filename, 'wb')
        p = pexpect.spawn('cat')
        p.logfile = mylog
        p.sendline(log_message)
        p.sendeof()
        p.expect(pexpect.EOF)
        p.logfile = None
        mylog.close()
        with open(filename, 'rb') as f:
            lf = f.read()
        os.unlink(filename)
        lf = lf.replace(b'\x04', b'').replace(_CAT_EOF, b'')
        self.assertEqual(lf,
                         b'This is a test.\nThis is a test.\r\nThis is a test.\r\n')

if __name__ == '__main__':
    unittest.main()

suite = unittest.makeSuite(TestCaseLog, 'test')
isc
alisidd/tensorflow
tensorflow/python/training/saver_large_partitioned_variable_test.py
141
2261
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Tests for tensorflow.python.training.saver.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from tensorflow.python.client import session from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import partitioned_variables from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import saver class SaverLargePartitionedVariableTest(test.TestCase): # Need to do this in a separate test because of the amount of memory needed # to run this test. def testLargePartitionedVariables(self): save_path = os.path.join(self.get_temp_dir(), "large_variable") var_name = "my_var" # Saving large partition variable. with session.Session("", graph=ops.Graph()) as sess: with ops.device("/cpu:0"): # Create a partitioned variable which is larger than int32 size but # split into smaller sized variables. 
init = lambda shape, dtype, partition_info: constant_op.constant( True, dtype, shape) partitioned_var = partitioned_variables.create_partitioned_variables( [1 << 31], [4], init, dtype=dtypes.bool, name=var_name) variables.global_variables_initializer().run() save = saver.Saver(partitioned_var) val = save.save(sess, save_path) self.assertEqual(save_path, val) if __name__ == "__main__": test.main()
apache-2.0
wolverine2k/htc7x30-3.0
scripts/gcc-wrapper.py
64
3825
#! /usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2011, Code Aurora Forum. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of Code Aurora nor # the names of its contributors may be used to endorse or promote # products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Invoke gcc, looking for warnings, and causing a failure if there are # non-whitelisted warnings. import errno import re import os import sys import subprocess # Note that gcc uses unicode, which may depend on the locale. TODO: # force LANG to be set to en_US.UTF-8 to get consistent warnings. 
allowed_warnings = set([ "alignment.c:720", "async.c:122", "async.c:270", "dir.c:43", "dm.c:1053", "dm.c:1080", "dm-table.c:1120", "dm-table.c:1126", "drm_edid.c:1303", "eventpoll.c:1143", "f_mass_storage.c:3368", "inode.c:72", "inode.c:73", "inode.c:74", "msm_sdcc.c:126", "msm_sdcc.c:128", "nf_conntrack_netlink.c:790", "nf_nat_standalone.c:118", "return_address.c:62", "soc-core.c:1719", "xt_log.h:50", "vx6953.c:3124", ]) # Capture the name of the object file, can find it. ofile = None warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''') def interpret_warning(line): """Decode the message from gcc. The messages we care about have a filename, and a warning""" line = line.rstrip('\n') m = warning_re.match(line) if m and m.group(2) not in allowed_warnings: print "error, forbidden warning:", m.group(2) # If there is a warning, remove any object if it exists. if ofile: try: os.remove(ofile) except OSError: pass sys.exit(1) def run_gcc(): args = sys.argv[1:] # Look for -o try: i = args.index('-o') global ofile ofile = args[i+1] except (ValueError, IndexError): pass compiler = sys.argv[0] try: proc = subprocess.Popen(args, stderr=subprocess.PIPE) for line in proc.stderr: print line, # interpret_warning(line) result = proc.wait() except OSError as e: result = e.errno if result == errno.ENOENT: print args[0] + ':',e.strerror print 'Is your PATH set correctly?' else: print ' '.join(args), str(e) return result if __name__ == '__main__': status = run_gcc() sys.exit(status)
gpl-2.0
h3llrais3r/SickRage
lib/github/PaginatedList.py
23
7862
# -*- coding: utf-8 -*- # ########################## Copyrights and license ############################ # # # Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> # # Copyright 2012 Zearin <zearin@gonk.net> # # Copyright 2013 AKFish <akfish@gmail.com> # # Copyright 2013 Bill Mill <bill.mill@gmail.com> # # Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> # # Copyright 2013 davidbrai <davidbrai@gmail.com> # # # # This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ # # # # PyGithub is free software: you can redistribute it and/or modify it under # # the terms of the GNU Lesser General Public License as published by the Free # # Software Foundation, either version 3 of the License, or (at your option) # # any later version. # # # # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY # # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # # details. # # # # You should have received a copy of the GNU Lesser General Public License # # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. 
# # # # ############################################################################## import github.GithubObject class PaginatedListBase: def __init__(self): self.__elements = list() def __getitem__(self, index): assert isinstance(index, (int, slice)) if isinstance(index, (int, long)): self.__fetchToIndex(index) return self.__elements[index] else: return self._Slice(self, index) def __iter__(self): for element in self.__elements: yield element while self._couldGrow(): newElements = self._grow() for element in newElements: yield element def _isBiggerThan(self, index): return len(self.__elements) > index or self._couldGrow() def __fetchToIndex(self, index): while len(self.__elements) <= index and self._couldGrow(): self._grow() def _grow(self): newElements = self._fetchNextPage() self.__elements += newElements return newElements class _Slice: def __init__(self, theList, theSlice): self.__list = theList self.__start = theSlice.start or 0 self.__stop = theSlice.stop self.__step = theSlice.step or 1 def __iter__(self): index = self.__start while not self.__finished(index): if self.__list._isBiggerThan(index): yield self.__list[index] index += self.__step else: return def __finished(self, index): return self.__stop is not None and index >= self.__stop class PaginatedList(PaginatedListBase): """ This class abstracts the `pagination of the API <http://developer.github.com/v3/#pagination>`_. 
You can simply enumerate through instances of this class:: for repo in user.get_repos(): print repo.name You can also index them or take slices:: second_repo = user.get_repos()[1] first_repos = user.get_repos()[:10] If you want to iterate in reversed order, just do:: for repo in user.get_repos().reversed: print repo.name And if you really need it, you can explicitely access a specific page:: some_repos = user.get_repos().get_page(0) some_other_repos = user.get_repos().get_page(3) """ def __init__(self, contentClass, requester, firstUrl, firstParams, headers=None): PaginatedListBase.__init__(self) self.__requester = requester self.__contentClass = contentClass self.__firstUrl = firstUrl self.__firstParams = firstParams or () self.__nextUrl = firstUrl self.__nextParams = firstParams or {} self.__headers = headers if self.__requester.per_page != 30: self.__nextParams["per_page"] = self.__requester.per_page self._reversed = False self.__totalCount = None @property def totalCount(self): if not self.__totalCount: self._grow() return self.__totalCount def _getLastPageUrl(self): headers, data = self.__requester.requestJsonAndCheck( "GET", self.__firstUrl, parameters=self.__nextParams, headers=self.__headers ) links = self.__parseLinkHeader(headers) lastUrl = links.get("last") return lastUrl @property def reversed(self): r = PaginatedList(self.__contentClass, self.__requester, self.__firstUrl, self.__firstParams) r.__reverse() return r def __reverse(self): self._reversed = True lastUrl = self._getLastPageUrl() if lastUrl: self.__nextUrl = lastUrl def _couldGrow(self): return self.__nextUrl is not None def _fetchNextPage(self): headers, data = self.__requester.requestJsonAndCheck( "GET", self.__nextUrl, parameters=self.__nextParams, headers=self.__headers ) data = data if data else [] self.__nextUrl = None if len(data) > 0: links = self.__parseLinkHeader(headers) if self._reversed: if "prev" in links: self.__nextUrl = links["prev"] elif "next" in links: self.__nextUrl = 
links["next"] self.__nextParams = None if 'items' in data: self.__totalCount = data['total_count'] data = data["items"] content = [ self.__contentClass(self.__requester, headers, element, completed=False) for element in data if element is not None ] if self._reversed: return content[::-1] return content def __parseLinkHeader(self, headers): links = {} if "link" in headers: linkHeaders = headers["link"].split(", ") for linkHeader in linkHeaders: (url, rel) = linkHeader.split("; ") url = url[1:-1] rel = rel[5:-1] links[rel] = url return links def get_page(self, page): params = dict(self.__firstParams) if page != 0: params["page"] = page + 1 if self.__requester.per_page != 30: params["per_page"] = self.__requester.per_page headers, data = self.__requester.requestJsonAndCheck( "GET", self.__firstUrl, parameters=params, headers=self.__headers ) if 'items' in data: self.__totalCount = data['total_count'] data = data["items"] return [ self.__contentClass(self.__requester, headers, element, completed=False) for element in data ]
gpl-3.0
ephes/scikit-learn
examples/linear_model/plot_sgd_loss_functions.py
249
1095
""" ========================== SGD: convex loss functions ========================== A plot that compares the various convex loss functions supported by :class:`sklearn.linear_model.SGDClassifier` . """ print(__doc__) import numpy as np import matplotlib.pyplot as plt def modified_huber_loss(y_true, y_pred): z = y_pred * y_true loss = -4 * z loss[z >= -1] = (1 - z[z >= -1]) ** 2 loss[z >= 1.] = 0 return loss xmin, xmax = -4, 4 xx = np.linspace(xmin, xmax, 100) plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-', label="Zero-one loss") plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-', label="Hinge loss") plt.plot(xx, -np.minimum(xx, 0), 'm-', label="Perceptron loss") plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-', label="Log loss") plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-', label="Squared hinge loss") plt.plot(xx, modified_huber_loss(xx, 1), 'y--', label="Modified Huber loss") plt.ylim((0, 8)) plt.legend(loc="upper right") plt.xlabel(r"Decision function $f(x)$") plt.ylabel("$L(y, f(x))$") plt.show()
bsd-3-clause
benninkcorien/InkcyAutomate
inkcyauto/automate-legacy.py
1
9729
# Ink image scraping from Klundtasaur's Airtable # https://airtable.com/shrF8Vr0O5VPB6ZoR/tblp9rsxx6AsHwOzW/viw42CNIgWnLe5NSl import csv import os import urllib import multiprocessing from time import time def retrieve_airtable(filename): ''' Download CSV file from source and save as airtable-source.csv https://s3.amazonaws.com/airtable-csv-exports-production/85ae40fcf90f305273c19039ae5694a1/Table%201-Main%20View.csv this is NOT a static URL! !!! NOT WORKING ANYMORE - For now you have to manually download the file from https://airtable.com/shrF8Vr0O5VPB6ZoR/tblp9rsxx6AsHwOzW/viw42CNIgWnLe5NSl (click on the three dots, then "Download CSV") and save it as airtable-source.csv in the same folder this script is in !!! ''' airtabl_url = "https://s3.amazonaws.com/airtable-csv-exports-production/b82da440462b15aade5a2ca1705aeef3/Table%201-Main%20View.csv" atcsv = urllib.URLopener() try: atcsv.retrieve(airtabl_url, filename) except: assert False, "Unable to retrieve airtable file" def import_csv(filename): ''' Generate the difference.csv file file1 is the short one file2 is the long one thanks to hotpotatobeans for this part :) ''' with open(filename) as file: # DictReader returns a generator, which would've been exhausted # on line 15 return tuple(csv.DictReader(file)) def save_differences(filename, master_file, new_file, matching_field="Imgur Address"): ''' Calculates the differences between two files and saves it to a CSV file ''' uniq_images = set(i[matching_field] for i in new_file) - set(i[matching_field] for i in master_file) uniq_list = [i for i in new_file if i[matching_field] in uniq_images and len(i[matching_field].strip())] with open(filename, 'wb') as file: writer = csv.DictWriter(file, ('Name', 'Status', 'Donated by', 'Inkbot version', 'Imgur Address', 'Brand+ink regex', 'Automod rule')) writer.writerows(uniq_list) return uniq_list def download_image((image_path, imagelink)): ''' Downloads a single image to a file for use in the parallel map ''' if 
os.path.exists(image_path): return "" print "Retrieving file: {}".format(image_path) try: image = urllib.URLopener() image.retrieve(imagelink, image_path) return "" except: return image_path + "-" + imagelink def download_images(image_folder, uniq_list): ''' Download all images and give them the correct name Returns a list of failed images Note: I've used multiprocessing here to download multiple images in parallel for faster processing if you're interested in how this works check out the documentation here: https://docs.python.org/2/library/multiprocessing.html ''' MP_pool = multiprocessing.Pool() image_data = [(os.path.join(image_folder, item['Name'].strip() + ".jpg"), item["Imgur Address"]) for item in uniq_list] results = MP_pool.map(download_image, image_data) return [x for x in results if len(x)] def main(): file_folder = "./" # Same folder script is run from image_folder = "./Images" # A folder called Images inside the folder the script is in wordpress_file = os.path.join(file_folder, "goedewordpress.csv") airtable_file = os.path.join(file_folder, "airtable-source.csv") difference_file = os.path.join(file_folder, "difference.csv") assert os.path.exists(wordpress_file), "No Wordpress CSV file found" start_time = time() print "\nRetrieving Airtable file.." # has to be done manually for now # retrieve_airtable(airtable_file) print "\nAirtable File retrieved!" print "\nImporting CSV files..." file1 = import_csv(wordpress_file) file2 = import_csv(airtable_file) print "\nCSV Files imported!" print "\nSaving the file differences..." uniq_list = save_differences(difference_file, file1, file2) print "\nDifferences saved!" # THE BELOW IS NO LONGER NEEDED DUE TO THE TRY EXCEPTS IN THE DOWNLOAD LOOP # ## remove all empty rows and entries that have no imgur URL ## raw_input("Remove rows that have no imgur URL, then press Enter to continue...") print "\nDownloading new images..." failed_images = download_images(image_folder, uniq_list) print "\nNew Images downloaded!" 
if len(failed_images): print "\nThe following images failed to Download:\n{}".format("\n".join(failed_images)) print "\nScript completed in: {}s".format(time() - start_time) if __name__ == '__main__': main() # TO DO for helpful python coders: # Make difference.csv look like import-products.csv # #id, SKU, Title, Content, Product Type, parent_id ,Product Categories ,Colors ,brand, ink attributes, paper, nib size, Image URL # -- id: blank # -- sku: blank (will be auto-generated on import) # -- Title : ink name (already working) ''' Needed: Find the ink on fp-ink.info, and pull information from the table for the matching row. The table has the following header Brand Series Name Hue Lite Vibrancy L D Shade Pig EG LE DC Dry (s) Flow Lubrication Bleed Feather Ghost # Find match for ink. Either have python use the Search function and search on brandname first to get a smaller table # or just search through all 16 pages.. # Once you've found a match, you need to access the correct row in the table # you can then get the values for that cell in that row using beautifulsoup : === Beautifulsoup Example on how to get data for all Columns from the table on the page (the_url) === Needed: for specific row only START OF CODE import urllib from bs4 import BeautifulSoup # the page that has the ink on it , the number at the end changes, currently goes up to 16. 
the_url = "https://www.fp-ink.info/en/?page=2" r = urllib.urlopen(the_url).read().decode('utf-8') soup = BeautifulSoup(r, "html.parser") # get all Names : cols = [header.string for header in soup.find('thead').findAll('th')] namecolumn = cols.index('Name') name_values = [td[namecolumn].string.encode() for td in [tr.findAll('td') for tr in soup.find('tbody').findAll('tr')]] print name_values # -- Brand # get all Brands : cols = [header.string for header in soup.find('thead').findAll('th')] brandcolumn = cols.index('Brand') brand_values = [td[brandcolumn].string.encode() for td in [tr.findAll('td') for tr in soup.find('tbody').findAll('tr')]] print brand_values END OF CODE ''' ''' # Output of print cols[x] : # [None, u'Brand', u'Series', u'Name', None, None, None, None, u'D', None, None, None, None, None, None, None, None, None, None, None] # print cols[0] None - Details # print cols[1] Brand # print cols[2] Series # print cols[3] Name # print cols[4] None - Hue Needed - same process for Content and Color as we used for Names and Brands , except that those have no header name, and cols.index[0] is not working... For Content: if the ink exists on fp-ink.info, the Content column in difference.csv should have this format: <a href="http://www.fp-ink.info/en/details/224.ink"><img src="http://www.fp-ink.info/colorcard224.png"></a> Where the number 224 changes in two places The individual ink links on fp-ink.info have the following format: https://www.fp-ink.info/en/details/300.ink?page=13 You want to keep only the 300 # STEP 1 # go to matching row, get the a href link of "Details" # I think something like this should work to get the href text so you can strip it and get the number. # details_url = table first column .find('link', href=True) # details_url_string = details_url.get('href') # strip the "https://www.fp-ink.info/en/details/" from the string, and then keep only the first numbers (until you hit the . 
in .ink) # STEP 2 store that value as fpink_number = # 300 in the example above # Generate the text for the Content column in difference.csv : content = '<a href="http://www.fp-ink.info/en/details/' + str(fpink_number) + '.ink"><img src="http://www.fp-ink.info/colorcard/' + str(fpink_number) + '.png"></a>' # print content indeed gives the correct content, yay ''' # -- Product Type: simple # -- parent_id : 0 # -- Product Categories: inks ''' Colors : the correct color for that ink # # Current colors used are black, blue, black, blue-black, brown, burgundy, gray, green, highlighter, white, invisible/white, orange, pink, purple, red, turquoise, yellow # # The fp-ink site sometimes has blue-green where I would have both blue and green ''' # -- ink attributes: has to be set manually for each one. # -- Paper : Neenah NEUTECH PS # -- nib size: has to be set manually for each one. # -- image URL # # I think I can batch upload the images into Wordpress, the problem is Wordpress will auto rename images that have the same name # # I will look into this, but don't mind manually adding these since it's easy to see in WooCommerce which images are missing, and a good check to see if I got all missing inks added. # # (ToDo for me: learn how to merge different branches from helpful coders into the main file and read more Python books) # # CSV file to be imported into WooCommerce has this format: # import-products.csv # --?? Suggestions welcome for more improvements. # -- I would like to keep code very readable (so I understand what it does)
cc0-1.0
j-carpentier/nova
nova/objects/aggregate.py
29
7877
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nova.compute import utils as compute_utils
from nova import db
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields


# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class Aggregate(base.NovaPersistentObject, base.NovaObject,
                base.NovaObjectDictCompat):
    """A host aggregate: a named group of hosts with a metadata dict.

    Persistence is delegated to the ``db.aggregate_*`` API; every mutating
    operation also emits "<op>.start" / "<op>.end" notifications via
    compute_utils.notify_about_aggregate_update().
    """

    # Version 1.0: Initial version
    # Version 1.1: String attributes updated to support unicode
    VERSION = '1.1'

    fields = {
        'id': fields.IntegerField(),
        'name': fields.StringField(),
        'hosts': fields.ListOfStringsField(nullable=True),
        'metadata': fields.DictOfStringsField(nullable=True),
        }

    # 'availability_zone' is exposed as a read-only property (below), not a
    # persisted field, so it must be declared as an extra field.
    obj_extra_fields = ['availability_zone']

    @staticmethod
    def _from_db_object(context, aggregate, db_aggregate):
        """Populate *aggregate* from a DB row and return it.

        The DB row stores the metadata dict under the legacy key
        'metadetails'; every other field name matches the DB column.
        Changes are reset afterwards so the object starts out "clean".
        """
        for key in aggregate.fields:
            if key == 'metadata':
                db_key = 'metadetails'
            else:
                db_key = key
            aggregate[key] = db_aggregate[db_key]
        aggregate._context = context
        aggregate.obj_reset_changes()
        return aggregate

    def _assert_no_hosts(self, action):
        # Hosts may only be changed through add_host()/delete_host(), never
        # by assigning to self.hosts and then calling create()/save().
        if 'hosts' in self.obj_what_changed():
            raise exception.ObjectActionError(
                action=action,
                reason='hosts updated inline')

    @base.remotable_classmethod
    def get_by_id(cls, context, aggregate_id):
        """Fetch a single aggregate by its database id."""
        db_aggregate = db.aggregate_get(context, aggregate_id)
        return cls._from_db_object(context, cls(), db_aggregate)

    @base.remotable
    def create(self):
        """Persist a new aggregate built from this object's set fields.

        Raises ObjectActionError if the object already has an id (i.e. was
        already created) or if hosts were modified inline.
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        self._assert_no_hosts('create')
        updates = self.obj_get_changes()
        payload = dict(updates)
        if 'metadata' in updates:
            # NOTE(danms): For some reason the notification format is weird
            payload['meta_data'] = payload.pop('metadata')
        compute_utils.notify_about_aggregate_update(self._context,
                                                    "create.start",
                                                    payload)
        # Metadata is passed to the DB layer separately from the other
        # column updates.
        metadata = updates.pop('metadata', None)
        db_aggregate = db.aggregate_create(self._context, updates,
                                           metadata=metadata)
        self._from_db_object(self._context, self, db_aggregate)
        payload['aggregate_id'] = self.id
        compute_utils.notify_about_aggregate_update(self._context,
                                                    "create.end",
                                                    payload)

    @base.remotable
    def save(self):
        """Write this object's changed fields back to the database."""
        self._assert_no_hosts('save')
        updates = self.obj_get_changes()

        payload = {'aggregate_id': self.id}
        if 'metadata' in updates:
            # Same quirky notification key as in create() above.
            payload['meta_data'] = updates['metadata']
        compute_utils.notify_about_aggregate_update(self._context,
                                                    "updateprop.start",
                                                    payload)
        # The id identifies the row; it is never part of the update set.
        updates.pop('id', None)
        db_aggregate = db.aggregate_update(self._context, self.id, updates)
        compute_utils.notify_about_aggregate_update(self._context,
                                                    "updateprop.end",
                                                    payload)
        self._from_db_object(self._context, self, db_aggregate)

    @base.remotable
    def update_metadata(self, updates):
        """Apply a dict of metadata updates.

        A value of None deletes that key (both in the DB and locally);
        any other value is added/overwritten. Missing keys on delete are
        ignored, so the operation is idempotent per key.
        """
        payload = {'aggregate_id': self.id,
                   'meta_data': updates}
        compute_utils.notify_about_aggregate_update(self._context,
                                                    "updatemetadata.start",
                                                    payload)
        to_add = {}
        for key, value in updates.items():
            if value is None:
                # None means "remove this key"; tolerate it already being
                # gone from either the DB or the local cache.
                try:
                    db.aggregate_metadata_delete(self._context,
                                                 self.id, key)
                except exception.AggregateMetadataNotFound:
                    pass
                try:
                    self.metadata.pop(key)
                except KeyError:
                    pass
            else:
                to_add[key] = value
                self.metadata[key] = value
        # Additions are batched into a single DB call.
        db.aggregate_metadata_add(self._context, self.id, to_add)
        compute_utils.notify_about_aggregate_update(self._context,
                                                    "updatemetadata.end",
                                                    payload)
        self.obj_reset_changes(fields=['metadata'])

    @base.remotable
    def destroy(self):
        """Delete this aggregate from the database."""
        db.aggregate_delete(self._context, self.id)

    @base.remotable
    def add_host(self, host):
        """Add *host* to this aggregate (DB first, then the local list)."""
        db.aggregate_host_add(self._context, self.id, host)
        if self.hosts is None:
            self.hosts = []
        self.hosts.append(host)
        # The DB is already up to date, so don't report 'hosts' as dirty.
        self.obj_reset_changes(fields=['hosts'])

    @base.remotable
    def delete_host(self, host):
        """Remove *host* from this aggregate (DB first, then locally)."""
        db.aggregate_host_delete(self._context, self.id, host)
        self.hosts.remove(host)
        self.obj_reset_changes(fields=['hosts'])

    @property
    def availability_zone(self):
        # The AZ is just a conventional metadata key; None when unset.
        return self.metadata.get('availability_zone', None)


@base.NovaObjectRegistry.register
class AggregateList(base.ObjectListBase, base.NovaObject):
    """A list of Aggregate objects with bulk query helpers."""

    # Version 1.0: Initial version
    # Version 1.1: Added key argument to get_by_host()
    #              Aggregate <= version 1.1
    # Version 1.2: Added get_by_metadata_key
    VERSION = '1.2'

    fields = {
        'objects': fields.ListOfObjectsField('Aggregate'),
        }

    # NOTE(danms): Aggregate was at 1.1 before we added this
    obj_relationships = {
        'objects': [('1.0', '1.1'), ('1.1', '1.1'), ('1.2', '1.1')],
        }

    @classmethod
    def _filter_db_aggregates(cls, db_aggregates, hosts):
        """Return the DB rows whose host list intersects *hosts*.

        *hosts* may be any iterable; it is converted to a set once so each
        membership test is O(1).
        """
        if not isinstance(hosts, set):
            hosts = set(hosts)
        filtered_aggregates = []
        for db_aggregate in db_aggregates:
            for host in db_aggregate['hosts']:
                if host in hosts:
                    filtered_aggregates.append(db_aggregate)
                    # One matching host is enough; avoid duplicates.
                    break
        return filtered_aggregates

    @base.remotable_classmethod
    def get_all(cls, context):
        """Return every aggregate in the database."""
        db_aggregates = db.aggregate_get_all(context)
        return base.obj_make_list(context, cls(context), objects.Aggregate,
                                  db_aggregates)

    @base.remotable_classmethod
    def get_by_host(cls, context, host, key=None):
        """Return aggregates containing *host*, optionally filtered by
        metadata *key* (filtering is done by the DB layer).
        """
        db_aggregates = db.aggregate_get_by_host(context, host, key=key)
        return base.obj_make_list(context, cls(context), objects.Aggregate,
                                  db_aggregates)

    @base.remotable_classmethod
    def get_by_metadata_key(cls, context, key, hosts=None):
        """Return aggregates having metadata *key*; when *hosts* is given,
        keep only aggregates containing at least one of those hosts.
        """
        db_aggregates = db.aggregate_get_by_metadata_key(context, key=key)
        if hosts is not None:
            db_aggregates = cls._filter_db_aggregates(db_aggregates, hosts)
        return base.obj_make_list(context, cls(context), objects.Aggregate,
                                  db_aggregates)
apache-2.0
apporc/nova
nova/manager.py
2
4605
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Base Manager class.

Managers are responsible for a certain aspect of the system.  It is a logical
grouping of code relating to a portion of the system.  In general other
components should be using the manager to make changes to the components that
it is responsible for.

For example, other components that need to deal with volumes in some way,
should do so by calling methods on the VolumeManager instead of directly
changing fields in the database.  This allows us to keep all of the code
relating to volumes in the same place.

We have adopted a basic strategy of Smart managers and dumb data, which means
rather than attaching methods to data objects, components should call manager
methods that act on the data.

Methods on managers that can be executed locally should be called directly. If
a particular method must execute on a remote host, this should be done via rpc
to the service that wraps the manager

Managers should be responsible for most of the db access, and
non-implementation specific data.  Anything implementation specific that can't
be generalized should be done by the Driver.

In general, we prefer to have one manager with multiple drivers for different
implementations, but sometimes it makes sense to have multiple managers.  You
can think of it this way: Abstract different overall strategies at the manager
level(FlatNetwork vs VlanNetwork), and different implementations at the driver
level(LinuxNetDriver vs CiscoNetDriver).

Managers will often provide methods for initial setup of a host or periodic
tasks to a wrapping service.

This module provides Manager, a base class for managers.

"""

from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import periodic_task

from nova.db import base
from nova import rpc


CONF = cfg.CONF
CONF.import_opt('host', 'nova.netconf')
LOG = logging.getLogger(__name__)


class PeriodicTasks(periodic_task.PeriodicTasks):
    """Mixin binding oslo.service periodic tasks to nova's global CONF."""

    def __init__(self):
        # oslo.service's PeriodicTasks requires the config object so it can
        # read task-interval options from it.
        super(PeriodicTasks, self).__init__(CONF)


class Manager(base.Base, PeriodicTasks):
    """Base class for all nova managers.

    Combines database access (base.Base) with periodic-task support and
    holds the common per-service attributes (host, notifier, etc.).
    Subclasses override the init/cleanup/start hooks below as needed.
    """

    def __init__(self, host=None, db_driver=None, service_name='undefined'):
        # Fall back to this node's configured hostname when none is given.
        if not host:
            host = CONF.host
        self.host = host
        # NOTE(review): backdoor_port is only initialized here; presumably
        # it is assigned later by the wrapping service — confirm.
        self.backdoor_port = None
        self.service_name = service_name
        self.notifier = rpc.get_notifier(self.service_name, self.host)
        # Extra RPC endpoints a subclass may want exposed alongside itself.
        self.additional_endpoints = []
        super(Manager, self).__init__(db_driver)

    def periodic_tasks(self, context, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        return self.run_periodic_tasks(context,
                                       raise_on_error=raise_on_error)

    def init_host(self):
        """Hook to do additional manager initialization when one requests
        the service be started.  This is called before any service record
        is created.

        Child classes should override this method.
        """
        pass

    def cleanup_host(self):
        """Hook to do cleanup work when the service shuts down.

        Child classes should override this method.
        """
        pass

    def pre_start_hook(self):
        """Hook to provide the manager the ability to do additional
        start-up work before any RPC queues/consumers are created. This is
        called after other initialization has succeeded and a service
        record is created.

        Child classes should override this method.
        """
        pass

    def post_start_hook(self):
        """Hook to provide the manager the ability to do additional
        start-up work immediately after a service creates RPC consumers
        and starts 'running'.

        Child classes should override this method.
        """
        pass

    def reset(self):
        """Hook called on SIGHUP to signal the manager to re-read any
        dynamic configuration or do any reconfiguration tasks.
        """
        pass
apache-2.0
palisadoes/switchmap-ng
switchmap/test/test_mib_ciscoc2900.py
2
4909
#!/usr/bin/env python3
"""Test the mib_ciscoc2900 module."""

import os
import sys
import unittest
from mock import Mock

# Try to create a working PYTHONPATH
TEST_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
SWITCHMAP_DIRECTORY = os.path.abspath(os.path.join(TEST_DIRECTORY, os.pardir))
ROOT_DIRECTORY = os.path.abspath(os.path.join(SWITCHMAP_DIRECTORY, os.pardir))
if TEST_DIRECTORY.endswith('/switchmap-ng/switchmap/test') is True:
    sys.path.append(ROOT_DIRECTORY)
else:
    # BUG FIX: the message previously named the "switchmap-ng/bin"
    # directory although the check above is for switchmap-ng/switchmap/test.
    print(
        'This script is not installed in the "switchmap-ng/switchmap/test" '
        'directory. Please fix.')
    sys.exit(2)

from switchmap.snmp.cisco import mib_ciscoc2900 as testimport


class Query(object):
    """Class for snmp_manager.Query mock.

    A detailed tutorial about Python mocks can be found here:
    http://www.drdobbs.com/testing/using-mocks-in-python/240168251
    """

    def query(self):
        """Do an SNMP query."""
        pass

    def oid_exists(self):
        """Determine existence of OID on device."""
        pass

    def swalk(self):
        """Do a failsafe SNMPwalk."""
        pass

    def walk(self):
        """Do a SNMPwalk."""
        pass


class KnownValues(unittest.TestCase):
    """Checks all functions and methods."""

    #########################################################################
    # General object setup
    #########################################################################

    # SNMPwalk results used by Mocks.

    # Normalized walk returning integers, keyed by interface index.
    nwalk_results_integer = {
        100: 1234,
        200: 5678
    }

    # Set the stage for SNMPwalk for integer results
    snmpobj_integer = Mock(spec=Query)
    mock_spec_integer = {
        'swalk.return_value': nwalk_results_integer,
        'walk.return_value': nwalk_results_integer,
    }
    snmpobj_integer.configure_mock(**mock_spec_integer)

    # Expected layer-1 data per interface index, shared by all tests below.
    expected_dict = {
        100: {
            'c2900PortLinkbeatStatus': 1234,
            'c2900PortDuplexStatus': 1234,
        },
        200: {
            'c2900PortLinkbeatStatus': 5678,
            'c2900PortDuplexStatus': 5678,
        }
    }

    def test_get_query(self):
        """Testing function get_query."""
        pass

    def test_init_query(self):
        """Testing function init_query."""
        pass

    def test___init__(self):
        """Testing function __init__."""
        pass

    def test_layer1(self):
        """Testing function layer1."""
        # Set the stage for SNMPwalk
        snmpobj = Mock(spec=Query)
        mock_spec = {'walk.return_value': self.nwalk_results_integer}
        snmpobj.configure_mock(**mock_spec)

        # Get results
        testobj = testimport.init_query(snmpobj)
        results = testobj.layer1()

        # Basic testing of results; compare against the shared class-level
        # fixture instead of a duplicated local copy.
        for primary in results.keys():
            for secondary in results[primary].keys():
                self.assertEqual(
                    results[primary][secondary],
                    self.expected_dict[primary][secondary])

    def test_c2900portlinkbeatstatus(self):
        """Testing function c2900portlinkbeatstatus."""
        # Initialize key variables
        oid_key = 'c2900PortLinkbeatStatus'
        oid = '.1.3.6.1.4.1.9.9.87.1.4.1.1.18'

        # Get results
        testobj = testimport.init_query(self.snmpobj_integer)
        results = testobj.c2900portlinkbeatstatus()

        # Basic testing of results
        for key, value in results.items():
            self.assertEqual(isinstance(key, int), True)
            self.assertEqual(value, self.expected_dict[key][oid_key])

        # Test that we are getting the correct OID
        results = testobj.c2900portlinkbeatstatus(oidonly=True)
        self.assertEqual(results, oid)

    def test_c2900portduplexstatus(self):
        """Testing function c2900portduplexstatus."""
        # Initialize key variables
        # BUG FIX: this test previously verified against the
        # 'c2900PortLinkbeatStatus' key (copy-paste error); check the
        # duplex column it actually exercises.
        oid_key = 'c2900PortDuplexStatus'
        oid = '.1.3.6.1.4.1.9.9.87.1.4.1.1.32'

        # Get results
        testobj = testimport.init_query(self.snmpobj_integer)
        results = testobj.c2900portduplexstatus()

        # Basic testing of results
        for key, value in results.items():
            self.assertEqual(isinstance(key, int), True)
            self.assertEqual(value, self.expected_dict[key][oid_key])

        # Test that we are getting the correct OID
        results = testobj.c2900portduplexstatus(oidonly=True)
        self.assertEqual(results, oid)


if __name__ == '__main__':
    # Do the unit test
    unittest.main()
apache-2.0