repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
jamtot/PyProjectEuler | 59 - XOR decryption/xor.py | 1 | 1389 | import string
def splitinput(filename):
    """Read *filename* and return its comma-separated values as a list of ints."""
    with open(filename) as ciphertext:
        raw = ciphertext.read()
    return list(map(int, raw.split(",")))
def findtext(cipher):
    """Brute-force a 3-character lowercase XOR key over *cipher*.

    *cipher* is a list of integer byte values.  Every key ``aaa``..``zzz`` is
    tried in lexicographic order (same order as the original triple loop); the
    first decryption that contains all five common English words is returned.

    Returns the decrypted text, or ``"Unsuccessful decryption."`` if no key
    produces a plausible English text.

    Fixes over the original: builds each candidate with ``''.join`` instead of
    quadratic ``output += ...`` concatenation, replaces the per-character
    ``x % 3`` if/elif chain with ``key[i % 3]`` indexing, and avoids the
    Python-2-only ``xrange`` (this version runs on both Python 2 and 3).
    """
    letters = string.ascii_lowercase
    # top 5 common English words
    common = ["the", "be", "to", "of", "and"]
    for a in letters:
        for b in letters:
            for c in letters:
                key = (ord(a), ord(b), ord(c))
                # Decrypt the whole message under this candidate key.
                output = ''.join(chr(v ^ key[i % 3])
                                 for i, v in enumerate(cipher))
                if all(w in output for w in common):
                    return output
    return "Unsuccessful decryption."
def textsum(text):
    """Return the sum of the character codes (ASCII values) of *text*.

    Replaces the original index-based loop over Python-2-only ``xrange`` with
    the idiomatic ``sum`` over the characters; behaves identically on both
    Python 2 and 3.
    """
    return sum(ord(ch) for ch in text)
if __name__ == "__main__":
    # Project Euler 59: decrypt the XOR-encrypted file with a brute-forced
    # 3-letter key and print the sum of the ASCII values of the plaintext.
    cipher = splitinput("p059_cipher.txt")
    text = findtext(cipher)
    print textsum(text)
| mit |
zqzhang/crosswalk-test-suite | apptools/apptools-ios-tests/apptools/build.py | 15 | 2014 | #!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Hongjuan, Wang<hongjuanx.wang@intel.com>
# Yun, Liu<yunx.liu@intel.com>
import unittest
import os
import comm
import commands
class TestCrosswalkApptoolsFunctions(unittest.TestCase):

    def test_build_release(self):
        """Create a test application, run a release build in it, then clean up."""
        comm.setUp()
        comm.create(self)
        os.chdir('org.xwalk.test')
        # Assemble the crosswalk-app invocation for a release build.
        cmd = comm.PackTools + "crosswalk-app build release"
        comm.build(self, cmd)
        comm.clear("org.xwalk.test")
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| bsd-3-clause |
HydroH/CLaIM | PHPQuery.py | 1 | 1605 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import hashlib
import urllib
import urllib2
import time
PASS_FILE = 'data/password.txt'
URL = 'http://hydroh.xyz/koi/db/dbquery.php'
class PHPQuery(object):
    # Submit a post to the remote PHP endpoint.  (translated from: 获取页面源码)
    @staticmethod
    def parse_query(date, keyword, article):
        """Sanitize *article*, derive an integrity hash and confession id,
        then POST everything to URL, retrying until the request succeeds.

        Returns None (both on success and when no confession id is found).
        """
        # Pre-filter: prepare a pattern matching UTF8MB4 (4-byte) characters,
        # which the target MySQL column cannot store.
        try:
            # UCS-4 build of Python: match astral code points directly.
            highpoints = re.compile(u'[\U00010000-\U0010ffff]')
        except re.error:
            # UCS-2 build: match surrogate pairs instead.
            highpoints = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]')
        # Replace every utf8mb4 character with U+25FD (a placeholder square).
        article = highpoints.sub(u'\u25FD', article)
        # Integrity hash: middle 16 hex digits of md5(keyword + date + article).
        hash_raw = keyword + date + article
        hash_object = hashlib.md5(hash_raw.encode())
        hash_string = str(hash_object.hexdigest())
        hash_string = hash_string[8:-8]
        # Confession id: the number between the '❤表白' marker and the first ':'.
        try:
            conf_id = int(article[article.find('❤表白') + 3:article.find(':')])
        except ValueError:
            # No parsable id -> nothing to submit.
            return
        data = {
            'hash': hash_string,
            'postdate': date,
            'keyword': keyword,
            'confid': conf_id,
            # NOTE(review): file handle is never closed; consider a with-block.
            'password': open(PASS_FILE, 'r').read()
        }
        post_data = urllib.urlencode(data)
        # Retry the POST until it succeeds.
        # NOTE(review): bare except + unbounded loop means a permanent server
        # error retries forever; a retry cap and a narrower except would be safer.
        unfinished = True
        while unfinished:
            try:
                content = urllib2.urlopen(url=URL, data=post_data, timeout=10)
                time.sleep(2)
                unfinished = False
            except:
                print "网络连接失败,重试中……\n"
        print content.read()+'\n'
| gpl-2.0 |
shikhardb/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 14 | 15763 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state shared by the fixtures below.
rng = np.random.RandomState(0)

# Toy sample: two well-separated 2-D clusters.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1]    # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]              # toy test points
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]

# Load the iris dataset and randomly permute it.
# NOTE(review): `perm` is computed but unused; shuffle() below already permutes.
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)

# Load the boston dataset and randomly permute it.
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
                                     random_state=rng)
def test_classification_toy():
    """Check classification on a toy dataset for both boosting algorithms."""
    for alg in ['SAMME', 'SAMME.R']:
        clf = AdaBoostClassifier(algorithm=alg, random_state=0)
        clf.fit(X, y_class)
        assert_array_equal(clf.predict(T), y_t_class)
        # classes_ must contain exactly the labels seen in training.
        assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
        # Binary problem: proba has 2 columns, decision_function is 1-D.
        assert_equal(clf.predict_proba(T).shape, (len(T), 2))
        assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
    """AdaBoost regression reproduces the targets on the toy dataset."""
    regressor = AdaBoostRegressor(random_state=0)
    regressor.fit(X, y_regr)
    assert_array_equal(regressor.predict(T), y_t_regr)
def test_iris():
    """Check consistency on dataset iris."""
    classes = np.unique(iris.target)
    clf_samme = prob_samme = None

    for alg in ['SAMME', 'SAMME.R']:
        clf = AdaBoostClassifier(algorithm=alg)
        clf.fit(iris.data, iris.target)
        assert_array_equal(classes, clf.classes_)
        proba = clf.predict_proba(iris.data)
        if alg == "SAMME":
            # Keep the SAMME model/probabilities for the regression test below.
            clf_samme = clf
            prob_samme = proba
        assert_equal(proba.shape[1], len(classes))
        assert_equal(clf.decision_function(iris.data).shape[1], len(classes))

        score = clf.score(iris.data, iris.target)
        assert score > 0.9, "Failed with algorithm %s and score = %f" % \
            (alg, score)

    # Somewhat hacky regression test: prior to
    # ae7adc880d624615a34bafdb1d75ef67051b8200,
    # predict_proba returned SAMME.R values for SAMME.
    clf_samme.algorithm = "SAMME.R"
    assert_array_less(0,
                      np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
    """Consistency check: AdaBoost regression fits the Boston housing data."""
    regressor = AdaBoostRegressor(random_state=0)
    regressor.fit(boston.data, boston.target)
    training_score = regressor.score(boston.data, boston.target)
    assert training_score > 0.85
def test_staged_predict():
    """Check that staged predictions converge to the final predictions."""
    rng = np.random.RandomState(0)
    iris_weights = rng.randint(10, size=iris.target.shape)
    boston_weights = rng.randint(10, size=boston.target.shape)

    # AdaBoost classification
    for alg in ['SAMME', 'SAMME.R']:
        clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
        clf.fit(iris.data, iris.target, sample_weight=iris_weights)

        predictions = clf.predict(iris.data)
        staged_predictions = [p for p in clf.staged_predict(iris.data)]
        proba = clf.predict_proba(iris.data)
        staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
        score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
        staged_scores = [
            s for s in clf.staged_score(
                iris.data, iris.target, sample_weight=iris_weights)]

        # One staged result per estimator; last stage equals the final result.
        assert_equal(len(staged_predictions), 10)
        assert_array_almost_equal(predictions, staged_predictions[-1])
        assert_equal(len(staged_probas), 10)
        assert_array_almost_equal(proba, staged_probas[-1])
        assert_equal(len(staged_scores), 10)
        assert_array_almost_equal(score, staged_scores[-1])

    # AdaBoost regression
    clf = AdaBoostRegressor(n_estimators=10, random_state=0)
    clf.fit(boston.data, boston.target, sample_weight=boston_weights)

    predictions = clf.predict(boston.data)
    staged_predictions = [p for p in clf.staged_predict(boston.data)]
    score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
    staged_scores = [
        s for s in clf.staged_score(
            boston.data, boston.target, sample_weight=boston_weights)]

    assert_equal(len(staged_predictions), 10)
    assert_array_almost_equal(predictions, staged_predictions[-1])
    assert_equal(len(staged_scores), 10)
    assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
    """Check that base trees can be grid-searched (fit must not raise)."""
    # AdaBoost classification
    boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
    parameters = {'n_estimators': (1, 2),
                  'base_estimator__max_depth': (1, 2),
                  'algorithm': ('SAMME', 'SAMME.R')}
    clf = GridSearchCV(boost, parameters)
    clf.fit(iris.data, iris.target)

    # AdaBoost regression
    boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
                              random_state=0)
    parameters = {'n_estimators': (1, 2),
                  'base_estimator__max_depth': (1, 2)}
    clf = GridSearchCV(boost, parameters)
    clf.fit(boston.data, boston.target)
def test_pickle():
    """Check picklability: a round-tripped model keeps its type and score."""
    import pickle

    # Adaboost classifier
    for alg in ['SAMME', 'SAMME.R']:
        obj = AdaBoostClassifier(algorithm=alg)
        obj.fit(iris.data, iris.target)
        score = obj.score(iris.data, iris.target)
        s = pickle.dumps(obj)

        obj2 = pickle.loads(s)
        assert_equal(type(obj2), obj.__class__)
        score2 = obj2.score(iris.data, iris.target)
        assert_equal(score, score2)

    # Adaboost regressor
    obj = AdaBoostRegressor(random_state=0)
    obj.fit(boston.data, boston.target)
    score = obj.score(boston.data, boston.target)
    s = pickle.dumps(obj)

    obj2 = pickle.loads(s)
    assert_equal(type(obj2), obj.__class__)
    score2 = obj2.score(boston.data, boston.target)
    assert_equal(score, score2)
def test_importances():
    """Check variable importances: informative features rank first."""
    # The first 3 features are informative (shuffle=False keeps them first).
    X, y = datasets.make_classification(n_samples=2000,
                                        n_features=10,
                                        n_informative=3,
                                        n_redundant=0,
                                        n_repeated=0,
                                        shuffle=False,
                                        random_state=1)

    for alg in ['SAMME', 'SAMME.R']:
        clf = AdaBoostClassifier(algorithm=alg)
        clf.fit(X, y)
        importances = clf.feature_importances_

        assert_equal(importances.shape[0], 10)
        # Each informative feature must outweigh every non-informative one.
        assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
                     True)
def test_error():
    """Test that fit raises ValueError on deficient input."""
    # Negative learning rate.
    assert_raises(ValueError,
                  AdaBoostClassifier(learning_rate=-1).fit,
                  X, y_class)

    # Unknown boosting algorithm.
    assert_raises(ValueError,
                  AdaBoostClassifier(algorithm="foo").fit,
                  X, y_class)

    # sample_weight length does not match the number of samples.
    assert_raises(ValueError,
                  AdaBoostClassifier().fit,
                  X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
    """Test different base estimators for both AdaBoost variants."""
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.svm import SVC

    # XXX doesn't work with y_class because RF doesn't support classes_
    # Shouldn't AdaBoost run a LabelBinarizer?
    clf = AdaBoostClassifier(RandomForestClassifier())
    clf.fit(X, y_regr)

    clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
    clf.fit(X, y_class)

    from sklearn.ensemble import RandomForestRegressor
    from sklearn.svm import SVR

    clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
    clf.fit(X, y_regr)

    clf = AdaBoostRegressor(SVR(), random_state=0)
    clf.fit(X, y_regr)

    # Check that an empty discrete ensemble fails in fit, not predict.
    X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
    y_fail = ["foo", "bar", 1, 2]
    clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
    assert_raises_regexp(ValueError, "worse than random",
                         clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
    """Base estimators without sample_weight support must fail in fit."""
    from sklearn.linear_model import LinearRegression
    from sklearn.cluster import KMeans

    clf = AdaBoostClassifier(LinearRegression(), algorithm="SAMME")
    assert_raises(ValueError, clf.fit, X, y_regr)

    clf = AdaBoostRegressor(LinearRegression())
    assert_raises(ValueError, clf.fit, X, y_regr)

    clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
    assert_raises(ValueError, clf.fit, X, y_regr)

    clf = AdaBoostRegressor(KMeans())
    assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
    """Check classification with sparse input.

    Trains a classifier on each supported sparse matrix format and on the
    dense equivalent, then verifies every prediction API agrees between the
    two, and that training data was internally converted to CSC/CSR only.

    Fix over the original: the misspelled loop variable ``sprase_res`` is
    renamed to ``sparse_res`` throughout (local rename, no behavior change).
    """
    class CustomSVC(SVC):
        """SVC variant that records the nature of the training set."""

        def fit(self, X, y, sample_weight=None):
            """Modification on fit carries data type for later verification."""
            super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
            self.data_type_ = type(X)
            return self

    X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
                                                   n_features=5,
                                                   return_indicator=True,
                                                   random_state=42)
    # Flatten y to a 1d array
    y = np.ravel(y)

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
                          dok_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)

        # Trained on sparse format
        sparse_classifier = AdaBoostClassifier(
            base_estimator=CustomSVC(probability=True),
            random_state=1,
            algorithm="SAMME"
        ).fit(X_train_sparse, y_train)

        # Trained on dense format
        dense_classifier = AdaBoostClassifier(
            base_estimator=CustomSVC(probability=True),
            random_state=1,
            algorithm="SAMME"
        ).fit(X_train, y_train)

        # predict
        sparse_results = sparse_classifier.predict(X_test_sparse)
        dense_results = dense_classifier.predict(X_test)
        assert_array_equal(sparse_results, dense_results)

        # decision_function
        sparse_results = sparse_classifier.decision_function(X_test_sparse)
        dense_results = dense_classifier.decision_function(X_test)
        assert_array_equal(sparse_results, dense_results)

        # predict_log_proba
        sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
        dense_results = dense_classifier.predict_log_proba(X_test)
        assert_array_equal(sparse_results, dense_results)

        # predict_proba
        sparse_results = sparse_classifier.predict_proba(X_test_sparse)
        dense_results = dense_classifier.predict_proba(X_test)
        assert_array_equal(sparse_results, dense_results)

        # score
        sparse_results = sparse_classifier.score(X_test_sparse, y_test)
        dense_results = dense_classifier.score(X_test, y_test)
        assert_array_equal(sparse_results, dense_results)

        # staged_decision_function
        sparse_results = sparse_classifier.staged_decision_function(
            X_test_sparse)
        dense_results = dense_classifier.staged_decision_function(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)

        # staged_predict
        sparse_results = sparse_classifier.staged_predict(X_test_sparse)
        dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)

        # staged_predict_proba
        sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
        dense_results = dense_classifier.staged_predict_proba(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)

        # staged_score
        sparse_results = sparse_classifier.staged_score(X_test_sparse,
                                                        y_test)
        dense_results = dense_classifier.staged_score(X_test, y_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)

        # Verify sparsity of data is maintained during training
        types = [i.data_type_ for i in sparse_classifier.estimators_]

        assert all([(t == csc_matrix or t == csr_matrix)
                   for t in types])
def test_sparse_regression():
    """Check regression with sparse input.

    Same sparse-vs-dense equivalence test as the classification variant.

    Fixes over the original: the redundant chained assignment
    ``dense_classifier = dense_results = AdaBoostRegressor(...)`` is reduced
    to a single target (``dense_results`` was immediately overwritten and the
    alias served no purpose), and the ``sprase_res`` loop-variable misspelling
    is corrected.  No behavior change.
    """
    class CustomSVR(SVR):
        """SVR variant that records the nature of the training set."""

        def fit(self, X, y, sample_weight=None):
            """Modification on fit carries data type for later verification."""
            super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
            self.data_type_ = type(X)
            return self

    X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
                                    random_state=42)

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
                          dok_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)

        # Trained on sparse format
        sparse_classifier = AdaBoostRegressor(
            base_estimator=CustomSVR(),
            random_state=1
        ).fit(X_train_sparse, y_train)

        # Trained on dense format
        dense_classifier = AdaBoostRegressor(
            base_estimator=CustomSVR(),
            random_state=1
        ).fit(X_train, y_train)

        # predict
        sparse_results = sparse_classifier.predict(X_test_sparse)
        dense_results = dense_classifier.predict(X_test)
        assert_array_equal(sparse_results, dense_results)

        # staged_predict
        sparse_results = sparse_classifier.staged_predict(X_test_sparse)
        dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)

        # Verify sparsity of data is maintained during training
        types = [i.data_type_ for i in sparse_classifier.estimators_]

        assert all([(t == csc_matrix or t == csr_matrix)
                   for t in types])
if __name__ == "__main__":
    # Run this test module with the nose test runner.
    import nose
    nose.runmodule()
| bsd-3-clause |
v4hn/ecto | python/ecto/sphinx/__init__.py | 5 | 1585 | #
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
| bsd-3-clause |
ttfseiko/openerp-trunk | openerp/addons/base/ir/ir_attachment.py | 1 | 14659 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hashlib
import itertools
import logging
import os
import re
from openerp import tools
from openerp.osv import fields,osv
from openerp import SUPERUSER_ID
_logger = logging.getLogger(__name__)
class ir_attachment(osv.osv):
    """Attachments are used to link binary files or urls to any openerp document.

    External attachment storage
    ---------------------------
    The 'data' function field (_data_get, _data_set) is implemented using
    _file_read, _file_write and _file_delete which can be overridden to
    implement other storage engines; such methods should check for other
    location pseudo uris (example: hdfs://hadoopserver).

    The default implementation is the file:dirname location that stores files
    on the local filesystem using names based on their sha1 hash.
    """
    _order = 'id desc'

    def _name_get_resname(self, cr, uid, ids, object, method, context):
        # Function field: display name of the record each attachment points to,
        # truncated with '...' to fit the res_name column size.
        data = {}
        for attachment in self.browse(cr, uid, ids, context=context):
            model_object = attachment.res_model
            res_id = attachment.res_id
            if model_object and res_id:
                model_pool = self.pool[model_object]
                res = model_pool.name_get(cr,uid,[res_id],context)
                res_name = res and res[0][1] or False
                if res_name:
                    field = self._columns.get('res_name',False)
                    if field and len(res_name) > field.size:
                        res_name = res_name[:field.size-3] + '...'
                data[attachment.id] = res_name
            else:
                data[attachment.id] = False
        return data

    def _storage(self, cr, uid, context=None):
        # Storage backend selector: 'file' (default) or 'db', read from the
        # ir_attachment.location config parameter.
        return self.pool['ir.config_parameter'].get_param(cr, SUPERUSER_ID, 'ir_attachment.location', 'file')

    @tools.ormcache()
    def _filestore(self, cr, uid, context=None):
        # Root directory of the per-database filestore (cached per db).
        return os.path.join(tools.config['data_dir'], 'filestore', cr.dbname)

    # 'data' field implementation

    def _full_path(self, cr, uid, location, path):
        # Sanitize path and resolve it under the filestore root.
        # NOTE(review): this strips EVERY '.' character, not just '..'; that is
        # harmless for sha-based names but lossy for arbitrary paths — confirm
        # no caller passes dotted filenames.
        path = re.sub('[.]', '', path)
        path = path.strip('/\\')
        return os.path.join(self._filestore(cr, uid), path)

    def _get_path(self, cr, uid, location, bin_data):
        # Map the binary content to its sha1-based storage path, creating the
        # bucket directory on demand.  Returns (relative fname, absolute path).
        sha = hashlib.sha1(bin_data).hexdigest()
        # retro compatibility: honour the old 3-char bucket layout if present
        fname = sha[:3] + '/' + sha
        full_path = self._full_path(cr, uid, location, fname)
        if os.path.isfile(full_path):
            return fname, full_path        # keep existing path
        # scatter files across 256 dirs
        # we use '/' in the db (even on windows)
        fname = sha[:2] + '/' + sha
        full_path = self._full_path(cr, uid, location, fname)
        dirname = os.path.dirname(full_path)
        if not os.path.isdir(dirname):
            # NOTE(review): isdir+makedirs is racy under concurrent writers —
            # a parallel create could raise OSError here; verify acceptable.
            os.makedirs(dirname)
        return fname, full_path

    def _file_read(self, cr, uid, location, fname, bin_size=False):
        # Read a stored file; returns its size when bin_size is set, otherwise
        # its base64-encoded content.  Errors are logged and '' is returned.
        full_path = self._full_path(cr, uid, location, fname)
        r = ''
        try:
            if bin_size:
                r = os.path.getsize(full_path)
            else:
                r = open(full_path,'rb').read().encode('base64')
        except IOError:
            _logger.error("_read_file reading %s",full_path)
        return r

    def _file_write(self, cr, uid, location, value):
        # Store base64 *value* in the filestore (deduplicated by sha1 path);
        # returns the relative fname recorded in store_fname.
        bin_value = value.decode('base64')
        fname, full_path = self._get_path(cr, uid, location, bin_value)
        if not os.path.exists(full_path):
            try:
                with open(full_path, 'wb') as fp:
                    fp.write(bin_value)
            except IOError:
                _logger.error("_file_write writing %s", full_path)
        return fname

    def _file_delete(self, cr, uid, location, fname):
        # Unlink the backing file, but only when no other attachment still
        # references the same store_fname (content is deduplicated).
        count = self.search(cr, 1, [('store_fname','=',fname)], count=True)
        if count <= 1:
            full_path = self._full_path(cr, uid, location, fname)
            try:
                os.unlink(full_path)
            except OSError:
                _logger.error("_file_delete could not unlink %s",full_path)
            except IOError:
                # Harmless and needed for race conditions
                _logger.error("_file_delete could not unlink %s",full_path)

    def _data_get(self, cr, uid, ids, name, arg, context=None):
        # Getter of the 'datas' function field: dispatch to the filestore or
        # the db_datas column depending on the configured storage location.
        if context is None:
            context = {}
        result = {}
        location = self._storage(cr, uid, context)
        bin_size = context.get('bin_size')
        for attach in self.browse(cr, uid, ids, context=context):
            if location != 'db' and attach.store_fname:
                result[attach.id] = self._file_read(cr, uid, location, attach.store_fname, bin_size)
            else:
                result[attach.id] = attach.db_datas
        return result

    def _data_set(self, cr, uid, id, name, value, arg, context=None):
        # Setter of the 'datas' function field; writes with SUPERUSER_ID since
        # the triggering user may lack write access during create.
        # We dont handle setting data to null
        if not value:
            return True
        if context is None:
            context = {}
        location = self._storage(cr, uid, context)
        file_size = len(value.decode('base64'))
        if location != 'db':
            attach = self.browse(cr, uid, id, context=context)
            if attach.store_fname:
                # drop the previous content before storing the new one
                self._file_delete(cr, uid, location, attach.store_fname)
            fname = self._file_write(cr, uid, location, value)
            # SUPERUSER_ID as probably don't have write access, trigger during create
            super(ir_attachment, self).write(cr, SUPERUSER_ID, [id], {'store_fname': fname, 'file_size': file_size}, context=context)
        else:
            super(ir_attachment, self).write(cr, SUPERUSER_ID, [id], {'db_datas': value, 'file_size': file_size}, context=context)
        return True

    _name = 'ir.attachment'
    _columns = {
        'name': fields.char('Attachment Name',size=256, required=True),
        'datas_fname': fields.char('File Name',size=256),
        'description': fields.text('Description'),
        'res_name': fields.function(_name_get_resname, type='char', size=128, string='Resource Name', store=True),
        'res_model': fields.char('Resource Model',size=64, readonly=True, help="The database object this attachment will be attached to"),
        'res_id': fields.integer('Resource ID', readonly=True, help="The record id this is attached to"),
        'create_date': fields.datetime('Date Created', readonly=True),
        'create_uid': fields.many2one('res.users', 'Owner', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', change_default=True),
        'type': fields.selection( [ ('url','URL'), ('binary','Binary'), ],
                'Type', help="Binary File or URL", required=True, change_default=True),
        'url': fields.char('Url', size=1024),
        # al: We keep shitty field names for backward compatibility with document
        'datas': fields.function(_data_get, fnct_inv=_data_set, string='File Content', type="binary", nodrop=True),
        'store_fname': fields.char('Stored Filename', size=256),
        'db_datas': fields.binary('Database Data'),
        'file_size': fields.integer('File Size'),
    }

    _defaults = {
        'type': 'binary',
        'file_size': 0,
        'company_id': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'ir.attachment', context=c),
    }

    def _auto_init(self, cr, context=None):
        # Ensure the (res_model, res_id) lookup index exists on table creation.
        super(ir_attachment, self)._auto_init(cr, context)
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('ir_attachment_res_idx',))
        if not cr.fetchone():
            cr.execute('CREATE INDEX ir_attachment_res_idx ON ir_attachment (res_model, res_id)')
            cr.commit()

    def check(self, cr, uid, ids, mode, context=None, values=None):
        """Restricts the access to an ir.attachment, according to referred model.

        In the 'document' module, it is overridden to relax this hard rule, since
        more complex ones apply there.  Raises through ir.model.access /
        check_access_rule when the user may not access a linked document.
        """
        res_ids = {}
        if ids:
            if isinstance(ids, (int, long)):
                ids = [ids]
            cr.execute('SELECT DISTINCT res_model, res_id FROM ir_attachment WHERE id = ANY (%s)', (ids,))
            for rmod, rid in cr.fetchall():
                if not (rmod and rid):
                    continue
                res_ids.setdefault(rmod,set()).add(rid)
        if values:
            if values.get('res_model') and values.get('res_id'):
                res_ids.setdefault(values['res_model'],set()).add(values['res_id'])

        ima = self.pool.get('ir.model.access')
        for model, mids in res_ids.items():
            # ignore attachments that are not attached to a resource anymore when checking access rights
            # (resource was deleted but attachment was not)
            mids = self.pool[model].exists(cr, uid, mids)
            ima.check(cr, uid, model, mode)
            self.pool[model].check_access_rule(cr, uid, mids, mode, context=context)

    def _search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
        # Post-filter search results: drop attachments whose linked document
        # the user is not allowed to read, preserving the original ordering.
        ids = super(ir_attachment, self)._search(cr, uid, args, offset=offset,
                                                 limit=limit, order=order,
                                                 context=context, count=False,
                                                 access_rights_uid=access_rights_uid)
        if not ids:
            if count:
                return 0
            return []

        # Work with a set, as list.remove() is prohibitive for large lists of documents
        # (takes 20+ seconds on a db with 100k docs during search_count()!)
        orig_ids = ids
        ids = set(ids)

        # For attachments, the permissions of the document they are attached to
        # apply, so we must remove attachments for which the user cannot access
        # the linked document.
        # Use pure SQL rather than read() as it is about 50% faster for large dbs (100k+ docs),
        # and the permissions are checked in super() and below anyway.
        cr.execute("""SELECT id, res_model, res_id FROM ir_attachment WHERE id = ANY(%s)""", (list(ids),))
        targets = cr.dictfetchall()
        model_attachments = {}
        for target_dict in targets:
            if not target_dict['res_model']:
                continue
            # model_attachments = { 'model': { 'res_id': [id1,id2] } }
            model_attachments.setdefault(target_dict['res_model'],{}).setdefault(target_dict['res_id'] or 0, set()).add(target_dict['id'])

        # To avoid multiple queries for each attachment found, checks are
        # performed in batch as much as possible.
        ima = self.pool.get('ir.model.access')
        for model, targets in model_attachments.iteritems():
            if not self.pool.get(model):
                continue
            if not ima.check(cr, uid, model, 'read', False):
                # remove all corresponding attachment ids
                for attach_id in itertools.chain(*targets.values()):
                    ids.remove(attach_id)
                continue # skip ir.rule processing, these ones are out already
            # filter ids according to what access rules permit
            target_ids = targets.keys()
            allowed_ids = [0] + self.pool[model].search(cr, uid, [('id', 'in', target_ids)], context=context)
            disallowed_ids = set(target_ids).difference(allowed_ids)
            for res_id in disallowed_ids:
                for attach_id in targets[res_id]:
                    ids.remove(attach_id)

        # sort result according to the original sort ordering
        result = [id for id in orig_ids if id in ids]
        return len(result) if count else list(result)

    def read(self, cr, uid, ids, fields_to_read=None, context=None, load='_classic_read'):
        # Enforce linked-document access rights before reading.
        if isinstance(ids, (int, long)):
            ids = [ids]
        self.check(cr, uid, ids, 'read', context=context)
        return super(ir_attachment, self).read(cr, uid, ids, fields_to_read, context, load)

    def write(self, cr, uid, ids, vals, context=None):
        # Enforce access rights; file_size is computed, never user-writable.
        if isinstance(ids, (int, long)):
            ids = [ids]
        self.check(cr, uid, ids, 'write', context=context, values=vals)
        if 'file_size' in vals:
            del vals['file_size']
        return super(ir_attachment, self).write(cr, uid, ids, vals, context)

    def copy(self, cr, uid, id, default=None, context=None):
        # Copying requires write access on the linked document.
        self.check(cr, uid, [id], 'write', context=context)
        return super(ir_attachment, self).copy(cr, uid, id, default, context)

    def unlink(self, cr, uid, ids, context=None):
        # Enforce access rights, then drop the backing files (filestore mode)
        # before deleting the database rows.
        if isinstance(ids, (int, long)):
            ids = [ids]
        self.check(cr, uid, ids, 'unlink', context=context)
        location = self._storage(cr, uid, context)
        if location != 'db':
            for attach in self.browse(cr, uid, ids, context=context):
                if attach.store_fname:
                    self._file_delete(cr, uid, location, attach.store_fname)
        return super(ir_attachment, self).unlink(cr, uid, ids, context)

    def create(self, cr, uid, values, context=None):
        # Enforce access rights on the target document; file_size is computed.
        self.check(cr, uid, [], mode='write', context=context, values=values)
        if 'file_size' in values:
            del values['file_size']
        return super(ir_attachment, self).create(cr, uid, values, context)

    def action_get(self, cr, uid, context=None):
        # Return the window action used to browse attachments.
        return self.pool.get('ir.actions.act_window').for_xml_id(
            cr, uid, 'base', 'action_attachment', context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
michelts/lettuce | tests/integration/lib/Django-1.2.5/django/conf/locale/pt_BR/formats.py | 80 | 1252 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# Locale formats for Brazilian Portuguese (pt-BR).
# Output formats use Django's date-format syntax; a backslash escapes a
# literal character (e.g. \d\e renders the word "de").
DATE_FORMAT = r'j \de N \de Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = r'j \de N \de Y à\s H:i'
YEAR_MONTH_FORMAT = r'F \de Y'
MONTH_DAY_FORMAT = r'j \de F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y H:i'
FIRST_DAY_OF_WEEK = 0  # Sunday
# Input formats use strptime syntax; tried in order when parsing form input.
DATE_INPUT_FORMATS = (
    '%Y-%m-%d', '%d/%m/%Y', '%d/%m/%y', # '2006-10-25', '25/10/2006', '25/10/06'
    # '%d de %b de %Y', '%d de %b, %Y', # '25 de Out de 2006', '25 Out, 2006'
    # '%d de %B de %Y', '%d de %B, %Y', # '25 de Outubro de 2006', '25 de Outubro, 2006'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S', # '14:30:59'
    '%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
    '%d/%m/%Y %H:%M', # '25/10/2006 14:30'
    '%d/%m/%Y', # '25/10/2006'
    '%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
    '%d/%m/%y %H:%M', # '25/10/06 14:30'
    '%d/%m/%y', # '25/10/06'
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%Y-%m-%d', # '2006-10-25'
)
# Number formatting: 1.234.567,89
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| gpl-3.0 |
sourabhlal/cds | tests/unit/test_access.py | 1 | 2721 | # -*- coding: utf-8 -*-
#
# This file is part of CDS.
# Copyright (C) 2015 CERN.
#
# CDS is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CDS is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CDS; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Test access control package."""
from __future__ import absolute_import, print_function
import uuid
from cds.modules.access.access_control import cern_read_factory
from flask import g
from flask_principal import RoleNeed, UserNeed
from invenio_records.api import Record
from cds.modules.access.access_control import CERNRecordsSearch
def mock_provides(needs):
    """Install a fake identity on flask.g exposing the given needs."""
    identity = lambda: None
    identity.provides = needs
    g.identity = identity
def test_record_access(db):
    """Test access control for search."""
    mock_provides([UserNeed('test@test.ch'), RoleNeed('groupX')])

    def assert_access(json, allowed=True):
        # Store the record under a fresh UUID.
        rec_id = uuid.uuid4()
        record = type('obj', (object,), {'id': rec_id})
        Record.create(json, id_=rec_id)
        # The permission factory must grant or deny access as expected.
        permission = cern_read_factory(record)
        if allowed:
            assert permission.can()
        else:
            assert not permission.can()

    # No _access restriction: readable by anyone.
    assert_access({'foo': 'bar'})
    # Read list containing the current user's email.
    assert_access({'_access': {'read': ['test@test.ch', 'groupA', 'groupB']}})
    # Read list without the user or any of their groups.
    assert_access({'_access': {'read': ['test2@test2.ch', 'groupC']}}, False)
    # Read list containing one of the user's groups.
    assert_access({'_access': {'read': ['groupX']}})
    assert_access({'_access': {'read': ['test@test.ch', 'groupA', 'groupB']}})
    # Empty read list: open access.
    assert_access({'_access': {'read': []}})
def test_es_filter():
    """Test query filter based on CERN groups."""
    mock_provides([UserNeed('test@test.ch'), RoleNeed('groupX')])
    # Records are visible when _access.read is absent, or lists the user's
    # email or one of their groups.
    expected_filter = [
        {'bool': {'filter': [{'bool': {
            'should': [
                {'missing': {'field': '_access.read'}},
                {'terms': {'_access.read': ['test@test.ch', 'groupX']}}
            ]
        }}]}}
    ]
    assert CERNRecordsSearch().to_dict()['query']['bool']['filter'] == expected_filter
| gpl-2.0 |
gosquadron/squadron | squadron/service.py | 1 | 10440 | from commit import _get_service_file
import jsonschema
import subprocess
import fnmatch
import os
from fileio import dirio
from log import log
import glob
import tempfile
import stat
# JSON schema for one entry of a service's "actions" file: each action names
# the shell commands to run, optionally a working directory and a list of
# actions it must not run after (see react()).
_action_schema = {
    'type': 'object',
    'properties': {
        'commands': {
            'description': 'commands to run',
            'type': 'array',
            'items': {
                'type':'string'
            },
            'minItems': 1
        },
        'chdir': {
            'description': 'directory to change to before running commands',
            'type': 'string',
        },
        'not_after': {
            'description': 'don\'t run this after any of these actions',
            'type' : 'array',
            'items': {
                'type': 'string'
            },
            'uniqueItems': True
        }
    },
    'required': ['commands']
}
# JSON schema for a service's "react" file: a list of reactions, each naming
# the actions to execute and the trigger condition(s) under "when".
_reaction_schema = {
    'type':'array',
    'items':{
        'type':'object',
        'properties':{
            'execute':{
                'description':'which action to execute',
                'type':'array',
                'items':{
                    'type':'string'
                },
                'minItems': 1,
                'uniqueItems':True
            },
            'when':{
                'type':'object',
                'properties':{
                    'command':{
                        'description':'command to run, use with exitcode_not',
                        'type':'string'
                    },
                    'exitcode_not':{
                        'description':'exit code to match against (inverted)',
                        'type':'integer'
                    },
                    'files':{
                        'description':'if any of these files were created or modified',
                        'type':'array',
                        'items':{
                            'type':'string'
                        }
                    },
                    'files_created':{
                        'description':'if any of these files were created',
                        'type':'array',
                        'items':{
                            'type':'string'
                        }
                    },
                    'files_modified':{
                        'description':'if any of these files were modified',
                        'type':'array',
                        'items':{
                            'type':'string'
                        }
                    },
                    'always':{
                        'description':'run always',
                        'type':'boolean'
                    },
                    'not_exist':{
                        'description':'list of absolute paths (can use glob match) to files to check for existence',
                        'type':'array',
                        'items':{
                            'type':'string'
                        }
                    }
                }
            }
        },
        'required': ['execute', 'when']
    }
}
def get_service_actions(service_dir, service_name, service_ver, config):
    """
    Gets the actions supported by a service, keyed as 'service.action'.

    Keyword arguments:
        service_dir -- top level service directory
        service_name -- name of service
        service_ver -- service version
        config -- dict to template action with
    """
    action_desc = _get_service_file(service_dir, service_name, service_ver,
                                    'actions', config=config)

    result = {}
    for name, action in action_desc.items():
        # Dots are reserved as the service/action separator.
        if '.' in name:
            raise ValueError('Key {} in {} v{} is invalid, no dots allowed'.format(
                name, service_name, service_ver))

        jsonschema.validate(action, _action_schema)

        if 'not_after' in action:
            # Qualify bare not_after entries with this service's name.
            action['not_after'] = [
                item if '.' in item else service_name + '.' + item
                for item in action['not_after']
            ]

        result[service_name + '.' + name] = action
    return result
def _prepend_service_name(service_name, files):
ret = []
for f in files:
if not os.path.isabs(f):
ret.append(os.path.join(service_name, f))
else:
ret.append(f)
return ret
def get_reactions(service_dir, service_name, service_ver, config):
    """
    Gets the reaction description from a service.

    Keyword arguments:
        service_dir -- top level service directory
        service_name -- name of service
        service_ver -- service version
        config -- dict to template reaction with
    """
    reactions_desc = _get_service_file(service_dir, service_name, service_ver,
                                       'react', config=config)
    jsonschema.validate(reactions_desc, _reaction_schema)

    for reaction in reactions_desc:
        # Qualify bare action names with this service's name.
        reaction['execute'] = [
            action if '.' in action else service_name + '.' + action
            for action in reaction['execute']
        ]

        # File patterns are relative to the service directory unless absolute.
        when = reaction['when']
        for key in ('files_modified', 'files_created', 'files'):
            if key in when:
                when[key] = _prepend_service_name(service_name, when[key])

    return reactions_desc
def _checkfiles(filepatterns, paths_changed):
"""
Checks to see if any of the files changed match any of the file
patterns given. File patterns implicitly start at the root of the
deployment directory.
Keyword arguments:
filepatterns -- list of glob-style patterns
paths_changed -- list of paths changed, each item is relative to the
base deployment directory
"""
for pattern in filepatterns:
if fnmatch.filter(paths_changed, pattern):
return True
return False
def _runcommand(command, retcode):
ret = subprocess.call(command, shell=True)
return ret != retcode
def _checknotexists(files):
for f in files:
if not any(glob.iglob(f)):
return True
return False
def _execute(command, resources):
    """Run a shell command, materialising a 'resources/...' executable first.

    If the command's executable starts with 'resources/', the named resource
    callable is invoked to obtain the script body, which is written to a
    temporary file, marked user-executable and substituted for the
    executable before the command is run. The temp file is removed even if
    the command fails.
    """
    args = command.split()
    executable = args[0]
    tmp_file = None
    try:
        prefix = 'resources' + os.path.sep
        if executable.startswith(prefix):
            log.debug('%s in "%s" is a resource', executable, command)
            # delete=False: the shell must be able to open the file after we
            # close it; cleanup happens in the finally block below.
            tmp_file = tempfile.NamedTemporaryFile(prefix='sq-', suffix='-cmd', delete=False)

            # Resource lookup key is the path after the 'resources/' prefix.
            script = resources[executable[len(prefix):]]()
            tmp_file.write(script)

            # Add user-execute permission so the shell can run the script.
            stat_result = os.fstat(tmp_file.fileno())
            new_mode = stat_result.st_mode | stat.S_IXUSR
            os.fchmod(tmp_file.fileno(), new_mode)
            tmp_file.close()

            args[0] = tmp_file.name

        log.debug('Executing %s in dir %s', args, os.getcwd())
        # Raises subprocess.CalledProcessError on a non-zero exit code.
        subprocess.check_call(' '.join(args), shell=True)
    finally:
        if tmp_file:
            os.remove(tmp_file.name)
def react(actions, reactions, paths_changed, new_files, base_dir, resources):
    """
    Performs actions based on reaction criteria. Each action is only performed
    once, and reactions are handled in order.

    Keyword arguments:
        actions -- map of action names to action descriptions
        reactions -- list of reactions to check for
        paths_changes -- list of files that were updated
        new_files -- list of files that are new this run
        base_dir -- base deployment directory; relative chdir values are
            resolved against <base_dir>/<service_name>
        resources -- mapping of resource names to callables returning script
            bodies; passed through to _execute for 'resources/...' commands

    Raises:
        ValueError -- a reaction references an action not present in actions
        subprocess.CalledProcessError -- a command exited non-zero
    """
    done_actions = set()

    for reaction in reactions:
        run_action = False
        when = reaction['when']

        # First matching trigger wins; the elif chain short-circuits.
        if 'always' in when and when['always']:
            run_action = True
        elif 'command' in when and _runcommand(when['command'], when['exitcode_not']):
            run_action = True
        elif 'files' in when and _checkfiles(when['files'], paths_changed + new_files):
            run_action = True
        elif 'files_modified' in when and _checkfiles(when['files_modified'], paths_changed):
            run_action = True
        elif 'files_created' in when and _checkfiles(when['files_created'], new_files):
            run_action = True
        elif 'not_exist' in when and _checknotexists(when['not_exist']):
            run_action = True

        if not run_action:
            log.debug("Not running reaction {}".format(reaction))
            continue

        # Run action
        for action in reaction['execute']:
            log.info("Running action {} in reaction {}".format(action, reaction))
            if action in actions:
                if action not in done_actions:
                    # Actions must be unique
                    not_after = set()
                    action_item = actions[action]

                    if 'not_after' in action_item:
                        not_after = set(action_item['not_after'])

                    # Skip silently when a not_after constraint was violated.
                    if len(done_actions.intersection(not_after)) == 0:
                        # Let's do this
                        # Action names are 'service.action'; strip the
                        # extension-like suffix to recover the service name.
                        service_name = os.path.splitext(action)[0]
                        if 'chdir' in action_item:
                            chdir = action_item['chdir']
                            if not os.path.isabs(chdir):
                                chdir = os.path.join(base_dir, service_name, chdir)
                        else:
                            # For the commands, put us in the service directory
                            # so that relative commands will work
                            chdir = os.path.join(base_dir, service_name)

                        with dirio.SafeChdir(chdir):
                            for command in action_item['commands']:
                                try:
                                    _execute(command, resources)
                                except subprocess.CalledProcessError as e:
                                    log.error("Command {} errored with code {}".format(command, e.returncode))
                                    raise e

                        done_actions.add(action)
            else:
                raise ValueError(
                    'Action {} from reaction {} not in action list'.format(
                        action, reaction))
| mit |
AnotherIvan/calibre | src/tinycss/fonts3.py | 14 | 1692 | #!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
from tinycss.css21 import CSS21Parser, ParseError
class FontFaceRule(object):
    """A parsed ``@font-face`` rule: its descriptor declarations plus the
    source position where the rule appeared."""
    at_keyword = '@font-face'
    __slots__ = 'declarations', 'line', 'column'

    def __init__(self, declarations, line, column):
        self.declarations = declarations
        self.line = line
        self.column = column

    def __repr__(self):
        return '<{0.__class__.__name__} at {0.line}:{0.column}>'.format(self)
class CSSFonts3Parser(CSS21Parser):
    ''' Parse @font-face rules from the CSS 3 fonts module '''

    ALLOWED_CONTEXTS_FOR_FONT_FACE = {'stylesheet', '@media', '@page'}

    def __init__(self):
        super(CSSFonts3Parser, self).__init__()
        # Register our handler for the @font-face at-keyword.
        self.at_parsers['@font-face'] = self.parse_font_face_rule

    def parse_font_face_rule(self, rule, previous_rules, errors, context):
        # @font-face is only valid at stylesheet level or inside @media/@page.
        if context not in self.ALLOWED_CONTEXTS_FOR_FONT_FACE:
            raise ParseError(rule,
                             '@font-face rule not allowed in ' + context)
        # The rule must have a declaration block and nothing before it.
        if rule.body is None:
            raise ParseError(rule,
                             'invalid {0} rule: missing block'.format(rule.at_keyword))
        if rule.head:
            raise ParseError(rule, '{0} rule is not allowed to have content before the descriptor declaration'.format(rule.at_keyword))
        declarations, decerrors = self.parse_declaration_list(rule.body)
        errors.extend(decerrors)
        return FontFaceRule(declarations, rule.line, rule.column)
| gpl-3.0 |
ProkopHapala/SimpleSimulationEngine | python/pySimE/space/exp/pykep/lambert_Prokop.py | 1 | 1376 |
import numpy as np
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from PyKEP import epoch, DAY2SEC, planet_ss, AU, MU_SUN, lambert_problem
from PyKEP.orbit_plots import plot_planet, plot_lambert
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
ax = fig.gca(projection='3d')

# Departure epoch (Earth) and arrival epoch (Mars), MJD2000 days.
t1 = epoch(0)
t2 = epoch(740)
# Transfer time in seconds for the Lambert solver.
dt = (t2.mjd2000 - t1.mjd2000) * DAY2SEC

# Sun at the origin.
ax.scatter(0,0,0, color='y')

# Earth orbit and ephemeris at departure.
pl = planet_ss('earth')
plot_planet(ax,pl, t0=t1, color=(0.8,0.8,1), legend=True, units = AU)
rE,vE = pl.eph(t1)

# Mars orbit and ephemeris at arrival.
pl = planet_ss('mars')
plot_planet(ax,pl, t0=t2, color=(0.8,0.8,1), legend=True, units = AU)
rM, vM = pl.eph(t2)

# Solve the Lambert problem between the two position vectors.
l = lambert_problem(rE,rM,dt,MU_SUN)
nmax = l.get_Nmax()
print "max number of revolutions",nmax

# Plot the zero-revolution solution, then all multi-revolution solutions
# (two per revolution count), shading towards magenta.
plot_lambert(ax,l , color=(1,0,0), legend=True, units = AU)
for i in range(1,nmax*2+1):
    print i
    plot_lambert(ax,l,sol=i, color=(1,0,i/float(nmax*2)), legend=True, units = AU)
def axisEqual3D(ax):
    """Force equal scaling on all three axes of a 3D matplotlib plot by
    centring each axis on its midpoint with a common half-width."""
    limits = np.array([getattr(ax, 'get_{}lim'.format(axis))() for axis in 'xyz'])
    spans = limits[:, 1] - limits[:, 0]
    midpoints = np.mean(limits, axis=1)
    radius = max(abs(spans)) / 2
    for midpoint, axis in zip(midpoints, 'xyz'):
        getattr(ax, 'set_{}lim'.format(axis))(midpoint - radius, midpoint + radius)
# Equalise axis scales before showing the interactive figure.
axisEqual3D(ax)
plt.show()
| mit |
verdurin/easybuild-easyblocks | easybuild/easyblocks/generic/configuremake.py | 10 | 4818 | ##
# Copyright 2009-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for software that uses the GNU installation procedure,
i.e. configure/make/make install, implemented as an easyblock.
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Toon Willems (Ghent University)
"""
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.run import run_cmd
class ConfigureMake(EasyBlock):
    """
    Support for building and installing applications with configure/make/make install
    """

    @staticmethod
    def extra_options(extra_vars=None):
        """Extra easyconfig parameters specific to ConfigureMake."""
        extra_vars = EasyBlock.extra_options(extra_vars)
        extra_vars.update({
            'configure_cmd_prefix': ['', "Prefix to be glued before ./configure", CUSTOM],
            'prefix_opt': ['--prefix=', "Prefix command line option for configure script", CUSTOM],
            'tar_config_opts': [False, "Override tar settings as determined by configure.", CUSTOM],
        })
        return extra_vars

    def configure_step(self, cmd_prefix=''):
        """
        Configure step
        - typically ./configure --prefix=/install/path style

        cmd_prefix -- string glued before ./configure; overridden by the
                      'configure_cmd_prefix' easyconfig parameter when set.
        Returns the configure command output.
        """
        if self.cfg['configure_cmd_prefix']:
            # Easyconfig parameter wins over the argument.
            if cmd_prefix:
                tup = (cmd_prefix, self.cfg['configure_cmd_prefix'])
                self.log.debug("Specified cmd_prefix '%s' is overruled by configure_cmd_prefix '%s'" % tup)
            cmd_prefix = self.cfg['configure_cmd_prefix']

        if self.cfg['tar_config_opts']:
            # setting am_cv_prog_tar_ustar avoids that configure tries to figure out
            # which command should be used for tarring/untarring
            # am__tar and am__untar should be set to something decent (tar should work)
            tar_vars = {
                'am__tar': 'tar chf - "$$tardir"',
                'am__untar': 'tar xf -',
                'am_cv_prog_tar_ustar': 'easybuild_avoid_ustar_testing'
            }
            for (key, val) in tar_vars.items():
                self.cfg.update('preconfigopts', "%s='%s'" % (key, val))

        # Assemble: <preconfigopts> <prefix>./configure --prefix=<installdir> <configopts>
        cmd = "%(preconfigopts)s %(cmd_prefix)s./configure %(prefix_opt)s%(installdir)s %(configopts)s" % {
            'preconfigopts': self.cfg['preconfigopts'],
            'cmd_prefix': cmd_prefix,
            'prefix_opt': self.cfg['prefix_opt'],
            'installdir': self.installdir,
            'configopts': self.cfg['configopts'],
        }

        (out, _) = run_cmd(cmd, log_all=True, simple=False)

        return out

    def build_step(self, verbose=False, path=None):
        """
        Start the actual build
        - typical: make -j X

        verbose -- whether to log command output as it is produced.
        Returns the build command output.
        """
        paracmd = ''
        if self.cfg['parallel']:
            paracmd = "-j %s" % self.cfg['parallel']

        cmd = "%s make %s %s" % (self.cfg['prebuildopts'], paracmd, self.cfg['buildopts'])

        (out, _) = run_cmd(cmd, path=path, log_all=True, simple=False, log_output=verbose)

        return out

    def test_step(self):
        """
        Test the compilation
        - default: None (only runs when the 'runtest' easyconfig parameter is set)
        """
        if self.cfg['runtest']:
            cmd = "make %s" % (self.cfg['runtest'])
            (out, _) = run_cmd(cmd, log_all=True, simple=False)

            return out

    def install_step(self):
        """
        Create the installation in correct location
        - typical: make install

        Returns the install command output.
        """
        cmd = "%s make install %s" % (self.cfg['preinstallopts'], self.cfg['installopts'])

        (out, _) = run_cmd(cmd, log_all=True, simple=False)

        return out
| gpl-2.0 |
not-nexus/shelf | tests/route_tester/base.py | 2 | 5737 | import json
import re
class Base(object):
    """
    Base class for route testers.

    Builds an endpoint from a route pattern plus parameters, issues HTTP
    requests through the supplied test client, and asserts the response
    against expectations previously registered via expect().
    """
    def __init__(self, test, test_client):
        """
        Args:
            test: test case instance; provides assertEqual and the custom
                asserts helper used for JSON comparison.
            test_client: werkzeug/flask test client used to issue requests.
        """
        self.test_client = test_client
        self.test = test
        self._endpoint = None
        self.params = None
        self.status_code = None
        self.response = None
        self.headers = None
        self.route = None

    @property
    def endpoint(self):
        """
        The route with parameters substituted; computed lazily and cached.
        """
        if not self._endpoint:
            self._endpoint = self.route

            if self.params:
                self._endpoint = self.route.format(**self.params)

            # This is specifically for the search route but I cannot see any draw
            # backs with having this here.
            if re.search("//", self._endpoint):
                self._endpoint = re.sub("//", "/", self._endpoint)

        return self._endpoint

    def route_params(self, **params):
        """
        Sets the route parameters

        Arg:
            params(dict): Names and values of route parameters.

        Returns:
            self(route_tester.base.Base)
        """
        self.params = params
        return self

    def expect(self, status_code, response=None, headers=None):
        """
        Sets the expected status_code, response and headers. To be asserted
        after the next request.

        Arg:
            status_code(int): status code of response expected.
            response(object): expected decoded response.
            headers(dict | None): expected response headers.

        Returns:
            self(route_test.base.Base)
        """
        self.status_code = status_code
        self.response = response
        self.headers = None

        if headers:
            self.headers = self._normalize_headers(headers)

        return self

    def get(self, data=None, headers=None):
        """
        Performs a GET request on the test client and asserts the response.

        Args:
            data(dict | None): data for request.
            headers(dict | None): headers for request.
        """
        self._request("get", data, headers)

    def post(self, data=None, headers=None):
        """
        Performs a POST request on the test client and asserts the response.

        Args:
            data(dict | None): data for request.
            headers(dict | None): headers for request.
        """
        self._request("post", data, headers)

    def put(self, data=None, headers=None):
        """
        Performs a PUT request on the test client and asserts the response.

        Args:
            data(dict | None): data for request.
            headers(dict | None): headers for request.
        """
        self._request("put", data, headers)

    def delete(self, data=None, headers=None):
        """
        Performs a DELETE request on the test client and asserts the response.

        Args:
            data(dict | None): data for request.
            headers(dict | None): headers for request.
        """
        self._request("delete", data, headers)

    def head(self, data=None, headers=None):
        """
        Performs a HEAD request on the test client and asserts the response.

        Args:
            data(dict | None): data for request.
            headers(dict | None): headers for request.
        """
        self._request("head", data, headers)

    def _request(self, method, data, headers):
        """
        Encodes data, issues the request via the named test client method
        and asserts the response against the registered expectations.

        Args:
            method(basestring): test client method name, e.g. "get".
            data(dict | None): data for request.
            headers(dict | None): headers for request.
        """
        data = self._encode(data)
        response = getattr(self.test_client, method)(self.endpoint, data=data, headers=headers)
        self._assert(response)

    def _assert(self, actual_response):
        """
        Asserts status code, decoded body and headers when expectations
        were registered via expect().
        """
        if self.status_code:
            self.test.assertEqual(self.status_code, actual_response.status_code)

        if self.response:
            data = actual_response.get_data()

            # Fall back to the raw body when it is not valid JSON.
            try:
                actual = json.loads(data)
            except ValueError:
                actual = data

            if isinstance(actual, basestring):
                self.test.assertEqual(self.response, actual)
            else:
                self.test.asserts.json_equals(self.response, actual)

        if self.headers:
            self._assert_headers(actual_response.headers)

    def _assert_headers(self, actual_headers):
        """
        Validates that the expected headers match the actual headers.

        Args:
            actual_headers(werkzeug.datastructures.EnvironHeaders)
        """
        for key, expected_list in self.headers.iteritems():
            actual_list = actual_headers.getlist(key)
            self.test.assertEqual(expected_list, actual_list)

    def _encode(self, data):
        """
        JSON-encodes data when possible; unencodable values pass through.
        """
        if data is not None:
            try:
                data = json.dumps(data)
            except TypeError:
                pass

        return data

    def _normalize_headers(self, headers):
        """
        Forces all values to be a list if it is
        not already a list. This is so that I have
        an expected format to assert against later.

        This change is supposed to fix the problem where
        we can have multiple of the same response header

        Args:
            headers(dict): Orginal headers

        Returns:
            dict<list>
        """
        new_headers = {}
        for key, value in headers.iteritems():
            if not isinstance(value, list):
                value = [value]

            new_headers[key] = value

        return new_headers
| mit |
jonpry/PHDL | ply/lex.py | 2 | 42889 | # -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2015,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
__version__ = '3.7'
__tabversion__ = '3.5'
import re
import sys
import types
import copy
import os
import inspect
# This tuple contains known string types, covering both Python 2
# (str/unicode) and Python 3 (str/bytes) interpreters.
try:
    # Python 2.6
    StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
    # Python 3.0
    StringTypes = (str, bytes)

# This regular expression is used to match valid token names declared
# in a module's 'tokens' list.
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
    """Raised when the lexer hits input it cannot tokenize and no default
    error handler is defined. The unconsumed input is kept in ``text``."""
    def __init__(self, message, s):
        self.args = (message,)
        self.text = s
# Token class. This class is used to represent the tokens produced.
class LexToken(object):
    """A single token produced by the lexer.

    Attributes are assigned externally: type, value, lineno, lexpos.
    """
    def __repr__(self):
        return str(self)

    def __str__(self):
        return 'LexToken(%s,%r,%d,%d)' % (self.type, self.value, self.lineno, self.lexpos)
# This object is a stand-in for a logging object created by the
# logging module.
class PlyLogger(object):
    """File-object-backed stand-in for a logging.Logger.

    Only the methods PLY itself calls are provided; info and debug are
    aliases of critical, so every message is written unfiltered.
    """
    def __init__(self, f):
        self.f = f

    def _emit(self, prefix, msg, args):
        # All levels funnel through here: printf-style expansion plus newline.
        self.f.write(prefix + (msg % args) + '\n')

    def critical(self, msg, *args, **kwargs):
        self._emit('', msg, args)

    def warning(self, msg, *args, **kwargs):
        self._emit('WARNING: ', msg, args)

    def error(self, msg, *args, **kwargs):
        self._emit('ERROR: ', msg, args)

    info = critical
    debug = critical
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
    """Logger that silently discards everything: every attribute access and
    every call simply returns the logger itself."""
    def __call__(self, *args, **kwargs):
        return self

    def __getattribute__(self, name):
        return self
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
class Lexer:
    def __init__(self):
        """Initialise an empty lexer; rules are filled in by lex()/readtab()."""
        self.lexre = None # Master regular expression. This is a list of
                          # tuples (re, findex) where re is a compiled
                          # regular expression and findex is a list
                          # mapping regex group numbers to rules
        self.lexretext = None # Current regular expression strings
        self.lexstatere = {} # Dictionary mapping lexer states to master regexs
        self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
        self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names
        self.lexstate = 'INITIAL' # Current lexer state
        self.lexstatestack = [] # Stack of lexer states (for push_state/pop_state)
        self.lexstateinfo = None # State information
        self.lexstateignore = {} # Dictionary of ignored characters for each state
        self.lexstateerrorf = {} # Dictionary of error functions for each state
        self.lexstateeoff = {} # Dictionary of eof functions for each state
        self.lexreflags = 0 # Optional re compile flags
        self.lexdata = None # Actual input data (as a string)
        self.lexpos = 0 # Current position in input text
        self.lexlen = 0 # Length of the input text
        self.lexerrorf = None # Error rule (if any)
        self.lexeoff = None # EOF rule (if any)
        self.lextokens = None # List of valid tokens
        self.lexignore = '' # Ignored characters
        self.lexliterals = '' # Literal characters that can be passed through
        self.lexmodule = None # Module
        self.lineno = 1 # Current line number
        self.lexoptimize = False # Optimized mode
    def clone(self, object=None):
        """Return a shallow copy of the lexer.

        If *object* is given, the clone is attached to it: every rule
        function and error handler bound to the original module/object is
        rebound to the same-named attribute on *object*.
        """
        c = copy.copy(self)

        # If the object parameter has been supplied, it means we are attaching the
        # lexer to a new object.  In this case, we have to rebind all methods in
        # the lexstatere and lexstateerrorf tables.

        if object:
            newtab = {}
            for key, ritem in self.lexstatere.items():
                newre = []
                for cre, findex in ritem:
                    newfindex = []
                    for f in findex:
                        # Entries without a rule function are kept as-is.
                        if not f or not f[0]:
                            newfindex.append(f)
                            continue
                        newfindex.append((getattr(object, f[0].__name__), f[1]))
                    newre.append((cre, newfindex))
                newtab[key] = newre
            c.lexstatere = newtab
            c.lexstateerrorf = {}
            for key, ef in self.lexstateerrorf.items():
                c.lexstateerrorf[key] = getattr(object, ef.__name__)
            c.lexmodule = object
        return c
# ------------------------------------------------------------
# writetab() - Write lexer information to a table file
# ------------------------------------------------------------
    def writetab(self, lextab, outputdir=''):
        """Write the lexer's tables to *outputdir*/<lextab>.py so they can be
        reloaded later with readtab() instead of being rebuilt."""
        if isinstance(lextab, types.ModuleType):
            raise IOError("Won't overwrite existing lextab module")
        basetabmodule = lextab.split('.')[-1]
        filename = os.path.join(outputdir, basetabmodule) + '.py'
        with open(filename, 'w') as tf:
            tf.write('# %s.py. This file automatically created by PLY (version %s). Don\'t edit!\n' % (basetabmodule, __version__))
            tf.write('_tabversion   = %s\n' % repr(__tabversion__))
            tf.write('_lextokens    = %s\n' % repr(self.lextokens))
            tf.write('_lexreflags   = %s\n' % repr(self.lexreflags))
            tf.write('_lexliterals  = %s\n' % repr(self.lexliterals))
            tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo))

            # Rewrite the lexstatere table, replacing function objects with function names
            tabre = {}
            for statename, lre in self.lexstatere.items():
                titem = []
                for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]):
                    titem.append((retext, _funcs_to_names(func, renames)))
                tabre[statename] = titem

            tf.write('_lexstatere   = %s\n' % repr(tabre))
            tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore))

            # Error and EOF handlers are serialised by function name only.
            taberr = {}
            for statename, ef in self.lexstateerrorf.items():
                taberr[statename] = ef.__name__ if ef else None
            tf.write('_lexstateerrorf = %s\n' % repr(taberr))

            tabeof = {}
            for statename, ef in self.lexstateeoff.items():
                tabeof[statename] = ef.__name__ if ef else None
            tf.write('_lexstateeoff = %s\n' % repr(tabeof))
# ------------------------------------------------------------
# readtab() - Read lexer information from a tab file
# ------------------------------------------------------------
    def readtab(self, tabfile, fdict):
        """Load lexer tables from a module (or module name) written by
        writetab(); *fdict* maps rule-function names back to callables."""
        if isinstance(tabfile, types.ModuleType):
            lextab = tabfile
        else:
            exec('import %s' % tabfile)
            lextab = sys.modules[tabfile]

        if getattr(lextab, '_tabversion', '0.0') != __tabversion__:
            raise ImportError('Inconsistent PLY version')

        self.lextokens      = lextab._lextokens
        self.lexreflags     = lextab._lexreflags
        self.lexliterals    = lextab._lexliterals
        self.lextokens_all  = self.lextokens | set(self.lexliterals)
        self.lexstateinfo   = lextab._lexstateinfo
        self.lexstateignore = lextab._lexstateignore
        self.lexstatere     = {}
        self.lexstateretext = {}
        for statename, lre in lextab._lexstatere.items():
            titem = []
            txtitem = []
            for pat, func_name in lre:
                titem.append((re.compile(pat, lextab._lexreflags | re.VERBOSE), _names_to_funcs(func_name, fdict)))
            # NOTE(review): txtitem is never populated here, so
            # lexstateretext entries loaded from a table file stay empty --
            # verify against writetab()/clone(), which iterate over them.
            self.lexstatere[statename] = titem
            self.lexstateretext[statename] = txtitem

        self.lexstateerrorf = {}
        for statename, ef in lextab._lexstateerrorf.items():
            self.lexstateerrorf[statename] = fdict[ef]

        self.lexstateeoff = {}
        for statename, ef in lextab._lexstateeoff.items():
            self.lexstateeoff[statename] = fdict[ef]

        self.begin('INITIAL')
# ------------------------------------------------------------
# input() - Push a new string into the lexer
# ------------------------------------------------------------
def input(self, s):
# Pull off the first character to see if s looks like a string
c = s[:1]
if not isinstance(c, StringTypes):
raise ValueError('Expected a string')
self.lexdata = s
self.lexpos = 0
self.lexlen = len(s)
    # ------------------------------------------------------------
    # begin() - Changes the lexing state
    # ------------------------------------------------------------
    def begin(self, state):
        """Switch to lexing state *state*, activating its rules, ignore set,
        and error/EOF handlers.  Raises ValueError for an unknown state."""
        if state not in self.lexstatere:
            raise ValueError('Undefined state')
        self.lexre = self.lexstatere[state]
        self.lexretext = self.lexstateretext[state]
        self.lexignore = self.lexstateignore.get(state, '')
        self.lexerrorf = self.lexstateerrorf.get(state, None)
        self.lexeoff = self.lexstateeoff.get(state, None)
        self.lexstate = state
    # ------------------------------------------------------------
    # push_state() - Changes the lexing state and saves old on stack
    # ------------------------------------------------------------
    def push_state(self, state):
        """Enter *state*, remembering the current state for pop_state()."""
        self.lexstatestack.append(self.lexstate)
        self.begin(state)
    # ------------------------------------------------------------
    # pop_state() - Restores the previous state
    # ------------------------------------------------------------
    def pop_state(self):
        """Return to the state saved by the matching push_state() call."""
        self.begin(self.lexstatestack.pop())
    # ------------------------------------------------------------
    # current_state() - Returns the current lexing state
    # ------------------------------------------------------------
    def current_state(self):
        """Return the name of the active lexing state."""
        return self.lexstate
    # ------------------------------------------------------------
    # skip() - Skip ahead n characters
    # ------------------------------------------------------------
    def skip(self, n):
        """Advance the scan position by *n* characters without tokenizing."""
        self.lexpos += n
    # ------------------------------------------------------------
    # opttoken() - Return the next token from the Lexer
    #
    # Note: This function has been carefully implemented to be as fast
    # as possible.  Don't make changes unless you really know what
    # you are doing
    # ------------------------------------------------------------
    def token(self):
        """Return the next LexToken from the input, or None at end of input."""
        # Make local copies of frequently referenced attributes
        lexpos = self.lexpos
        lexlen = self.lexlen
        lexignore = self.lexignore
        lexdata = self.lexdata

        while lexpos < lexlen:
            # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
            if lexdata[lexpos] in lexignore:
                lexpos += 1
                continue

            # Look for a regular expression match
            for lexre, lexindexfunc in self.lexre:
                m = lexre.match(lexdata, lexpos)
                if not m:
                    continue

                # Create a token for return
                tok = LexToken()
                tok.value = m.group()
                tok.lineno = self.lineno
                tok.lexpos = lexpos

                # lastindex identifies which named rule group matched.
                i = m.lastindex
                func, tok.type = lexindexfunc[i]

                if not func:
                    # If no token type was set, it's an ignored token
                    if tok.type:
                        self.lexpos = m.end()
                        return tok
                    else:
                        lexpos = m.end()
                        break

                lexpos = m.end()

                # If token is processed by a function, call it
                tok.lexer = self      # Set additional attributes useful in token rules
                self.lexmatch = m
                self.lexpos = lexpos
                newtok = func(tok)

                # Every function must return a token, if nothing, we just move to next token
                if not newtok:
                    lexpos = self.lexpos         # This is here in case user has updated lexpos.
                    lexignore = self.lexignore   # This is here in case there was a state change
                    break

                # Verify type of the token.  If not in the token map, raise an error
                if not self.lexoptimize:
                    if newtok.type not in self.lextokens_all:
                        raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
                            func.__code__.co_filename, func.__code__.co_firstlineno,
                            func.__name__, newtok.type), lexdata[lexpos:])

                return newtok
            else:
                # No match, see if in literals
                if lexdata[lexpos] in self.lexliterals:
                    tok = LexToken()
                    tok.value = lexdata[lexpos]
                    tok.lineno = self.lineno
                    tok.type = tok.value
                    tok.lexpos = lexpos
                    self.lexpos = lexpos + 1
                    return tok

                # No match. Call t_error() if defined.
                if self.lexerrorf:
                    tok = LexToken()
                    tok.value = self.lexdata[lexpos:]
                    tok.lineno = self.lineno
                    tok.type = 'error'
                    tok.lexer = self
                    tok.lexpos = lexpos
                    self.lexpos = lexpos
                    newtok = self.lexerrorf(tok)
                    if lexpos == self.lexpos:
                        # Error method didn't change text position at all. This is an error.
                        raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                    lexpos = self.lexpos
                    if not newtok:
                        continue
                    return newtok

                self.lexpos = lexpos
                raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:])

        if self.lexeoff:
            # End of input: give the user's t_eof rule a chance to supply
            # more data or return one final token.
            tok = LexToken()
            tok.type = 'eof'
            tok.value = ''
            tok.lineno = self.lineno
            tok.lexpos = lexpos
            tok.lexer = self
            self.lexpos = lexpos
            newtok = self.lexeoff(tok)
            return newtok

        self.lexpos = lexpos + 1
        if self.lexdata is None:
            raise RuntimeError('No input string given with input()')
        return None
    # Iterator interface
    def __iter__(self):
        """Support ``for tok in lexer`` iteration."""
        return self
    def next(self):
        """Return the next token, raising StopIteration at end of input."""
        t = self.token()
        if t is None:
            raise StopIteration
        return t

    # Python 3 iteration protocol uses __next__; alias it to next().
    __next__ = next
# -----------------------------------------------------------------------------
# ==== Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# _get_regex(func)
#
# Returns the regular expression assigned to a function either as a doc string
# or as a .regex attribute attached by the @TOKEN decorator.
# -----------------------------------------------------------------------------
def _get_regex(func):
return getattr(func, 'regex', func.__doc__)
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# Return a dictionary of all symbols visible in the frame *levels* steps up
# the call stack (globals, overlaid with locals when they differ).  Used to
# discover the environment of a lex()/yacc() call when none is given.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
    frame = sys._getframe(levels)
    symbols = dict(frame.f_globals)
    if frame.f_globals != frame.f_locals:
        symbols.update(frame.f_locals)
    return symbols
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist, namelist):
result = []
for f, name in zip(funclist, namelist):
if f and f[0]:
result.append((name, f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist, fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]], n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist, reflags, ldict, toknames):
if not relist:
return []
regex = '|'.join(relist)
try:
lexre = re.compile(regex, re.VERBOSE | reflags)
# Build the index to function map for the matching engine
lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1)
lexindexnames = lexindexfunc[:]
for f, i in lexre.groupindex.items():
handle = ldict.get(f, None)
if type(handle) in (types.FunctionType, types.MethodType):
lexindexfunc[i] = (handle, toknames[f])
lexindexnames[i] = f
elif handle is not None:
lexindexnames[i] = f
if f.find('ignore_') > 0:
lexindexfunc[i] = (None, None)
else:
lexindexfunc[i] = (None, toknames[f])
return [(lexre, lexindexfunc)], [regex], [lexindexnames]
except Exception:
m = int(len(relist)/2)
if m == 0:
m = 1
llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames)
rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames)
return (llist+rlist), (lre+rre), (lnames+rnames)
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s, names):
nonstate = 1
parts = s.split('_')
for i, part in enumerate(parts[1:], 1):
if part not in names and part != 'ANY':
break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names)
tokenname = '_'.join(parts[i:])
return (states, tokenname)
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
    """Collect and validate a lexer specification (tokens, literals, states,
    and t_* rules) extracted from a user-supplied namespace dictionary."""

    def __init__(self, ldict, log=None, reflags=0):
        self.ldict = ldict          # Namespace (module dict) being scanned
        self.error_func = None
        self.tokens = []
        self.reflags = reflags
        self.stateinfo = {'INITIAL': 'inclusive'}   # state name -> 'inclusive'|'exclusive'
        self.modules = set()        # Modules contributing rules (for redefinition checks)
        self.error = False          # Sticky error flag set by any validation failure
        self.log = PlyLogger(sys.stderr) if log is None else log

    # Get all of the basic information
    def get_all(self):
        """Collect tokens, literals, states, and rules from the namespace."""
        self.get_tokens()
        self.get_literals()
        self.get_states()
        self.get_rules()

    # Validate all of the information
    def validate_all(self):
        """Run every validation pass; return True if any error was found."""
        self.validate_tokens()
        self.validate_literals()
        self.validate_rules()
        return self.error

    # Get the tokens map
    def get_tokens(self):
        """Read the required ``tokens`` list/tuple from the namespace."""
        tokens = self.ldict.get('tokens', None)
        if not tokens:
            self.log.error('No token list is defined')
            self.error = True
            return

        if not isinstance(tokens, (list, tuple)):
            self.log.error('tokens must be a list or tuple')
            self.error = True
            return

        if not tokens:
            self.log.error('tokens is empty')
            self.error = True
            return

        self.tokens = tokens

    # Validate the tokens
    def validate_tokens(self):
        """Check token names are valid identifiers; warn on duplicates."""
        terminals = {}
        for n in self.tokens:
            if not _is_identifier.match(n):
                self.log.error("Bad token name '%s'", n)
                self.error = True
            if n in terminals:
                self.log.warning("Token '%s' multiply defined", n)
            terminals[n] = 1

    # Get the literals specifier
    def get_literals(self):
        """Read the optional ``literals`` specifier (defaults to '')."""
        self.literals = self.ldict.get('literals', '')
        if not self.literals:
            self.literals = ''

    # Validate literals
    def validate_literals(self):
        """Every literal must be a single-character string."""
        try:
            for c in self.literals:
                if not isinstance(c, StringTypes) or len(c) > 1:
                    self.log.error('Invalid literal %s. Must be a single character', repr(c))
                    self.error = True
        except TypeError:
            self.log.error('Invalid literals specification. literals must be a sequence of characters')
            self.error = True

    def get_states(self):
        """Read and validate the optional ``states`` declaration, building
        the stateinfo map (INITIAL is always present and inclusive)."""
        self.states = self.ldict.get('states', None)
        # Build statemap
        if self.states:
            if not isinstance(self.states, (tuple, list)):
                self.log.error('states must be defined as a tuple or list')
                self.error = True
            else:
                for s in self.states:
                    if not isinstance(s, tuple) or len(s) != 2:
                        self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')", repr(s))
                        self.error = True
                        continue
                    name, statetype = s
                    if not isinstance(name, StringTypes):
                        self.log.error('State name %s must be a string', repr(name))
                        self.error = True
                        continue
                    if not (statetype == 'inclusive' or statetype == 'exclusive'):
                        self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name)
                        self.error = True
                        continue
                    if name in self.stateinfo:
                        self.log.error("State '%s' already defined", name)
                        self.error = True
                        continue
                    self.stateinfo[name] = statetype

    # Get all of the symbols with a t_ prefix and sort them into various
    # categories (functions, strings, error functions, and ignore characters)
    def get_rules(self):
        """Partition every t_* symbol into per-state function rules, string
        rules, ignore strings, error handlers, and EOF handlers."""
        tsymbols = [f for f in self.ldict if f[:2] == 't_']

        # Now build up a list of functions and a list of strings
        self.toknames = {}        # Mapping of symbols to token names
        self.funcsym = {}         # Symbols defined as functions
        self.strsym = {}          # Symbols defined as strings
        self.ignore = {}          # Ignore strings by state
        self.errorf = {}          # Error functions by state
        self.eoff = {}            # EOF functions by state

        for s in self.stateinfo:
            self.funcsym[s] = []
            self.strsym[s] = []

        if len(tsymbols) == 0:
            self.log.error('No rules of the form t_rulename are defined')
            self.error = True
            return

        for f in tsymbols:
            t = self.ldict[f]
            states, tokname = _statetoken(f, self.stateinfo)
            self.toknames[f] = tokname

            if hasattr(t, '__call__'):
                # Callable rules: error/eof handlers are stored separately;
                # ignore must be a string, everything else is a token rule.
                if tokname == 'error':
                    for s in states:
                        self.errorf[s] = t
                elif tokname == 'eof':
                    for s in states:
                        self.eoff[s] = t
                elif tokname == 'ignore':
                    line = t.__code__.co_firstlineno
                    file = t.__code__.co_filename
                    self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__)
                    self.error = True
                else:
                    for s in states:
                        self.funcsym[s].append((f, t))
            elif isinstance(t, StringTypes):
                if tokname == 'ignore':
                    for s in states:
                        self.ignore[s] = t
                    if '\\' in t:
                        self.log.warning("%s contains a literal backslash '\\'", f)
                elif tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", f)
                    self.error = True
                else:
                    for s in states:
                        self.strsym[s].append((f, t))
            else:
                self.log.error('%s not defined as a function or string', f)
                self.error = True

        # Sort the functions by line number
        for f in self.funcsym.values():
            f.sort(key=lambda x: x[1].__code__.co_firstlineno)

        # Sort the strings by regular expression length
        for s in self.strsym.values():
            s.sort(key=lambda x: len(x[1]), reverse=True)

    # Validate all of the t_rules collected
    def validate_rules(self):
        """Check every collected rule: argument counts, presence and
        compilability of regexes, and known token names."""
        for state in self.stateinfo:
            # Validate all rules defined by functions
            for fname, f in self.funcsym[state]:
                line = f.__code__.co_firstlineno
                file = f.__code__.co_filename
                module = inspect.getmodule(f)
                self.modules.add(module)

                tokname = self.toknames[fname]
                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = f.__code__.co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
                    self.error = True
                    continue

                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
                    self.error = True
                    continue

                if not _get_regex(f):
                    self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__)
                    self.error = True
                    continue

                try:
                    c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), re.VERBOSE | self.reflags)
                    if c.match(''):
                        # A rule matching '' would make the lexer loop forever.
                        self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__)
                        self.error = True
                except re.error as e:
                    self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e)
                    if '#' in _get_regex(f):
                        self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__)
                    self.error = True

            # Validate all rules defined by strings
            for name, r in self.strsym[state]:
                tokname = self.toknames[name]
                if tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", name)
                    self.error = True
                    continue

                if tokname not in self.tokens and tokname.find('ignore_') < 0:
                    self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname)
                    self.error = True
                    continue

                try:
                    c = re.compile('(?P<%s>%s)' % (name, r), re.VERBOSE | self.reflags)
                    if (c.match('')):
                        self.log.error("Regular expression for rule '%s' matches empty string", name)
                        self.error = True
                except re.error as e:
                    self.log.error("Invalid regular expression for rule '%s'. %s", name, e)
                    if '#' in r:
                        self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name)
                    self.error = True

            if not self.funcsym[state] and not self.strsym[state]:
                self.log.error("No rules defined for state '%s'", state)
                self.error = True

            # Validate the error function
            efunc = self.errorf.get(state, None)
            if efunc:
                f = efunc
                line = f.__code__.co_firstlineno
                file = f.__code__.co_filename
                module = inspect.getmodule(f)
                self.modules.add(module)

                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = f.__code__.co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
                    self.error = True

                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
                    self.error = True

        for module in self.modules:
            self.validate_module(module)

    # -----------------------------------------------------------------------------
    # validate_module()
    #
    # This checks to see if there are duplicated t_rulename() functions or strings
    # in the parser input file.  This is done using a simple regular expression
    # match on each line in the source code of the given module.
    # -----------------------------------------------------------------------------
    def validate_module(self, module):
        """Scan *module*'s source text for t_* names defined more than once."""
        lines, linen = inspect.getsourcelines(module)

        fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
        sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')

        counthash = {}
        linen += 1
        for line in lines:
            m = fre.match(line)
            if not m:
                m = sre.match(line)
            if m:
                name = m.group(1)
                prev = counthash.get(name)
                if not prev:
                    counthash[name] = linen
                else:
                    filename = inspect.getsourcefile(module)
                    self.log.error('%s:%d: Rule %s redefined. Previously defined on line %d', filename, linen, name, prev)
                    self.error = True
            linen += 1
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab',
        reflags=0, nowarn=False, outputdir=None, debuglog=None, errorlog=None):
    """Build a Lexer from the t_* definitions in *module*/*object* (or the
    caller's namespace) and install it as the module-global ``lexer``.

    With optimize=True an existing *lextab* table module is loaded instead
    of re-validating the rules, and a fresh table is written out after a
    successful build.  Returns the constructed Lexer.
    """
    if lextab is None:
        lextab = 'lextab'

    global lexer

    ldict = None
    stateinfo = {'INITIAL': 'inclusive'}
    lexobj = Lexer()
    lexobj.lexoptimize = optimize
    global token, input

    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)

    if debug:
        if debuglog is None:
            debuglog = PlyLogger(sys.stderr)

    # Get the module dictionary used for the lexer
    if object:
        module = object

    # Get the module dictionary used for the parser
    if module:
        _items = [(k, getattr(module, k)) for k in dir(module)]
        ldict = dict(_items)
        # If no __file__ attribute is available, try to obtain it from the __module__ instead
        if '__file__' not in ldict:
            ldict['__file__'] = sys.modules[ldict['__module__']].__file__
    else:
        ldict = get_caller_module_dict(2)

    # Determine if the module is package of a package or not.
    # If so, fix the tabmodule setting so that tables load correctly
    pkg = ldict.get('__package__')
    if pkg and isinstance(lextab, str):
        if '.' not in lextab:
            lextab = pkg + '.' + lextab

    # Collect parser information from the dictionary
    linfo = LexerReflect(ldict, log=errorlog, reflags=reflags)
    linfo.get_all()
    if not optimize:
        if linfo.validate_all():
            raise SyntaxError("Can't build lexer")

    # In optimized mode, try to short-circuit by loading a previously
    # written table module; fall through to a full build if that fails.
    if optimize and lextab:
        try:
            lexobj.readtab(lextab, ldict)
            token = lexobj.token
            input = lexobj.input
            lexer = lexobj
            return lexobj
        except ImportError:
            pass

    # Dump some basic debugging information
    if debug:
        debuglog.info('lex: tokens = %r', linfo.tokens)
        debuglog.info('lex: literals = %r', linfo.literals)
        debuglog.info('lex: states = %r', linfo.stateinfo)

    # Build a dictionary of valid token names
    lexobj.lextokens = set()
    for n in linfo.tokens:
        lexobj.lextokens.add(n)

    # Get literals specification
    if isinstance(linfo.literals, (list, tuple)):
        lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
    else:
        lexobj.lexliterals = linfo.literals

    lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals)

    # Get the stateinfo dictionary
    stateinfo = linfo.stateinfo

    regexs = {}
    # Build the master regular expressions
    for state in stateinfo:
        regex_list = []

        # Add rules defined by functions first
        for fname, f in linfo.funcsym[state]:
            line = f.__code__.co_firstlineno
            file = f.__code__.co_filename
            regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f)))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state)

        # Now add all of the simple rules
        for name, r in linfo.strsym[state]:
            regex_list.append('(?P<%s>%s)' % (name, r))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state)

        regexs[state] = regex_list

    # Build the master regular expressions
    if debug:
        debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====')

    for state in regexs:
        lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames)
        lexobj.lexstatere[state] = lexre
        lexobj.lexstateretext[state] = re_text
        lexobj.lexstaterenames[state] = re_names
        if debug:
            for i, text in enumerate(re_text):
                debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text)

    # For inclusive states, we need to add the regular expressions from the INITIAL state
    for state, stype in stateinfo.items():
        if state != 'INITIAL' and stype == 'inclusive':
            lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
            lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
            lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])

    lexobj.lexstateinfo = stateinfo
    lexobj.lexre = lexobj.lexstatere['INITIAL']
    lexobj.lexretext = lexobj.lexstateretext['INITIAL']
    lexobj.lexreflags = reflags

    # Set up ignore variables
    lexobj.lexstateignore = linfo.ignore
    lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '')

    # Set up error functions
    lexobj.lexstateerrorf = linfo.errorf
    lexobj.lexerrorf = linfo.errorf.get('INITIAL', None)
    if not lexobj.lexerrorf:
        errorlog.warning('No t_error rule is defined')

    # Set up eof functions
    lexobj.lexstateeoff = linfo.eoff
    lexobj.lexeoff = linfo.eoff.get('INITIAL', None)

    # Check state information for ignore and error rules
    for s, stype in stateinfo.items():
        if stype == 'exclusive':
            if s not in linfo.errorf:
                errorlog.warning("No error rule is defined for exclusive state '%s'", s)
            if s not in linfo.ignore and lexobj.lexignore:
                errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
        elif stype == 'inclusive':
            # Inclusive states inherit INITIAL's error/ignore handling.
            if s not in linfo.errorf:
                linfo.errorf[s] = linfo.errorf.get('INITIAL', None)
            if s not in linfo.ignore:
                linfo.ignore[s] = linfo.ignore.get('INITIAL', '')

    # Create global versions of the token() and input() functions
    token = lexobj.token
    input = lexobj.input
    lexer = lexobj

    # If in optimize mode, we write the lextab
    if lextab and optimize:
        if outputdir is None:
            # If no output directory is set, the location of the output files
            # is determined according to the following rules:
            #     - If lextab specifies a package, files go into that package directory
            #     - Otherwise, files go in the same directory as the specifying module
            if isinstance(lextab, types.ModuleType):
                srcfile = lextab.__file__
            else:
                if '.' not in lextab:
                    srcfile = ldict['__file__']
                else:
                    parts = lextab.split('.')
                    pkgname = '.'.join(parts[:-1])
                    exec('import %s' % pkgname)
                    srcfile = getattr(sys.modules[pkgname], '__file__', '')
            outputdir = os.path.dirname(srcfile)
        try:
            lexobj.writetab(lextab, outputdir)
        except IOError as e:
            errorlog.warning("Couldn't write lextab module %r. %s" % (lextab, e))

    return lexobj
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None, data=None):
    """Run a lexer as a main program.

    Tokenizes *data* (or, when absent, the file named in sys.argv[1], or
    standard input) with *lexer* -- defaulting to the module-global
    token()/input() installed by lex() -- and prints one
    (type, value, lineno, lexpos) tuple per token.
    """
    if not data:
        try:
            filename = sys.argv[1]
            f = open(filename)
            data = f.read()
            f.close()
        except IndexError:
            sys.stdout.write('Reading from standard input (type EOF to end):\n')
            data = sys.stdin.read()

    if lexer:
        _input = lexer.input
    else:
        _input = input
    _input(data)
    if lexer:
        _token = lexer.token
    else:
        _token = token

    # Emit tokens until the lexer is exhausted.
    while True:
        tok = _token()
        if not tok:
            break
        sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# Decorator that attaches a regular expression to a rule function, for use
# when the docstring can't hold it (e.g. when the pattern is computed).
# ``r`` may itself be another rule function, in which case its regex is
# reused.
# -----------------------------------------------------------------------------
def TOKEN(r):
    def set_regex(f):
        f.regex = _get_regex(r) if hasattr(r, '__call__') else r
        return f
    return set_regex

# Alternative spelling of the TOKEN decorator
Token = TOKEN
| gpl-2.0 |
kennedyshead/home-assistant | homeassistant/components/recorder/history.py | 2 | 12888 | """Provide pre-made queries on top of the recorder component."""
from __future__ import annotations
from collections import defaultdict
from itertools import groupby
import logging
import time
from sqlalchemy import and_, bindparam, func
from sqlalchemy.ext import baked
from homeassistant.components import recorder
from homeassistant.components.recorder.models import (
States,
process_timestamp_to_utc_isoformat,
)
from homeassistant.components.recorder.util import execute, session_scope
from homeassistant.core import split_entity_id
import homeassistant.util.dt as dt_util
from .models import LazyState
# mypy: allow-untyped-defs, no-check-untyped-defs

_LOGGER = logging.getLogger(__name__)

# Keys used when building minimal-response state dictionaries.
STATE_KEY = "state"
LAST_CHANGED_KEY = "last_changed"

# Domains for which *every* update (not only state changes) is significant,
# e.g. so attribute-only updates still appear in history queries.
SIGNIFICANT_DOMAINS = (
    "climate",
    "device_tracker",
    "humidifier",
    "thermostat",
    "water_heater",
)
# Domains excluded from history queries entirely.
IGNORE_DOMAINS = ("zone", "scene")
# Domains whose attributes must be kept even in minimal responses.
NEED_ATTRIBUTE_DOMAINS = {
    "climate",
    "humidifier",
    "input_datetime",
    "thermostat",
    "water_heater",
}

# Columns selected by every history query in this module.
QUERY_STATES = [
    States.domain,
    States.entity_id,
    States.state,
    States.attributes,
    States.last_changed,
    States.last_updated,
]

# hass.data key under which the baked-query cache is stored.
HISTORY_BAKERY = "recorder_history_bakery"
def async_setup(hass):
    """Set up the history hooks."""
    # Cache for baked (pre-compiled) SQLAlchemy queries shared by this module.
    hass.data[HISTORY_BAKERY] = baked.bakery()
def get_significant_states(hass, *args, **kwargs):
    """Wrap _get_significant_states with a sql session.

    Positional/keyword arguments are forwarded unchanged; see
    _get_significant_states for the full parameter list.
    """
    with session_scope(hass=hass) as session:
        return _get_significant_states(hass, session, *args, **kwargs)
def _get_significant_states(
    hass,
    session,
    start_time,
    end_time=None,
    entity_ids=None,
    filters=None,
    include_start_time_state=True,
    significant_changes_only=True,
    minimal_response=False,
):
    """
    Return states changes during UTC period start_time - end_time.

    Significant states are all states where there is a state change,
    as well as all states from certain domains (for instance
    thermostat so that we get current temperature in our graphs).
    """
    timer_start = time.perf_counter()

    baked_query = hass.data[HISTORY_BAKERY](
        lambda session: session.query(*QUERY_STATES)
    )

    if significant_changes_only:
        # Keep rows that are real state changes, plus every row from the
        # SIGNIFICANT_DOMAINS (whose attribute-only updates matter too).
        baked_query += lambda q: q.filter(
            (
                States.domain.in_(SIGNIFICANT_DOMAINS)
                | (States.last_changed == States.last_updated)
            )
            & (States.last_updated > bindparam("start_time"))
        )
    else:
        baked_query += lambda q: q.filter(States.last_updated > bindparam("start_time"))

    if entity_ids is not None:
        baked_query += lambda q: q.filter(
            States.entity_id.in_(bindparam("entity_ids", expanding=True))
        )
    else:
        baked_query += lambda q: q.filter(~States.domain.in_(IGNORE_DOMAINS))
        # User-configured include/exclude filters only apply when querying
        # all entities.
        if filters:
            filters.bake(baked_query)

    if end_time is not None:
        baked_query += lambda q: q.filter(States.last_updated < bindparam("end_time"))

    baked_query += lambda q: q.order_by(States.entity_id, States.last_updated)

    states = execute(
        baked_query(session).params(
            start_time=start_time, end_time=end_time, entity_ids=entity_ids
        )
    )

    if _LOGGER.isEnabledFor(logging.DEBUG):
        elapsed = time.perf_counter() - timer_start
        _LOGGER.debug("get_significant_states took %fs", elapsed)

    return _sorted_states_to_dict(
        hass,
        session,
        states,
        start_time,
        entity_ids,
        filters,
        include_start_time_state,
        minimal_response,
    )
def state_changes_during_period(hass, start_time, end_time=None, entity_id=None):
    """Return states changes during UTC period start_time - end_time.

    Unlike get_significant_states, only rows where last_changed ==
    last_updated (i.e. actual state changes) are returned.
    """
    with session_scope(hass=hass) as session:
        baked_query = hass.data[HISTORY_BAKERY](
            lambda session: session.query(*QUERY_STATES)
        )

        baked_query += lambda q: q.filter(
            (States.last_changed == States.last_updated)
            & (States.last_updated > bindparam("start_time"))
        )

        if end_time is not None:
            baked_query += lambda q: q.filter(
                States.last_updated < bindparam("end_time")
            )

        if entity_id is not None:
            baked_query += lambda q: q.filter_by(entity_id=bindparam("entity_id"))
            # Entity ids are stored lowercase; normalize the bind parameter.
            entity_id = entity_id.lower()

        baked_query += lambda q: q.order_by(States.entity_id, States.last_updated)

        states = execute(
            baked_query(session).params(
                start_time=start_time, end_time=end_time, entity_id=entity_id
            )
        )

        entity_ids = [entity_id] if entity_id is not None else None

        return _sorted_states_to_dict(hass, session, states, start_time, entity_ids)
def get_last_state_changes(hass, number_of_states, entity_id):
    """Return the last number_of_states state changes.

    Results are returned oldest-first, as a dict keyed by entity_id.
    """
    start_time = dt_util.utcnow()

    with session_scope(hass=hass) as session:
        baked_query = hass.data[HISTORY_BAKERY](
            lambda session: session.query(*QUERY_STATES)
        )
        # Only true state changes, not attribute-only updates.
        baked_query += lambda q: q.filter(States.last_changed == States.last_updated)

        if entity_id is not None:
            baked_query += lambda q: q.filter_by(entity_id=bindparam("entity_id"))
            # Entity ids are stored lowercase; normalize the bind parameter.
            entity_id = entity_id.lower()

        baked_query += lambda q: q.order_by(
            States.entity_id, States.last_updated.desc()
        )

        baked_query += lambda q: q.limit(bindparam("number_of_states"))

        states = execute(
            baked_query(session).params(
                number_of_states=number_of_states, entity_id=entity_id
            )
        )

        entity_ids = [entity_id] if entity_id is not None else None

        # Rows come back newest-first (desc order); reverse to chronological.
        return _sorted_states_to_dict(
            hass,
            session,
            reversed(states),
            start_time,
            entity_ids,
            include_start_time_state=False,
        )
def get_states(hass, utc_point_in_time, entity_ids=None, run=None, filters=None):
    """Return the states at a specific point in time.

    *run* is the recorder run covering utc_point_in_time; it is looked up
    when not supplied.  Returns an empty list if the recorder was not
    running at that time.
    """
    if run is None:
        run = recorder.run_information_from_instance(hass, utc_point_in_time)

    # History did not run before utc_point_in_time
    if run is None:
        return []

    with session_scope(hass=hass) as session:
        return _get_states_with_session(
            hass, session, utc_point_in_time, entity_ids, run, filters
        )
def _get_states_with_session(
    hass, session, utc_point_in_time, entity_ids=None, run=None, filters=None
):
    """Return the states at a specific point in time using *session*.

    For a single entity a cheaper dedicated query is used; otherwise two
    subqueries find, per entity, the newest state row recorded before
    utc_point_in_time within the given recorder run.  Returns a list of
    LazyState objects (empty if the recorder was not running then).
    """
    if entity_ids and len(entity_ids) == 1:
        return _get_single_entity_states_with_session(
            hass, session, utc_point_in_time, entity_ids[0]
        )

    if run is None:
        run = recorder.run_information_with_session(session, utc_point_in_time)

    # History did not run before utc_point_in_time
    if run is None:
        return []

    # We have more than one entity to look at (most commonly we want
    # all entities,) so we need to do a search on all states since the
    # last recorder run started.
    query = session.query(*QUERY_STATES)

    # Subquery 1: per entity, the timestamp of its most recent update
    # inside the run and before utc_point_in_time.
    most_recent_states_by_date = session.query(
        States.entity_id.label("max_entity_id"),
        func.max(States.last_updated).label("max_last_updated"),
    ).filter(
        (States.last_updated >= run.start) & (States.last_updated < utc_point_in_time)
    )

    if entity_ids:
        # BUGFIX: Query.filter() is generative and returns a *new* query;
        # the previous code discarded its result, so the entity restriction
        # was silently dropped and the subquery scanned every entity.
        most_recent_states_by_date = most_recent_states_by_date.filter(
            States.entity_id.in_(entity_ids)
        )

    most_recent_states_by_date = most_recent_states_by_date.group_by(States.entity_id)
    most_recent_states_by_date = most_recent_states_by_date.subquery()

    # Subquery 2: resolve each (entity, max timestamp) pair to a state_id.
    most_recent_state_ids = session.query(
        func.max(States.state_id).label("max_state_id")
    ).join(
        most_recent_states_by_date,
        and_(
            States.entity_id == most_recent_states_by_date.c.max_entity_id,
            States.last_updated == most_recent_states_by_date.c.max_last_updated,
        ),
    )
    most_recent_state_ids = most_recent_state_ids.group_by(States.entity_id)
    most_recent_state_ids = most_recent_state_ids.subquery()

    query = query.join(
        most_recent_state_ids,
        States.state_id == most_recent_state_ids.c.max_state_id,
    )

    if entity_ids is not None:
        query = query.filter(States.entity_id.in_(entity_ids))
    else:
        query = query.filter(~States.domain.in_(IGNORE_DOMAINS))
        # Include/exclude filters only apply when querying all entities.
        if filters:
            query = filters.apply(query)

    return [LazyState(row) for row in execute(query)]
def _get_single_entity_states_with_session(hass, session, utc_point_in_time, entity_id):
    """Return the newest state of *entity_id* before *utc_point_in_time*
    as a one-element list of LazyState (empty when no row matches)."""
    # Use an entirely different (and extremely fast) query if we only
    # have a single entity id
    baked_query = hass.data[HISTORY_BAKERY](
        lambda session: session.query(*QUERY_STATES)
    )
    baked_query += lambda q: q.filter(
        States.last_updated < bindparam("utc_point_in_time"),
        States.entity_id == bindparam("entity_id"),
    )
    # Newest matching row only.
    baked_query += lambda q: q.order_by(States.last_updated.desc())
    baked_query += lambda q: q.limit(1)

    query = baked_query(session).params(
        utc_point_in_time=utc_point_in_time, entity_id=entity_id
    )

    return [LazyState(row) for row in execute(query)]
def _sorted_states_to_dict(
    hass,
    session,
    states,
    start_time,
    entity_ids,
    filters=None,
    include_start_time_state=True,
    minimal_response=False,
):
    """Convert SQL results into JSON friendly data structure.
    This takes our state list and turns it into a JSON friendly data
    structure {'entity_id': [list of states], 'entity_id2': [list of states]}
    States must be sorted by entity_id and last_updated
    We also need to go back and create a synthetic zero data point for
    each list of states, otherwise our graphs won't start on the Y
    axis correctly.
    """
    result = defaultdict(list)
    # Set all entity IDs to empty lists in result set to maintain the order
    if entity_ids is not None:
        for ent_id in entity_ids:
            result[ent_id] = []
    # Get the states at the start time
    timer_start = time.perf_counter()
    if include_start_time_state:
        run = recorder.run_information_from_instance(hass, start_time)
        # The synthetic start-time states get their timestamps clamped to
        # start_time so graphs begin exactly on the left axis.
        for state in _get_states_with_session(
            hass, session, start_time, entity_ids, run=run, filters=filters
        ):
            state.last_changed = start_time
            state.last_updated = start_time
            result[state.entity_id].append(state)
    if _LOGGER.isEnabledFor(logging.DEBUG):
        elapsed = time.perf_counter() - timer_start
        _LOGGER.debug("getting %d first datapoints took %fs", len(result), elapsed)
    # Called in a tight loop so cache the function
    # here
    _process_timestamp_to_utc_isoformat = process_timestamp_to_utc_isoformat
    # Append all changes to it
    # NOTE: groupby yields a shared, single-pass iterator per group; the
    # branches below deliberately either consume it fully (extend) or
    # partially (next + for), so the order of operations matters.
    for ent_id, group in groupby(states, lambda state: state.entity_id):
        domain = split_entity_id(ent_id)[0]
        ent_results = result[ent_id]
        if not minimal_response or domain in NEED_ATTRIBUTE_DOMAINS:
            # Full response: materialize every row; this exhausts `group`,
            # so the minimal-response loop below becomes a no-op.
            ent_results.extend(LazyState(db_state) for db_state in group)
        # With minimal response we only provide a native
        # State for the first and last response. All the states
        # in-between only provide the "state" and the
        # "last_changed".
        if not ent_results:
            ent_results.append(LazyState(next(group)))
        prev_state = ent_results[-1]
        initial_state_count = len(ent_results)
        for db_state in group:
            # With minimal response we do not care about attribute
            # changes so we can filter out duplicate states
            if db_state.state == prev_state.state:
                continue
            ent_results.append(
                {
                    STATE_KEY: db_state.state,
                    LAST_CHANGED_KEY: _process_timestamp_to_utc_isoformat(
                        db_state.last_changed
                    ),
                }
            )
            prev_state = db_state
        if prev_state and len(ent_results) != initial_state_count:
            # There was at least one state change
            # replace the last minimal state with
            # a full state
            ent_results[-1] = LazyState(prev_state)
    # Filter out the empty lists if some states had 0 results.
    return {key: val for key, val in result.items() if val}
def get_state(hass, utc_point_in_time, entity_id, run=None):
    """Return a state at a specific point in time."""
    found = get_states(hass, utc_point_in_time, (entity_id,), run)
    if found:
        return found[0]
    return None
| apache-2.0 |
bdelliott/wordgame | web/django/conf/locale/nb/formats.py | 685 | 1657 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Norwegian Bokmål (nb) localized formats.
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1  # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Note: '%Y-%m-%d' and the '%Y-%m-%d ...' variants used to be listed twice;
# the duplicate entries have been removed (parsing behavior is unchanged,
# since the formats are tried in order and duplicates can never match first).
DATE_INPUT_FORMATS = (
    '%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
    # '%d. %b %Y', '%d %b %Y', # '25. okt 2006', '25 okt 2006'
    # '%d. %b. %Y', '%d %b. %Y', # '25. okt. 2006', '25 okt. 2006'
    # '%d. %B %Y', '%d %B %Y', # '25. oktober 2006', '25 oktober 2006'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S', # '14:30:59'
    '%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%Y-%m-%d', # '2006-10-25'
    '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M', # '25.10.2006 14:30'
    '%d.%m.%Y', # '25.10.2006'
    '%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M', # '25.10.06 14:30'
    '%d.%m.%y', # '25.10.06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
| mit |
bertyhell/moviepy | moviepy/video/tools/segmenting.py | 16 | 1826 | import numpy as np
import scipy.ndimage as ndi
from moviepy.video.VideoClip import ImageClip
def findObjects(clip, rem_thr=500, preview=False):
    """Split the first frame of ``clip`` into separate on-screen objects.

    Labels the connected components of the frame at t=0 and returns a list
    of ImageClips, one per object, ordered from left to right. Each returned
    clip carries a matching mask and a ``screenpos`` attribute holding the
    (x, y) position of its top-left corner in the original frame.

    Parameters
    ----------
    clip : VideoClip
        Clip whose first frame is analyzed; a mask is added if missing.
    rem_thr : int
        Objects with pixel area < rem_thr are considered false positives
        and are removed.
    preview : bool
        If True, display the labelled frame with matplotlib.
    """
    image = clip.get_frame(0)
    if clip.mask is None:
        clip = clip.add_mask()
    mask = clip.mask.get_frame(0)
    # Label connected components of the first channel.
    # (ndi.label is the stable spelling; ndi.measurements.label is a
    # deprecated alias removed in recent scipy releases.)
    labelled, num_features = ndi.label(image[:, :, 0])
    # find the objects
    slices = ndi.find_objects(labelled)
    # cool trick to remove letter holes (in o,e,a, etc.)
    slices = [e for e in slices if mask[e[0], e[1]].mean() > 0.2]
    # remove very small slices
    slices = [e for e in slices if image[e[0], e[1]].size > rem_thr]
    # Sort the slices from left to right
    islices = sorted(enumerate(slices), key=lambda s: s[1][1].start)
    letters = []
    for i, (ind, (sy, sx)) in enumerate(islices):
        # Crop each letter separately, keeping a 1-pixel margin.
        sy = slice(sy.start - 1, sy.stop + 1)
        sx = slice(sx.start - 1, sx.stop + 1)
        labletter = labelled[sy, sx]
        # Keep only this object's pixels in the mask (labels are 1-based).
        maskletter = (labletter == (ind + 1)) * mask[sy, sx]
        letter = ImageClip(image[sy, sx])
        letter.mask = ImageClip(maskletter, ismask=True)
        letter.screenpos = np.array((sx.start, sy.start))
        letters.append(letter)
    if preview:
        import matplotlib.pyplot as plt
        print("found %d objects" % (num_features))
        fig, ax = plt.subplots(2)
        ax[0].axis('off')
        ax[0].imshow(labelled)
        ax[1].imshow([range(num_features)], interpolation='nearest')
        ax[1].set_yticks([])
        plt.show()
    return letters
| mit |
gitfred/fuel-extension-volume-manager | volume_manager/tests/test_objects.py | 1 | 1473 | # -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.test.base import BaseTestCase
from volume_manager.models.node_volumes import NodeVolumes
from volume_manager.objects.volumes import VolumeObject
class TestExtension(BaseTestCase):
    """Checks VolumeObject operations against the NodeVolumes table."""

    def test_delete_by_node_ids(self):
        """Deleting by a list of node ids removes only the matching rows."""
        # Seed three volume rows, one per node id.
        for node_id in (1, 2, 3):
            self.db.add(NodeVolumes(node_id=node_id,
                                    volumes='volume_%d' % node_id))
        self.db.commit()
        self.assertEqual(self.db.query(NodeVolumes).count(), 3)
        # Remove the rows for nodes 1 and 2; node 3 must survive untouched.
        VolumeObject.delete_by_node_ids([1, 2])
        self.assertEqual(self.db.query(NodeVolumes).count(), 1)
        remaining = self.db.query(NodeVolumes).first()
        self.assertEqual(remaining.node_id, 3)
        self.assertEqual(remaining.volumes, 'volume_3')
| apache-2.0 |
unseenlaser/python-for-android | python-build/python-libs/gdata/build/lib/atom/client.py | 133 | 5265 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AtomPubClient provides CRUD ops. in line with the Atom Publishing Protocol.
"""
__author__ = 'j.s@google.com (Jeff Scudder)'
import atom.http_core
class AtomPubClient(object):
    """Performs CRUD operations per the Atom Publishing Protocol over HTTP.

    The class attributes below act as defaults that instances may override
    in __init__.
    """
    host = None  # default host when a request URI does not specify one
    auth_token = None  # default token whose modify_request sets Authorization
    def __init__(self, http_client=None, host=None,
                 auth_token=None, source=None, **kwargs):
        """Creates a new AtomPubClient instance.
        Args:
          source: The name of your application.
          http_client: An object capable of performing HTTP requests through a
                       request method. This object is used to perform the request
                       when the AtomPubClient's request method is called. Used to
                       allow HTTP requests to be directed to a mock server, or use
                       an alternate library instead of the default of httplib to
                       make HTTP requests.
          host: str The default host name to use if a host is not specified in the
                requested URI.
          auth_token: An object which sets the HTTP Authorization header when its
                      modify_request method is called.
        """
        self.http_client = http_client or atom.http_core.ProxiedHttpClient()
        if host is not None:
            self.host = host
        if auth_token is not None:
            self.auth_token = auth_token
        self.source = source
    def request(self, method=None, uri=None, auth_token=None,
                http_request=None, **kwargs):
        """Performs an HTTP request to the server indicated.
        Uses the http_client instance to make the request.
        Args:
          method: The HTTP method as a string, usually one of 'GET', 'POST',
                  'PUT', or 'DELETE'
          uri: The URI desired as a string or atom.http_core.Uri.
          http_request:
          auth_token: An authorization token object whose modify_request method
                      sets the HTTP Authorization header.
        """
        # Modify the request based on the AtomPubClient settings and parameters
        # passed in to the request.
        http_request = self.modify_request(http_request)
        if isinstance(uri, (str, unicode)):
            uri = atom.http_core.Uri.parse_uri(uri)
        if uri is not None:
            uri.modify_request(http_request)
        if isinstance(method, (str, unicode)):
            http_request.method = method
        # Any unrecognized arguments are assumed to be capable of modifying the
        # HTTP request.
        for name, value in kwargs.iteritems():
            if value is not None:
                value.modify_request(http_request)
        # Default to an http request if the protocol scheme is not set.
        if http_request.uri.scheme is None:
            http_request.uri.scheme = 'http'
        if http_request.uri.path is None:
            http_request.uri.path = '/'
        # Add the Authorization header at the very end. The Authorization header
        # value may need to be calculated using information in the request.
        if auth_token:
            auth_token.modify_request(http_request)
        elif self.auth_token:
            self.auth_token.modify_request(http_request)
        # Perform the fully specified request using the http_client instance.
        # Sends the request to the server and returns the server's response.
        return self.http_client.request(http_request)
    Request = request  # CamelCase alias kept for backwards compatibility
    def get(self, uri=None, auth_token=None, http_request=None, **kwargs):
        """Performs a GET request; see request() for argument semantics."""
        return self.request(method='GET', uri=uri, auth_token=auth_token,
                            http_request=http_request, **kwargs)
    Get = get
    def post(self, uri=None, data=None, auth_token=None, http_request=None,
             **kwargs):
        """Performs a POST request with *data* as the entity body."""
        return self.request(method='POST', uri=uri, auth_token=auth_token,
                            http_request=http_request, data=data, **kwargs)
    Post = post
    def put(self, uri=None, data=None, auth_token=None, http_request=None,
            **kwargs):
        """Performs a PUT request with *data* as the entity body."""
        return self.request(method='PUT', uri=uri, auth_token=auth_token,
                            http_request=http_request, data=data, **kwargs)
    Put = put
    def delete(self, uri=None, auth_token=None, http_request=None, **kwargs):
        """Performs a DELETE request."""
        return self.request(method='DELETE', uri=uri, auth_token=auth_token,
                            http_request=http_request, **kwargs)
    Delete = delete
    def modify_request(self, http_request):
        """Applies this client's defaults (host, User-Agent) to a request.

        Creates a new HttpRequest when none is given; returns the request.
        """
        if http_request is None:
            http_request = atom.http_core.HttpRequest()
        if self.host is not None and http_request.uri.host is None:
            http_request.uri.host = self.host
        # Set the user agent header for logging purposes.
        if self.source:
            http_request.headers['User-Agent'] = '%s gdata-py/2.0.1' % self.source
        else:
            http_request.headers['User-Agent'] = 'gdata-py/2.0.1'
        return http_request
    ModifyRequest = modify_request
| apache-2.0 |
vlinhd11/vlinhd11-android-scripting | python/src/Demo/curses/ncurses.py | 32 | 6704 | #!/usr/bin/env python
#
# $Id: ncurses.py 66424 2008-09-13 01:22:08Z andrew.kuchling $
#
# (n)curses exerciser in Python, an interactive test for the curses
# module. Currently, only the panel demos are ported.
import curses
from curses import panel
def wGetchar(win = None):
    """Read and return one key from *win*, defaulting to the main screen."""
    target = stdscr if win is None else win
    return target.getch()
def Getchar():
    """Wait for a keypress on the main screen, discarding the key read."""
    wGetchar()
#
# Panels tester
#
def wait_a_while():
    """Pause between demo steps.

    In single-step mode (nap_msec == 1) block until a key is pressed;
    otherwise sleep for nap_msec milliseconds.
    """
    if nap_msec != 1:
        curses.napms(nap_msec)
    else:
        Getchar()
def saywhat(text):
    """Display *text* on the status line (bottom row), clearing the old text."""
    status_row = curses.LINES - 1
    stdscr.move(status_row, 0)
    stdscr.clrtoeol()
    stdscr.addstr(text)
def mkpanel(color, rows, cols, tly, tlx):
    """Create a rows x cols panel at (tly, tlx) with *color* as background.

    On color terminals a color pair numbered after *color* is installed as
    the window background; monochrome terminals fall back to bold spaces.
    """
    win = curses.newwin(rows, cols, tly, tlx)
    pan = panel.new_panel(win)
    if not curses.has_colors():
        win.bkgdset(ord(' '), curses.A_BOLD)
        return pan
    # White foreground reads better on blue; black on every other color.
    fg = curses.COLOR_WHITE if color == curses.COLOR_BLUE else curses.COLOR_BLACK
    curses.init_pair(color, fg, color)
    win.bkgdset(ord(' '), curses.color_pair(color))
    return pan
def pflush():
    """Flush pending panel changes to the physical screen."""
    panel.update_panels()
    curses.doupdate()
def fill_panel(pan):
    """Label a panel with its name and flood its interior with its digit."""
    win = pan.window()
    digit = pan.userptr()[1]  # second character of the "pN" user pointer
    win.move(1, 1)
    win.addstr("-pan%c-" % digit)
    win.clrtoeol()
    win.box()
    height, width = win.getmaxyx()
    # Fill everything inside the box border (rows 2.., cols 1..) with digit.
    for row in range(2, height - 1):
        for col in range(1, width - 1):
            win.move(row, col)
            win.addch(digit)
def demo_panels(win):
    """Interactive exerciser for the curses.panel module.

    Creates five colored panels and walks through a scripted sequence of
    show/hide/move/top/bottom operations. The first pass is single-stepped
    (a keypress advances each step, nap_msec == 1); the second pass replays
    automatically with a 100 ms delay per step. The status line explains
    each step (s=show, h=hide, m=move, t=top, b=bottom, d=delete).
    """
    global stdscr, nap_msec, mod
    stdscr = win
    nap_msec = 1
    mod = ["test", "TEST", "(**)", "*()*", "<-->", "LAST"]
    stdscr.refresh()
    # Fill the background with a digit pattern so panel motion is visible.
    for y in range(0, curses.LINES - 1):
        for x in range(0, curses.COLS):
            stdscr.addstr("%d" % ((y + x) % 10))
    for y in range(0, 1):
        p1 = mkpanel(curses.COLOR_RED,
                     curses.LINES // 2 - 2,
                     curses.COLS // 8 + 1,
                     0,
                     0)
        p1.set_userptr("p1")
        p2 = mkpanel(curses.COLOR_GREEN,
                     curses.LINES // 2 + 1,
                     curses.COLS // 7,
                     curses.LINES // 4,
                     curses.COLS // 10)
        p2.set_userptr("p2")
        p3 = mkpanel(curses.COLOR_YELLOW,
                     curses.LINES // 4,
                     curses.COLS // 10,
                     curses.LINES // 2,
                     curses.COLS // 9)
        p3.set_userptr("p3")
        p4 = mkpanel(curses.COLOR_BLUE,
                     curses.LINES // 2 - 2,
                     curses.COLS // 8,
                     curses.LINES // 2 - 2,
                     curses.COLS // 3)
        p4.set_userptr("p4")
        p5 = mkpanel(curses.COLOR_MAGENTA,
                     curses.LINES // 2 - 2,
                     curses.COLS // 8,
                     curses.LINES // 2,
                     curses.COLS // 2 - 2)
        p5.set_userptr("p5")
        fill_panel(p1)
        fill_panel(p2)
        fill_panel(p3)
        fill_panel(p4)
        fill_panel(p5)
        p4.hide()
        p5.hide()
        pflush()
        saywhat("press any key to continue")
        wait_a_while()
        # Scripted show/hide/move/stacking sequence begins here.
        saywhat("h3 s1 s2 s4 s5;press any key to continue")
        p1.move(0, 0)
        p3.hide()
        p1.show()
        p2.show()
        p4.show()
        p5.show()
        pflush()
        wait_a_while()
        saywhat("s1; press any key to continue")
        p1.show()
        pflush()
        wait_a_while()
        saywhat("s2; press any key to continue")
        p2.show()
        pflush()
        wait_a_while()
        saywhat("m2; press any key to continue")
        p2.move(curses.LINES // 3 + 1, curses.COLS // 8)
        pflush()
        wait_a_while()
        saywhat("s3; press any key to continue")
        p3.show()
        pflush()
        wait_a_while()
        saywhat("m3; press any key to continue")
        p3.move(curses.LINES // 4 + 1, curses.COLS // 15)
        pflush()
        wait_a_while()
        saywhat("b3; press any key to continue")
        p3.bottom()
        pflush()
        wait_a_while()
        saywhat("s4; press any key to continue")
        p4.show()
        pflush()
        wait_a_while()
        saywhat("s5; press any key to continue")
        p5.show()
        pflush()
        wait_a_while()
        saywhat("t3; press any key to continue")
        p3.top()
        pflush()
        wait_a_while()
        saywhat("t1; press any key to continue")
        p1.show()
        pflush()
        wait_a_while()
        saywhat("t2; press any key to continue")
        p2.show()
        pflush()
        wait_a_while()
        saywhat("t3; press any key to continue")
        p3.show()
        pflush()
        wait_a_while()
        saywhat("t4; press any key to continue")
        p4.show()
        pflush()
        wait_a_while()
        # Slide p4 and p5 across the screen while relabelling their windows.
        for itmp in range(0, 6):
            w4 = p4.window()
            w5 = p5.window()
            saywhat("m4; press any key to continue")
            w4.move(curses.LINES // 8, 1)
            w4.addstr(mod[itmp])
            p4.move(curses.LINES // 6, itmp * curses.COLS // 8)
            w5.move(curses.LINES // 6, 1)
            w5.addstr(mod[itmp])
            pflush()
            wait_a_while()
            saywhat("m5; press any key to continue")
            w4.move(curses.LINES // 6, 1)
            w4.addstr(mod[itmp])
            p5.move(curses.LINES // 3 - 1, itmp * 10 + 6)
            w5.move(curses.LINES // 8, 1)
            w5.addstr(mod[itmp])
            pflush()
            wait_a_while()
        saywhat("m4; press any key to continue")
        p4.move(curses.LINES // 6, (itmp + 1) * curses.COLS // 8)
        pflush()
        wait_a_while()
        saywhat("t5; press any key to continue")
        p5.top()
        pflush()
        wait_a_while()
        saywhat("t2; press any key to continue")
        p2.top()
        pflush()
        wait_a_while()
        saywhat("t1; press any key to continue")
        p1.top()
        pflush()
        wait_a_while()
        # Tear the panels down one by one.
        saywhat("d2; press any key to continue")
        del p2
        pflush()
        wait_a_while()
        saywhat("h3; press any key to continue")
        p3.hide()
        pflush()
        wait_a_while()
        saywhat("d1; press any key to continue")
        del p1
        pflush()
        wait_a_while()
        saywhat("d4; press any key to continue")
        del p4
        pflush()
        wait_a_while()
        saywhat("d5; press any key to continue")
        del p5
        pflush()
        wait_a_while()
        # After the interactive pass, replay once automatically (100 ms/step).
        if nap_msec == 1:
            break
        nap_msec = 100
#
# one fine day there'll be the menu at this place
#
# Run the demo under curses.wrapper so the terminal state is restored
# even if the demo raises an exception.
curses.wrapper(demo_panels)
| apache-2.0 |
siliconsmiley/QGIS | python/plugins/processing/algs/qgis/Polygonize.py | 2 | 5047 | # -*- coding: utf-8 -*-
"""
***************************************************************************
Polygonize.py
---------------------
Date : March 2013
Copyright : (C) 2013 by Piotr Pociask
Email : ppociask at o2 dot pl
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Piotr Pociask'
__date__ = 'March 2013'
__copyright__ = '(C) 2013, Piotr Pociask'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import QVariant
from qgis.core import QGis, QgsFields, QgsField, QgsFeature, QgsGeometry
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterBoolean
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
class Polygonize(GeoAlgorithm):
    """Processing algorithm that builds polygons from the closed areas
    enclosed by a line layer, using shapely's polygonize."""
    # Parameter / output identifiers used by the processing framework.
    INPUT = 'INPUT'
    OUTPUT = 'OUTPUT'
    FIELDS = 'FIELDS'
    GEOMETRY = 'GEOMETRY'
    def processAlgorithm(self, progress):
        """Run the algorithm: collect lines, node them, polygonize, save.

        Progress is reported in three phases: 0-40% collecting lines,
        40-50% noding/polygonizing, 50-100% writing output features.
        """
        # shapely is an optional dependency; fail with a clear message.
        try:
            from shapely.ops import polygonize
            from shapely.geometry import Point, MultiLineString
        except ImportError:
            raise GeoAlgorithmExecutionException(
                self.tr('Polygonize algorithm requires shapely module!'))
        vlayer = dataobjects.getObjectFromUri(self.getParameterValue(self.INPUT))
        output = self.getOutputFromName(self.OUTPUT)
        vprovider = vlayer.dataProvider()
        # Optionally copy the line layer's attribute table structure.
        if self.getParameterValue(self.FIELDS):
            fields = vprovider.fields()
        else:
            fields = QgsFields()
        if self.getParameterValue(self.GEOMETRY):
            # Remember where the original fields end so attribute rows can
            # be padded before appending area/perimeter values below.
            fieldsCount = fields.count()
            fields.append(QgsField('area', QVariant.Double, 'double', 16, 2))
            fields.append(QgsField('perimeter', QVariant.Double,
                                   'double', 16, 2))
        allLinesList = []
        features = vector.features(vlayer)
        current = 0
        progress.setInfo(self.tr('Processing lines...'))
        total = 40.0 / float(len(features))
        for inFeat in features:
            inGeom = inFeat.geometry()
            if inGeom.isMultipart():
                allLinesList.extend(inGeom.asMultiPolyline())
            else:
                allLinesList.append(inGeom.asPolyline())
            current += 1
            progress.setPercentage(int(current * total))
        progress.setPercentage(40)
        allLines = MultiLineString(allLinesList)
        progress.setInfo(self.tr('Noding lines...'))
        # unary_union is only available in newer shapely versions; the union
        # with a dummy point achieves the same noding on older releases.
        try:
            from shapely.ops import unary_union
            allLines = unary_union(allLines)
        except ImportError:
            allLines = allLines.union(Point(0, 0))
        progress.setPercentage(45)
        progress.setInfo(self.tr('Polygonizing...'))
        polygons = list(polygonize([allLines]))
        if not polygons:
            raise GeoAlgorithmExecutionException(self.tr('No polygons were created!'))
        progress.setPercentage(50)
        progress.setInfo('Saving polygons...')
        writer = output.getVectorWriter(fields, QGis.WKBPolygon, vlayer.crs())
        outFeat = QgsFeature()
        current = 0
        total = 50.0 / float(len(polygons))
        for polygon in polygons:
            outFeat.setGeometry(QgsGeometry.fromWkt(polygon.wkt))
            if self.getParameterValue(self.GEOMETRY):
                # Attribute values for the copied fields are left empty (None).
                outFeat.setAttributes([None] * fieldsCount + [polygon.area,
                                      polygon.length])
            writer.addFeature(outFeat)
            current += 1
            progress.setPercentage(50 + int(current * total))
        progress.setInfo(self.tr('Finished'))
        # Deleting the writer flushes and closes the output file.
        del writer
    def defineCharacteristics(self):
        """Declare the algorithm's name, group, parameters and outputs."""
        self.name, self.i18n_name = self.trAlgorithm('Polygonize')
        self.group, self.i18n_group = self.trAlgorithm('Vector geometry tools')
        self.addParameter(ParameterVector(self.INPUT,
                                          self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_LINE]))
        self.addParameter(ParameterBoolean(self.FIELDS,
                                           self.tr('Keep table structure of line layer'), False))
        self.addParameter(ParameterBoolean(self.GEOMETRY,
                                           self.tr('Create geometry columns'), True))
        self.addOutput(OutputVector(self.OUTPUT, self.tr('Polygons from lines')))
| gpl-2.0 |
sdanzige/cmonkey-python | cmonkey/network.py | 1 | 13533 | # vi: sw=4 ts=4 et:
"""network.py - cMonkey network module
This file is part of cMonkey Python. Please see README and LICENSE for
more information and licensing details.
"""
import numpy as np
import logging
import os.path
import cmonkey.util as util
import cmonkey.datamatrix as dm
import cmonkey.scoring as scoring
# Python2/Python3 compatibility
try:
    xrange
except NameError:
    # Python 3 has no xrange; alias it to range so the loops below work.
    xrange = range
class Network:
    """class to represent a network graph.

    The graph is considered undirected.
    For efficiency reasons, edges is a list of (source, target, weight)
    tuples, and an index from node name to its incident edges is kept in
    edges_with_source.
    """

    def __init__(self, name, edges, weight, dummy):
        """creates a network from a list of edges"""
        self.name = name
        self.edges = edges
        self.weight = weight
        self.__compute_edges_with_source()

    def __compute_edges_with_source(self):
        """rebuilds the node -> incident edges index from self.edges"""
        self.edges_with_source = {}
        for edge in self.edges:
            # register each edge under both of its endpoints
            self.edges_with_source.setdefault(edge[0], []).append(edge)
            self.edges_with_source.setdefault(edge[1], []).append(edge)

    def validate(self, synonyms, genes):
        """Change the names in the network to have the standard names in the
        synonyms (elsewhere called the thesaurus). Problem: it does not
        also rename the ratios matrix to the standard names

        Keyword arguments:
        synonyms -- The thesaurus.
        genes -- The gene names from the ratios.

        Raises an Exception when fewer than half of the genes appear in
        the network's edges.
        """
        # remap first
        self.edges = [(synonyms.get(n0, n0), synonyms.get(n1, n1), score)
                      for n0, n1, score in self.edges]
        self.__compute_edges_with_source()

        # then validate: count the distinct genes that occur in at least one
        # edge. The index holds every node with an edge, so a lookup suffices.
        # (The previous implementation counted gene/edge *matches*, which
        # overcounted genes with multiple edges, and its error path printed
        # an undefined name 'edges'.)
        found = [gene for gene in genes
                 if synonyms.get(gene, gene) in self.edges_with_source]
        if len(found) < len(genes) / 2:
            logging.error("network '%s' edges: %s", self.name, self.edges)
            raise Exception("only %d genes found in edges" % len(found))

    def num_edges(self):
        """returns the number of edges in this graph"""
        return len(self.edges)

    def total_score(self):
        """returns the sum of edge scores, doubled because each edge of the
        undirected graph counts in both directions"""
        return sum(edge[2] for edge in self.edges) * 2

    def normalize_scores_to(self, score):
        """normalizes all edge scores so that they sum up to
        the specified score"""
        total = self.total_score()
        if score != total:
            # score_e / score_total * score == score_e * (score_total / score)
            # we use this to save a division per loop iteration
            scale = float(score) / float(total)
            self.edges = [(edge[0], edge[1], edge[2] * scale) for edge in self.edges]
            self.__compute_edges_with_source()

    def edges_with_node(self, node):
        """returns the edges where node is a node of"""
        return self.edges_with_source.get(node, [])

    def __repr__(self):
        return "Network: %s\n# edges: %d\n" % (self.name,
                                               len(self.edges))

    @classmethod
    def create(cls, name, edges, weight, organism=None, ratios=None,
               check_size=True):
        """standard Factory method.

        Drops self-edges and duplicate (reverse) edges. When an organism is
        given, only nodes in its thesaurus are kept; when ratios are also
        given, nodes are further restricted to genes present in the ratios
        matrix. Raises an Exception if no edges are given, or if fewer than
        10 usable edges remain while check_size is True.
        """
        # check for None before taking len() (the previous order crashed
        # with a TypeError instead of raising the intended Exception)
        if edges is None:
            raise Exception("no edges specified in network '%s'" % name)
        logging.debug("Network.create() called with %d edges", len(edges))
        added = set([])
        network_edges = []
        nodes = set()
        for edge in edges:
            nodes.add(edge[0])
            nodes.add(edge[1])
        # Shrink the number of edges to the ones that are actually usable.
        # These are selected by the following considerations:
        # 1. check nodes that are in the thesaurus
        # 2. check gene names that are in the ratios matrix, but not in the network
        # 3. keep the nodes that are in the ratios and are in the thesaurus
        num_nodes_orig = len(nodes)
        if organism:
            thesaurus = organism.thesaurus()
            nodes = {n for n in nodes if n in thesaurus}
            if ratios:
                cano_nodes = {thesaurus[n] for n in nodes}
                cano_genes = {thesaurus[row] for row in ratios.row_names
                              if row in thesaurus}
                probes_in = [gene for gene in cano_genes if gene in cano_nodes]
                nodes = {n for n in nodes if thesaurus[n] in probes_in}
        logging.debug("# nodes in network '%s': %d (of %d)", name, len(nodes), num_nodes_orig)
        for edge in edges:
            # we ignore self-edges, and edges with nodes not in the final nodes
            if edge[0] != edge[1] and edge[0] in nodes and edge[1] in nodes:
                key = "%s:%s" % (edge[0], edge[1])
                key_rev = "%s:%s" % (edge[1], edge[0])
                if key not in added and key_rev not in added:
                    network_edges.append((edge[0], edge[1], edge[2]))
                    added.add(key)
                    added.add(key_rev)
        if check_size and len(network_edges) < 10:
            raise Exception("Error: only %d edges in network '%s'" % (len(network_edges), name))
        logging.debug("Created network '%s' with %d edges", name, len(network_edges))
        return Network(name, network_edges, weight, 0)
# Module-level scratch variables used to hand the (large, read-only) network,
# gene universe and membership to compute_network_scores() without putting
# them into the multiprocessing task payload. They are set and cleared by
# the network scoring function around each scoring run.
COMPUTE_NETWORK = None
ALL_GENES = None
NETWORK_SCORE_MEMBERSHIP = None
def compute_network_scores(cluster):
    """Compute the network scores for one cluster.

    Reads the network, the gene universe and the membership from the module
    globals set up by the network scoring function; this keeps the large
    objects out of the per-task multiprocessing payload.
    """
    global COMPUTE_NETWORK, ALL_GENES, NETWORK_SCORE_MEMBERSHIP
    network = COMPUTE_NETWORK
    cluster_genes = sorted(NETWORK_SCORE_MEMBERSHIP.rows_for_cluster(cluster))
    # Collect, for every neighbor of a cluster gene, the weights of the
    # edges connecting it to the cluster.
    edge_weights = {}
    for gene in cluster_genes:
        for edge in network.edges_with_node(gene):
            neighbor = edge[1] if edge[0] == gene else edge[0]
            if neighbor in ALL_GENES:
                edge_weights.setdefault(neighbor, []).append(edge[2])
    # Average over the cluster size and map through -log(x + 1).
    num_genes = len(cluster_genes)
    return {neighbor: -np.log(sum(weights) / num_genes + 1)
            for neighbor, weights in edge_weights.items()}
class ScoringFunction(scoring.ScoringFunctionBase):
    """Network scoring function. Note that even though there are several
    networks, scoring can't be generalized with the default ScoringCombiner,
    since the scores are computed through weighted addition rather than
    quantile normalization"""
    def __init__(self, organism, membership, ratios, config_params):
        """Create scoring function instance"""
        scoring.ScoringFunctionBase.__init__(self, "Networks", organism, membership,
                                             ratios, config_params)
        # networks are retrieved lazily and cached (see networks())
        self.__networks = None
        self.run_log = scoring.RunLog("network", config_params)
    def initialize(self, args):
        """process additional parameters"""
        # maps network type name -> weight, taken from the configuration
        self.weights = {nw['type']: nw['weight'] for nw in args['networks']}
    def run_logs(self):
        """returns the run logs of this scoring function"""
        return [self.run_log]
    def compute(self, iteration_result, ref_matrix=None):
        """overridden compute for storing additional information"""
        result = scoring.ScoringFunctionBase.compute(self, iteration_result, ref_matrix)
        # expose the per-network score means in the iteration result
        iteration_result['networks'] = self.score_means
        return result
    def compute_force(self, iteration_result, ref_matrix=None):
        """overridden compute for storing additional information"""
        result = scoring.ScoringFunctionBase.compute_force(self, iteration_result, ref_matrix)
        iteration_result['networks'] = self.score_means
        return result
    def networks(self):
        """networks are cached"""
        if self.__networks is None:
            self.__networks = retrieve_networks(self.organism)
            if self.config_params['remap_network_nodes']:
                # network names are non-primary, this can happen
                # when the user makes up their own data
                for network in self.__networks:
                    network.validate(self.organism.thesaurus(),
                                     self.gene_names())
        return self.__networks
    def __update_score_means(self, network_scores):
        """returns the score means, adjusted to the current cluster setup"""
        # a dictionary that holds the network score means for
        # each cluster, separated for each network
        if network_scores:
            score_means = {network.name: self.__compute_cluster_score_means(network_scores[network.name])
                           for network in self.networks()}
            return {network: np.average(np.array(list(cluster_score_means.values())))
                    for network, cluster_score_means in score_means.items()}
        return {}
    def do_compute(self, iteration_result, ref_matrix=None):
        """compute method, iteration is the 0-based iteration number"""
        matrix = dm.DataMatrix(len(self.gene_names()), self.num_clusters(),
                               self.gene_names())
        network_scores = {}
        # score each network separately and accumulate the weighted scores
        # into the shared result matrix
        for network in self.networks():
            logging.debug("Compute scores for network '%s', WEIGHT: %f",
                          network.name, network.weight)
            start_time = util.current_millis()
            network_score = self.__compute_network_cluster_scores(network)
            network_scores[network.name] = network_score
            self.__update_score_matrix(matrix, network_score, network.weight)
            elapsed = util.current_millis() - start_time
            logging.debug("NETWORK '%s' SCORING TIME: %f s.",
                          network.name, (elapsed / 1000.0))
        # compute and store score means
        self.score_means = self.__update_score_means(network_scores)
        return matrix
    def __compute_network_cluster_scores(self, network):
        """computes the cluster scores for the given network"""
        global COMPUTE_NETWORK, ALL_GENES, NETWORK_SCORE_MEMBERSHIP
        result = {}
        use_multiprocessing = self.config_params[
            scoring.KEY_MULTIPROCESSING]
        # Set the huge memory objects into globals
        # These are readonly anyways, but using Manager.list() or something
        # similar brings this down to a crawl
        COMPUTE_NETWORK = network
        ALL_GENES = set(self.gene_names()) # optimization: O(1) lookup
        NETWORK_SCORE_MEMBERSHIP = self.membership
        if use_multiprocessing:
            with util.get_mp_pool(self.config_params) as pool:
                map_results = pool.map(compute_network_scores, xrange(1, self.num_clusters() + 1))
            # pool.map returns results ordered by cluster; re-key by cluster
            for cluster in xrange(1, self.num_clusters() + 1):
                result[cluster] = map_results[cluster - 1]
        else:
            for cluster in xrange(1, self.num_clusters() + 1):
                result[cluster] = compute_network_scores(cluster)
        # cleanup
        COMPUTE_NETWORK = None
        ALL_GENES = None
        NETWORK_SCORE_MEMBERSHIP = None
        return result
    def __update_score_matrix(self, matrix, network_score, weight):
        """add values into the result score matrix"""
        mvalues = matrix.values
        gene_names = self.gene_names()
        for cluster in xrange(1, self.num_clusters() + 1):
            cluster_genes = set(network_score[cluster].keys())
            for row_index in xrange(self.ratios.num_rows):
                gene = gene_names[row_index]
                if gene in cluster_genes:
                    weighted_score = network_score[cluster][gene] * weight
                    mvalues[row_index][cluster - 1] += weighted_score
    def __compute_cluster_score_means(self, network_score):
        """compute the score means on the given network score"""
        result = {}
        for cluster in xrange(1, self.num_clusters() + 1):
            # genes without a score contribute 0; use a trimmed mean to be
            # robust against outlier scores
            cluster_scores = [network_score[cluster][gene]
                              if gene in network_score[cluster] else 0.0
                              for gene in self.rows_for_cluster(cluster)]
            result[cluster] = util.trim_mean(cluster_scores, 0.05)
        return result
def retrieve_networks(organism):
    """retrieves the networks provided by the organism object and
    possibly other sources, doing some normalization if necessary

    Every network is rescaled so its total edge score equals the largest
    total found among all networks.

    Note: wanted to make it private, but the scoring function
    can not see it after doing so"""
    networks = organism.networks()
    # find the largest total score among all networks
    max_score = 0
    for network in networks:
        max_score = max(max_score, network.total_score())
    # rescale every network to that maximum
    for network in networks:
        network.normalize_scores_to(max_score)
    return networks
| lgpl-3.0 |
joehillen/txjason | txjason/service.py | 1 | 21563 | # -*- coding: utf-8 -*-
# The MIT License
#
# Copyright (c) 2010 Juhani Åhman <juhani.ahman@cs.helsinki.fi>
# Copyright (c) 2013 Flowroute LLC <matthew@flowroute.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Simple JSON-RPC service without transport layer
This library is intended as an auxiliary library for easy an implementation of
JSON-RPC services with Unix/TCP socket like transport protocols that do not
have complex special requirements. You need to utilize some suitable transport
protocol with this library to actually provide a working JSON-RPC service.
Features:
- Easy to use, small size, well tested.
- Supports JSON-RPC v2.0. Compatible with v1.x style calls with the exception
of v1.0 class-hinting.
- Optional argument type validation that significantly eases development of
jsonrpc method_data.
Notes:
- Modified by Matthew Williams for use with Twisted.
- Original project at https://bitbucket.org/rsyring/jsonrpcbase
Example:
import jsonrpcbase
chat_service = jsonrpcbase.JSONRPCService()
def login(username, password, timelimit=0):
(...)
return True
def receive_message(**kwargs):
(...)
return chat_message
def send_message(msg):
(...)
if __name__ == '__main__':
# Adds the method login to the service as a 'login'.
chat_service.add(login, types=[basestring, basestring, int])
# Adds the method receive_message to the service as a 'recv_msg'.
chat_service.add(receive_message, name='recv_msg',
types={"msg": basestring, "id": int})
# Adds the method send_message as a 'send_msg' to the service.
chat_service.add(send_message, 'send_msg')
(...)
# Receive a JSON-RPC call.
jsonmsg = my_socket.recv()
# Process the JSON-RPC call.
result = chat_service.call(jsonmsg)
# Send back results.
my_socket.send(result)
"""
import types
import json
from twisted.internet import defer, reactor
from twisted.python import log
DEFAULT_JSONRPC = '2.0'
class JSONRPCService(object):
    """
    A transport-agnostic JSON-RPC service.

    Remote functions are registered with add(); incoming JSON-RPC payloads
    (v1.0, v1.1 or v2.0, single or batch) are dispatched through call() or
    call_py(), which return deferreds firing with the response (or None for
    notifications).
    """

    def __init__(self, timeout=None, reactor=reactor):
        # Registry: method name -> {'method': f[, 'types': ..., 'required': ...]}
        self.method_data = {}
        # Exception class raised for requests received while out of service.
        self.serve_exception = None
        # Fires once the last pending request finishes after stopServing().
        self.out_of_service_deferred = None
        # Deferreds of requests currently being processed.
        self.pending = set()
        # Per-request timeout in seconds; None disables timeouts.
        self.timeout = timeout
        self.reactor = reactor

    def add(self, f, name=None, types=None, required=None):
        """
        Adds a new method to the jsonrpc service.

        Arguments:
        f -- the remote function
        name -- name of the method in the jsonrpc service
        types -- list or dictionary of the types of accepted arguments
        required -- list of required keyword arguments

        If name argument is not given, function's own name will be used.

        Argument types must be a list if positional arguments are used or a
        dictionary if keyword arguments are used in the method in question.

        Argument required MUST be used only for methods requiring keyword
        arguments, not for methods accepting positional arguments.
        """
        if name is None:
            fname = f.__name__  # Register the function using its own name.
        else:
            fname = name
        self.method_data[fname] = {'method': f}
        if types is not None:
            self.method_data[fname]['types'] = types
        if required is not None:
            self.method_data[fname]['required'] = required

    def stopServing(self, exception=None):
        """
        Stop accepting new requests; requests already in flight may finish.

        Returns a deferred that will fire immediately if there are
        no pending requests, otherwise when the last request is removed
        from self.pending.
        """
        if exception is None:
            exception = ServiceUnavailableError
        self.serve_exception = exception
        if self.pending:
            d = self.out_of_service_deferred = defer.Deferred()
            return d
        return defer.succeed(None)

    def startServing(self):
        """Resume accepting requests after a stopServing() call."""
        self.serve_exception = None
        self.out_of_service_deferred = None

    def cancelPending(self):
        """Cancel every in-flight request deferred."""
        # Iterate over a copy: cancelling removes entries from self.pending.
        pending = self.pending.copy()
        for i in pending:
            i.cancel()

    @defer.inlineCallbacks
    def call(self, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in a JSON
        string or None if there is none.

        Arguments:
        jsondata -- remote method call in jsonrpc format
        """
        result = yield self.call_py(jsondata)
        if result is None:
            defer.returnValue(None)
        else:
            defer.returnValue(json.dumps(result))

    @defer.inlineCallbacks
    def call_py(self, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in python
        object format or None if there is none.

        This method is same as call() except the return value is a python
        object instead of JSON string. This method is mainly only useful for
        debugging purposes.
        """
        try:
            try:
                rdata = json.loads(jsondata)
            except ValueError:
                raise ParseError
        except ParseError, e:
            # Parse errors are always answered, even for notifications.
            defer.returnValue(self._get_err(e))
            return

        # set some default values for error handling
        request = self._get_default_vals()
        try:
            if isinstance(rdata, dict) and rdata:
                # It's a single request.
                self._fill_request(request, rdata)
                respond = yield self._handle_request(request)

                # Don't respond to notifications
                if respond is None:
                    defer.returnValue(None)
                else:
                    defer.returnValue(respond)
                return
            elif isinstance(rdata, list) and rdata:
                # It's a batch.
                requests = []
                responds = []

                # First pass: validate each sub-request; invalid ones are
                # answered (or dropped if notifications) right away.
                for rdata_ in rdata:
                    # set some default values for error handling
                    request_ = self._get_default_vals()
                    try:
                        self._fill_request(request_, rdata_)
                    except InvalidRequestError, e:
                        err = self._get_err(e, request_['id'])
                        if err:
                            responds.append(err)
                        continue
                    except JSONRPCError, e:
                        err = self._get_err(e, request_['id'])
                        if err:
                            responds.append(err)
                        continue
                    requests.append(request_)

                # Second pass: execute the valid sub-requests sequentially.
                for request_ in requests:
                    try:
                        # TODO: We should use a deferred list so requests
                        # are processed in parallel
                        respond = yield self._handle_request(request_)
                    except JSONRPCError, e:
                        respond = self._get_err(e,
                                                request_['id'],
                                                request_['jsonrpc'])
                    # Don't respond to notifications
                    if respond is not None:
                        responds.append(respond)

                if responds:
                    defer.returnValue(responds)
                    return

                # Nothing to respond.
                defer.returnValue(None)
                return
            else:
                # empty dict, list or wrong type
                raise InvalidRequestError
        except InvalidRequestError, e:
            defer.returnValue(self._get_err(e, request['id']))
        except JSONRPCError, e:
            defer.returnValue(self._get_err(e,
                                            request['id'],
                                            request['jsonrpc']))

    def _get_err(self, e, id=None, jsonrpc=DEFAULT_JSONRPC):
        """
        Returns jsonrpc error message (a dict), or None when the request
        was a valid notification and therefore must not be answered.
        """
        # Do not respond to notifications when the request is valid.
        if not id \
                and not isinstance(e, ParseError) \
                and not isinstance(e, InvalidRequestError):
            return None
        respond = {'id': id}
        # jsonrpc is an int (20/11/10) for parsed requests, or the default
        # version string when the request never got that far.
        if isinstance(jsonrpc, int):
            # v1.0 requires result to exist always.
            # No error codes are defined in v1.0 so only use the message.
            if jsonrpc == 10:
                respond['result'] = None
                respond['error'] = e.dumps()['message']
            else:
                self._fill_ver(jsonrpc, respond)
                respond['error'] = e.dumps()
        else:
            respond['jsonrpc'] = jsonrpc
            respond['error'] = e.dumps()
        return respond

    def _fill_ver(self, iver, respond):
        """
        Fills version information to the respond from the internal integer
        version (20 -> 'jsonrpc': '2.0', 11 -> 'version': '1.1').
        """
        if iver == 20:
            respond['jsonrpc'] = '2.0'
        if iver == 11:
            respond['version'] = '1.1'

    def _vargs(self, f):
        """
        Returns True if given function accepts variadic positional arguments,
        otherwise False.
        """
        # CO_VARARGS (0x04) code flag is set when the signature has *args.
        if f.func_code.co_flags & 4:
            return True
        return False

    def _man_args(self, f):
        """
        Returns number of mandatory arguments required by given function.
        """
        argcount = f.func_code.co_argcount
        # account for "self" getting passed to class instance methods
        if isinstance(f, types.MethodType):
            argcount -= 1
        if f.func_defaults is None:
            return argcount
        return argcount - len(f.func_defaults)

    def _max_args(self, f):
        """
        Returns maximum number of arguments accepted by given function.
        """
        # NOTE(review): co_argcount already includes parameters that have
        # defaults, so adding len(func_defaults) appears to over-count; it
        # also does not subtract "self" the way _man_args does. Confirm
        # whether the lax "too many arguments" check is intended.
        if f.func_defaults is None:
            return f.func_code.co_argcount
        return f.func_code.co_argcount + len(f.func_defaults)

    def _get_jsonrpc(self, rdata):
        """
        Returns jsonrpc request's jsonrpc value as an internal int
        (20, 11 or 10).

        InvalidRequestError will be raised if the jsonrpc value has invalid
        value.
        """
        if 'jsonrpc' in rdata:
            if rdata['jsonrpc'] == '2.0':
                return 20
            else:
                # invalid version
                raise InvalidRequestError
        else:
            # It's probably a JSON-RPC v1.x style call.
            if 'version' in rdata:
                if rdata['version'] == '1.1':
                    return 11
            # Assume v1.0.
            return 10

    def _get_id(self, rdata):
        """
        Returns jsonrpc request's id value or None if there is none.

        InvalidRequestError will be raised if the id value has invalid type.
        """
        if 'id' in rdata:
            if isinstance(rdata['id'], basestring) or \
                    isinstance(rdata['id'], int) or \
                    isinstance(rdata['id'], long) or \
                    isinstance(rdata['id'], float) or \
                    rdata['id'] is None:
                return rdata['id']
            else:
                # invalid type
                raise InvalidRequestError
        else:
            # It's a notification.
            return None

    def _get_method(self, rdata):
        """
        Returns jsonrpc request's method value.

        InvalidRequestError will be raised if it's missing or is wrong type.
        MethodNotFoundError will be raised if a method with given method name
        does not exist.
        """
        if 'method' in rdata:
            if not isinstance(rdata['method'], basestring):
                raise InvalidRequestError
        else:
            raise InvalidRequestError
        if rdata['method'] not in self.method_data.keys():
            raise MethodNotFoundError
        return rdata['method']

    def _get_params(self, rdata):
        """
        Returns jsonrpc request's method parameters (list, dict or None).
        """
        if 'params' in rdata:
            if isinstance(rdata['params'], dict) \
                    or isinstance(rdata['params'], list) \
                    or rdata['params'] is None:
                return rdata['params']
            else:
                # wrong type
                raise InvalidRequestError
        else:
            return None

    def _fill_request(self, request, rdata):
        """Fills request with data from the jsonrpc call."""
        if not isinstance(rdata, dict):
            raise InvalidRequestError
        request['jsonrpc'] = self._get_jsonrpc(rdata)
        request['id'] = self._get_id(rdata)
        request['method'] = self._get_method(rdata)
        request['params'] = self._get_params(rdata)

    @defer.inlineCallbacks
    def _call_method(self, request):
        """Calls given method with given params and returns it value."""
        method = self.method_data[request['method']]['method']
        params = request['params']
        result = None
        try:
            if isinstance(params, list):
                # Does it have enough arguments?
                if len(params) < self._man_args(method):
                    raise InvalidParamsError('not enough arguments')
                # Does it have too many arguments?
                if not self._vargs(method) \
                        and len(params) > self._max_args(method):
                    raise InvalidParamsError('too many arguments')
                result = yield defer.maybeDeferred(method, *params)
            elif isinstance(params, dict):
                # Do not accept keyword arguments if the jsonrpc version is
                # not >=1.1.
                if request['jsonrpc'] < 11:
                    raise KeywordError
                result = yield defer.maybeDeferred(method, **params)
            else:  # No params
                result = yield defer.maybeDeferred(method)
        except JSONRPCError:
            # JSON-RPC faults pass through untouched so the caller can
            # serialize their codes/messages.
            raise
        except Exception:
            # Exception was raised inside the method.
            log.msg('Exception raised while invoking RPC method "{}".'.format(
                request['method']))
            log.err()
            raise ServerError
        defer.returnValue(result)

    def _remove_pending(self, d):
        """Forget a finished request deferred and, once the last one is
        gone, fire the deferred handed out by stopServing()."""
        self.pending.remove(d)
        if self.out_of_service_deferred and not self.pending:
            self.out_of_service_deferred.callback(None)

    @defer.inlineCallbacks
    def _handle_request(self, request):
        """Handles given request and returns its response."""
        if 'types' in self.method_data[request['method']]:
            self._validate_params_types(request['method'], request['params'])

        if self.serve_exception:
            raise self.serve_exception()

        d = self._call_method(request)
        self.pending.add(d)

        if self.timeout:
            # Schedule cancellation of the request after self.timeout secs.
            timeout_deferred = self.reactor.callLater(self.timeout, d.cancel)

            def completed(result):
                if timeout_deferred.active():
                    # cancel the timeout_deferred if it has not been fired yet
                    # this is to prevent d's deferred chain from firing twice
                    # (and raising an exception).
                    timeout_deferred.cancel()
                return result
            d.addBoth(completed)
        try:
            result = yield d
        except defer.CancelledError:
            # The request was cancelled due to a timeout or by cancelPending
            # having been called. We return a TimeoutError to the client.
            self._remove_pending(d)
            raise TimeoutError()
        except Exception as e:
            self._remove_pending(d)
            raise e
        self._remove_pending(d)

        # Do not respond to notifications.
        if request['id'] is None:
            defer.returnValue(None)

        respond = {}
        self._fill_ver(request['jsonrpc'], respond)
        respond['result'] = result
        respond['id'] = request['id']
        defer.returnValue(respond)

    def _get_default_vals(self):
        """
        Returns dictionary containing default jsonrpc request/responds values
        for error handling purposes.
        """
        return {"jsonrpc": DEFAULT_JSONRPC, "id": None}

    def _validate_params_types(self, method, params):
        """
        Validates request's parameter types against the 'types' (and
        optional 'required') declarations registered via add().
        """
        if isinstance(params, list):
            if not isinstance(self.method_data[method]['types'], list):
                raise InvalidParamsError(
                    'expected keyword params, not positional')
            # None is always accepted in place of any declared type.
            for param, type, posnum in zip(params,
                                           self.method_data[method]['types'],
                                           range(1, len(params) + 1)):
                if not (isinstance(param, type) or param is None):
                    raise InvalidParamsError(
                        'positional arg #{} is the wrong type'.format(posnum))
        elif isinstance(params, dict):
            if not isinstance(self.method_data[method]['types'], dict):
                raise InvalidParamsError(
                    'expected positional params, not keyword')
            if 'required' in self.method_data[method]:
                for key in self.method_data[method]['required']:
                    if key not in params:
                        raise InvalidParamsError('missing key: %s' % key)
            for key in params.keys():
                if key not in self.method_data[method]['types'] or \
                        not (isinstance(params[key],
                                        self.method_data[method]['types'][key])
                             or params[key] is None):
                    raise InvalidParamsError(
                        'arg "{}" is the wrong type'.format(key))
class JSONRPCError(Exception):
    """
    Base exception for JSON-RPC faults, following the JSON-RPC 2.0 specs.

    Class attributes (overridden by subclasses):
    code    -- numeric JSON-RPC error code
    message -- short human-readable description
    data    -- optional extra information about the error
    """
    code = 0
    message = None
    data = None

    def __init__(self, message=None):
        """Initialize the exception; a non-None message replaces the
        class-level default."""
        if message is not None:
            self.message = message

    def dumps(self):
        """Serialize this exception as a JSON-RPC 'error' member (a dict)."""
        if self.data is None:
            return {'code': self.code, 'message': str(self.message)}
        return {'code': self.code,
                'message': str(self.message),
                'data': self.data}
#==============================================================================
# Exceptions
#
# The error-codes -32768 .. -32000 (inclusive) are reserved for pre-defined
# errors.
#
# Any error-code within this range not defined explicitly below is reserved
# for future use
#==============================================================================
class ParseError(JSONRPCError):
    """Invalid JSON. An error occurred on the server while parsing the JSON
    text."""
    code = -32700  # pre-defined code from the JSON-RPC 2.0 spec
    message = 'Parse error'
class InvalidRequestError(JSONRPCError):
    """The received JSON is not a valid JSON-RPC Request."""
    code = -32600  # pre-defined code from the JSON-RPC 2.0 spec
    message = 'Invalid request'
class MethodNotFoundError(JSONRPCError):
    """The requested remote-procedure does not exist / is not available."""
    code = -32601  # pre-defined code from the JSON-RPC 2.0 spec
    message = 'Method not found'
class InvalidParamsError(JSONRPCError):
    """Invalid method parameters."""
    code = -32602  # pre-defined code from the JSON-RPC 2.0 spec
    message = 'Invalid params'

    def __init__(self, data=None):
        # The detail string is carried in the JSON-RPC 'data' member;
        # 'message' stays at the generic class-level default.
        self.data = data
class InternalError(JSONRPCError):
    """Internal JSON-RPC error."""
    code = -32603  # pre-defined code from the JSON-RPC 2.0 spec
    message = 'Internal error'


# -32099..-32000 Server error. Reserved for implementation-defined
# server-errors.
class KeywordError(JSONRPCError):
    """The received JSON-RPC request is trying to use keyword arguments even
    though its version is 1.0."""
    code = -32099  # implementation-defined server-error range
    message = 'Keyword argument error'
class TimeoutError(JSONRPCError):
    """The request took too long to process."""
    # NOTE(review): the name shadows Python 3's builtin TimeoutError --
    # harmless under Python 2 but worth renaming if this code is ported.
    code = -32098  # implementation-defined server-error range
    message = 'Server Timeout'
class ServiceUnavailableError(JSONRPCError):
    """The service is not available (stopServing called)."""
    code = -32097  # implementation-defined server-error range
    message = 'Service Unavailable'
class ServerError(JSONRPCError):
    """Generic server error: an unexpected exception escaped an RPC
    method (see JSONRPCService._call_method)."""
    code = -32000  # implementation-defined server-error range
    message = 'Server error'
| mit |
abzaloid/maps | django-project/lib/python2.7/site-packages/django/contrib/admin/options.py | 66 | 84355 | import copy
import operator
import warnings
from collections import OrderedDict
from functools import partial, reduce, update_wrapper
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.admin import helpers, validation, widgets
from django.contrib.admin.checks import (
BaseModelAdminChecks, InlineModelAdminChecks, ModelAdminChecks,
)
from django.contrib.admin.exceptions import DisallowedModelAdminToField
from django.contrib.admin.templatetags.admin_static import static
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.utils import (
NestedObjects, flatten_fieldsets, get_deleted_objects,
lookup_needs_distinct, model_format_dict, quote, unquote,
)
from django.contrib.auth import get_permission_codename
from django.core import checks
from django.core.exceptions import (
FieldDoesNotExist, FieldError, ImproperlyConfigured, PermissionDenied,
ValidationError,
)
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.db import models, router, transaction
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import BLANK_CHOICE_DASH
from django.db.models.fields.related import ForeignObjectRel
from django.db.models.sql.constants import QUERY_TERMS
from django.forms.formsets import DELETION_FIELD_NAME, all_valid
from django.forms.models import (
BaseInlineFormSet, inlineformset_factory, modelform_defines_fields,
modelform_factory, modelformset_factory,
)
from django.forms.widgets import CheckboxSelectMultiple, SelectMultiple
from django.http import Http404, HttpResponseRedirect
from django.http.response import HttpResponseBase
from django.template.response import SimpleTemplateResponse, TemplateResponse
from django.utils import six
from django.utils.decorators import method_decorator
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html import escape, escapejs
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
from django.utils.text import capfirst, get_text_list
from django.utils.translation import string_concat, ugettext as _, ungettext
from django.views.decorators.csrf import csrf_protect
# GET/POST parameter names the admin uses to mark popup windows and
# related-field ("to field") references in its views.
IS_POPUP_VAR = '_popup'
TO_FIELD_VAR = '_to_field'

# Allowed values for ModelAdmin.radio_fields (radio button layout).
HORIZONTAL, VERTICAL = 1, 2
def get_content_type_for_model(obj):
    """Return the ContentType for ``obj`` (model class or instance) without
    resolving proxy models to their concrete model."""
    # Since this module gets imported in the application's root package,
    # it cannot import models from other applications at the module level.
    from django.contrib.contenttypes.models import ContentType
    return ContentType.objects.get_for_model(obj, for_concrete_model=False)
def get_ul_class(radio_style):
    """Return the admin CSS class for a radio_fields widget: a stacked
    list for VERTICAL, an inline list otherwise."""
    if radio_style == VERTICAL:
        return 'radiolist'
    return 'radiolist inline'
class IncorrectLookupParameters(Exception):
    """Raised when the changelist receives query parameters that cannot be
    turned into a valid queryset lookup."""
    pass
# Defaults for formfield_overrides. ModelAdmin subclasses can change this
# by adding to ModelAdmin.formfield_overrides.
# Maps model field classes to the kwargs passed to their formfield() call
# (admin-specific widgets, and a split form field for datetimes).
FORMFIELD_FOR_DBFIELD_DEFAULTS = {
    models.DateTimeField: {
        'form_class': forms.SplitDateTimeField,
        'widget': widgets.AdminSplitDateTime
    },
    models.DateField: {'widget': widgets.AdminDateWidget},
    models.TimeField: {'widget': widgets.AdminTimeWidget},
    models.TextField: {'widget': widgets.AdminTextareaWidget},
    models.URLField: {'widget': widgets.AdminURLFieldWidget},
    models.IntegerField: {'widget': widgets.AdminIntegerFieldWidget},
    models.BigIntegerField: {'widget': widgets.AdminBigIntegerFieldWidget},
    models.CharField: {'widget': widgets.AdminTextInputWidget},
    models.ImageField: {'widget': widgets.AdminFileWidget},
    models.FileField: {'widget': widgets.AdminFileWidget},
    models.EmailField: {'widget': widgets.AdminEmailInputWidget},
}

# Adapt the function-based csrf_protect decorator so it can be applied to
# ModelAdmin view *methods*.
csrf_protect_m = method_decorator(csrf_protect)
class BaseModelAdmin(six.with_metaclass(forms.MediaDefiningClass)):
"""Functionality common to both ModelAdmin and InlineAdmin."""
raw_id_fields = ()
fields = None
exclude = None
fieldsets = None
form = forms.ModelForm
filter_vertical = ()
filter_horizontal = ()
radio_fields = {}
prepopulated_fields = {}
formfield_overrides = {}
readonly_fields = ()
ordering = None
view_on_site = True
show_full_result_count = True
# Validation of ModelAdmin definitions
# Old, deprecated style:
validator_class = None
default_validator_class = validation.BaseValidator
# New style:
checks_class = BaseModelAdminChecks
@classmethod
def validate(cls, model):
warnings.warn(
'ModelAdmin.validate() is deprecated. Use "check()" instead.',
RemovedInDjango19Warning)
if cls.validator_class:
validator = cls.validator_class()
else:
validator = cls.default_validator_class()
validator.validate(cls, model)
@classmethod
def check(cls, model, **kwargs):
if cls.validator_class:
warnings.warn(
'ModelAdmin.validator_class is deprecated. '
'ModelAdmin validators must be converted to use '
'the system check framework.',
RemovedInDjango19Warning)
validator = cls.validator_class()
try:
validator.validate(cls, model)
except ImproperlyConfigured as e:
return [checks.Error(e.args[0], hint=None, obj=cls)]
else:
return []
else:
return cls.checks_class().check(cls, model, **kwargs)
def __init__(self):
overrides = FORMFIELD_FOR_DBFIELD_DEFAULTS.copy()
overrides.update(self.formfield_overrides)
self.formfield_overrides = overrides
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Hook for specifying the form Field instance for a given database Field
instance.
If kwargs are given, they're passed to the form Field's constructor.
"""
request = kwargs.pop("request", None)
# If the field specifies choices, we don't need to look for special
# admin widgets - we just need to use a select widget of some kind.
if db_field.choices:
return self.formfield_for_choice_field(db_field, request, **kwargs)
# ForeignKey or ManyToManyFields
if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)):
# Combine the field kwargs with any options for formfield_overrides.
# Make sure the passed in **kwargs override anything in
# formfield_overrides because **kwargs is more specific, and should
# always win.
if db_field.__class__ in self.formfield_overrides:
kwargs = dict(self.formfield_overrides[db_field.__class__], **kwargs)
# Get the correct formfield.
if isinstance(db_field, models.ForeignKey):
formfield = self.formfield_for_foreignkey(db_field, request, **kwargs)
elif isinstance(db_field, models.ManyToManyField):
formfield = self.formfield_for_manytomany(db_field, request, **kwargs)
# For non-raw_id fields, wrap the widget with a wrapper that adds
# extra HTML -- the "add other" interface -- to the end of the
# rendered output. formfield can be None if it came from a
# OneToOneField with parent_link=True or a M2M intermediary.
if formfield and db_field.name not in self.raw_id_fields:
related_modeladmin = self.admin_site._registry.get(db_field.rel.to)
wrapper_kwargs = {}
if related_modeladmin:
wrapper_kwargs.update(
can_add_related=related_modeladmin.has_add_permission(request),
can_change_related=related_modeladmin.has_change_permission(request),
can_delete_related=related_modeladmin.has_delete_permission(request),
)
formfield.widget = widgets.RelatedFieldWidgetWrapper(
formfield.widget, db_field.rel, self.admin_site, **wrapper_kwargs
)
return formfield
# If we've got overrides for the formfield defined, use 'em. **kwargs
# passed to formfield_for_dbfield override the defaults.
for klass in db_field.__class__.mro():
if klass in self.formfield_overrides:
kwargs = dict(copy.deepcopy(self.formfield_overrides[klass]), **kwargs)
return db_field.formfield(**kwargs)
# For any other type of field, just call its formfield() method.
return db_field.formfield(**kwargs)
def formfield_for_choice_field(self, db_field, request=None, **kwargs):
"""
Get a form Field for a database Field that has declared choices.
"""
# If the field is named as a radio_field, use a RadioSelect
if db_field.name in self.radio_fields:
# Avoid stomping on custom widget/choices arguments.
if 'widget' not in kwargs:
kwargs['widget'] = widgets.AdminRadioSelect(attrs={
'class': get_ul_class(self.radio_fields[db_field.name]),
})
if 'choices' not in kwargs:
kwargs['choices'] = db_field.get_choices(
include_blank=db_field.blank,
blank_choice=[('', _('None'))]
)
return db_field.formfield(**kwargs)
def get_field_queryset(self, db, db_field, request):
"""
If the ModelAdmin specifies ordering, the queryset should respect that
ordering. Otherwise don't specify the queryset, let the field decide
(returns None in that case).
"""
related_admin = self.admin_site._registry.get(db_field.rel.to, None)
if related_admin is not None:
ordering = related_admin.get_ordering(request)
if ordering is not None and ordering != ():
return db_field.rel.to._default_manager.using(db).order_by(*ordering)
return None
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
"""
Get a form Field for a ForeignKey.
"""
db = kwargs.get('using')
if db_field.name in self.raw_id_fields:
kwargs['widget'] = widgets.ForeignKeyRawIdWidget(db_field.rel,
self.admin_site, using=db)
elif db_field.name in self.radio_fields:
kwargs['widget'] = widgets.AdminRadioSelect(attrs={
'class': get_ul_class(self.radio_fields[db_field.name]),
})
kwargs['empty_label'] = _('None') if db_field.blank else None
if 'queryset' not in kwargs:
queryset = self.get_field_queryset(db, db_field, request)
if queryset is not None:
kwargs['queryset'] = queryset
return db_field.formfield(**kwargs)
def formfield_for_manytomany(self, db_field, request=None, **kwargs):
"""
Get a form Field for a ManyToManyField.
"""
# If it uses an intermediary model that isn't auto created, don't show
# a field in admin.
if not db_field.rel.through._meta.auto_created:
return None
db = kwargs.get('using')
if db_field.name in self.raw_id_fields:
kwargs['widget'] = widgets.ManyToManyRawIdWidget(db_field.rel,
self.admin_site, using=db)
kwargs['help_text'] = ''
elif db_field.name in (list(self.filter_vertical) + list(self.filter_horizontal)):
kwargs['widget'] = widgets.FilteredSelectMultiple(
db_field.verbose_name,
db_field.name in self.filter_vertical
)
if 'queryset' not in kwargs:
queryset = self.get_field_queryset(db, db_field, request)
if queryset is not None:
kwargs['queryset'] = queryset
form_field = db_field.formfield(**kwargs)
if isinstance(form_field.widget, SelectMultiple) and not isinstance(form_field.widget, CheckboxSelectMultiple):
msg = _('Hold down "Control", or "Command" on a Mac, to select more than one.')
help_text = form_field.help_text
form_field.help_text = string_concat(help_text, ' ', msg) if help_text else msg
return form_field
def get_view_on_site_url(self, obj=None):
if obj is None or not self.view_on_site:
return None
if callable(self.view_on_site):
return self.view_on_site(obj)
elif self.view_on_site and hasattr(obj, 'get_absolute_url'):
# use the ContentType lookup if view_on_site is True
return reverse('admin:view_on_site', kwargs={
'content_type_id': get_content_type_for_model(obj).pk,
'object_id': obj.pk
})
@property
def declared_fieldsets(self):
warnings.warn(
"ModelAdmin.declared_fieldsets is deprecated and "
"will be removed in Django 1.9.",
RemovedInDjango19Warning, stacklevel=2
)
if self.fieldsets:
return self.fieldsets
elif self.fields:
return [(None, {'fields': self.fields})]
return None
def get_fields(self, request, obj=None):
"""
Hook for specifying fields.
"""
return self.fields
def get_fieldsets(self, request, obj=None):
"""
Hook for specifying fieldsets.
"""
# We access the property and check if it triggers a warning.
# If it does, then it's ours and we can safely ignore it, but if
# it doesn't then it has been overridden so we must warn about the
# deprecation.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
declared_fieldsets = self.declared_fieldsets
if len(w) != 1 or not issubclass(w[0].category, RemovedInDjango19Warning):
warnings.warn(
"ModelAdmin.declared_fieldsets is deprecated and "
"will be removed in Django 1.9.",
RemovedInDjango19Warning
)
if declared_fieldsets:
return declared_fieldsets
if self.fieldsets:
return self.fieldsets
return [(None, {'fields': self.get_fields(request, obj)})]
def get_ordering(self, request):
"""
Hook for specifying field ordering.
"""
return self.ordering or () # otherwise we might try to *None, which is bad ;)
def get_readonly_fields(self, request, obj=None):
"""
Hook for specifying custom readonly fields.
"""
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
"""
Hook for specifying custom prepopulated fields.
"""
return self.prepopulated_fields
def get_queryset(self, request):
"""
Returns a QuerySet of all model instances that can be edited by the
admin site. This is used by changelist_view.
"""
qs = self.model._default_manager.get_queryset()
# TODO: this should be handled by some parameter to the ChangeList.
ordering = self.get_ordering(request)
if ordering:
qs = qs.order_by(*ordering)
return qs
def lookup_allowed(self, lookup, value):
from django.contrib.admin.filters import SimpleListFilter
model = self.model
# Check FKey lookups that are allowed, so that popups produced by
# ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to,
# are allowed to work.
for l in model._meta.related_fkey_lookups:
# As ``limit_choices_to`` can be a callable, invoke it here.
if callable(l):
l = l()
for k, v in widgets.url_params_from_lookup_dict(l).items():
if k == lookup and v == value:
return True
parts = lookup.split(LOOKUP_SEP)
# Last term in lookup is a query term (__exact, __startswith etc)
# This term can be ignored.
if len(parts) > 1 and parts[-1] in QUERY_TERMS:
parts.pop()
# Special case -- foo__id__exact and foo__id queries are implied
# if foo has been specifically included in the lookup list; so
# drop __id if it is the last part. However, first we need to find
# the pk attribute name.
rel_name = None
for part in parts[:-1]:
try:
field = model._meta.get_field(part)
except FieldDoesNotExist:
# Lookups on non-existent fields are ok, since they're ignored
# later.
return True
if hasattr(field, 'rel'):
if field.rel is None:
# This property or relation doesn't exist, but it's allowed
# since it's ignored in ChangeList.get_filters().
return True
model = field.rel.to
if hasattr(field.rel, 'get_related_field'):
rel_name = field.rel.get_related_field().name
else:
rel_name = None
elif isinstance(field, ForeignObjectRel):
model = field.related_model
rel_name = model._meta.pk.name
else:
rel_name = None
if rel_name and len(parts) > 1 and parts[-1] == rel_name:
parts.pop()
if len(parts) == 1:
return True
clean_lookup = LOOKUP_SEP.join(parts)
valid_lookups = [self.date_hierarchy]
for filter_item in self.list_filter:
if isinstance(filter_item, type) and issubclass(filter_item, SimpleListFilter):
valid_lookups.append(filter_item.parameter_name)
elif isinstance(filter_item, (list, tuple)):
valid_lookups.append(filter_item[0])
else:
valid_lookups.append(filter_item)
return clean_lookup in valid_lookups
    def to_field_allowed(self, request, to_field):
        """
        Returns True if the model associated with this admin should be
        allowed to be referenced by the specified field.

        Used to validate the TO_FIELD_VAR querystring parameter against
        arbitrary field references.
        """
        opts = self.model._meta
        try:
            field = opts.get_field(to_field)
        except FieldDoesNotExist:
            return False
        # Always allow referencing the primary key since it's already possible
        # to get this information from the change view URL.
        if field.primary_key:
            return True
        # Allow reverse relationships to models defining m2m fields if they
        # target the specified field.
        for many_to_many in opts.many_to_many:
            if many_to_many.m2m_target_field_name() == to_field:
                return True
        # Make sure at least one of the models registered for this site
        # references this field through a FK or a M2M relationship.
        registered_models = set()
        for model, admin in self.admin_site._registry.items():
            registered_models.add(model)
            for inline in admin.inlines:
                registered_models.add(inline.model)
        # Auto-created, non-concrete fields are the reverse relations.
        related_objects = (
            f for f in opts.get_fields(include_hidden=True)
            if (f.auto_created and not f.concrete)
        )
        for related_object in related_objects:
            related_model = related_object.related_model
            if (any(issubclass(model, related_model) for model in registered_models) and
                    related_object.field.rel.get_related_field() == field):
                return True
        return False
def has_add_permission(self, request):
"""
Returns True if the given request has permission to add an object.
Can be overridden by the user in subclasses.
"""
opts = self.opts
codename = get_permission_codename('add', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_change_permission(self, request, obj=None):
"""
Returns True if the given request has permission to change the given
Django model instance, the default implementation doesn't examine the
`obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to change the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to change *any* object of the given type.
"""
opts = self.opts
codename = get_permission_codename('change', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_delete_permission(self, request, obj=None):
"""
Returns True if the given request has permission to change the given
Django model instance, the default implementation doesn't examine the
`obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to delete the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to delete *any* object of the given type.
"""
opts = self.opts
codename = get_permission_codename('delete', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_module_permission(self, request):
"""
Returns True if the given request has any permission in the given
app label.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to view the module on
the admin index page and access the module's index page. Overriding it
does not restrict access to the add, change or delete views. Use
`ModelAdmin.has_(add|change|delete)_permission` for that.
"""
return request.user.has_module_perms(self.opts.app_label)
@python_2_unicode_compatible
class ModelAdmin(BaseModelAdmin):
    "Encapsulates all admin options and functionality for a given model."
    # Changelist presentation.
    list_display = ('__str__',)
    list_display_links = ()
    list_filter = ()
    list_select_related = False
    # Changelist pagination and editing.
    list_per_page = 100
    list_max_show_all = 200
    list_editable = ()
    search_fields = ()
    date_hierarchy = None
    # Change-form save-button behavior.
    save_as = False
    save_on_top = False
    paginator = Paginator
    preserve_filters = True
    inlines = []
    # Custom templates (designed to be over-ridden in subclasses)
    add_form_template = None
    change_form_template = None
    change_list_template = None
    delete_confirmation_template = None
    delete_selected_confirmation_template = None
    object_history_template = None
    # Actions
    actions = []
    action_form = helpers.ActionForm
    actions_on_top = True
    actions_on_bottom = False
    actions_selection_counter = True
    # validation
    # Old, deprecated style:
    default_validator_class = validation.ModelAdminValidator
    # New style:
    checks_class = ModelAdminChecks
    def __init__(self, model, admin_site):
        # ``model``: the Model class being administered.
        # ``admin_site``: the AdminSite instance this admin is registered on.
        self.model = model
        self.opts = model._meta  # convenience alias for the model's Options
        self.admin_site = admin_site
        super(ModelAdmin, self).__init__()
def __str__(self):
return "%s.%s" % (self.model._meta.app_label, self.__class__.__name__)
def get_inline_instances(self, request, obj=None):
inline_instances = []
for inline_class in self.inlines:
inline = inline_class(self.model, self.admin_site)
if request:
if not (inline.has_add_permission(request) or
inline.has_change_permission(request, obj) or
inline.has_delete_permission(request, obj)):
continue
if not inline.has_add_permission(request):
inline.max_num = 0
inline_instances.append(inline)
return inline_instances
    def get_urls(self):
        """
        Return URL patterns for this ModelAdmin: changelist, add, history,
        delete and change views, each wrapped in AdminSite.admin_view().
        """
        from django.conf.urls import url
        def wrap(view):
            # Route the view through admin_view() while preserving its
            # metadata for introspection.
            def wrapper(*args, **kwargs):
                return self.admin_site.admin_view(view)(*args, **kwargs)
            return update_wrapper(wrapper, view)
        info = self.model._meta.app_label, self.model._meta.model_name
        # NOTE: the catch-all r'^(.+)/$' change pattern must stay last or it
        # would shadow the history/delete patterns above it.
        urlpatterns = [
            url(r'^$', wrap(self.changelist_view), name='%s_%s_changelist' % info),
            url(r'^add/$', wrap(self.add_view), name='%s_%s_add' % info),
            url(r'^(.+)/history/$', wrap(self.history_view), name='%s_%s_history' % info),
            url(r'^(.+)/delete/$', wrap(self.delete_view), name='%s_%s_delete' % info),
            url(r'^(.+)/$', wrap(self.change_view), name='%s_%s_change' % info),
        ]
        return urlpatterns
def urls(self):
return self.get_urls()
urls = property(urls)
@property
def media(self):
extra = '' if settings.DEBUG else '.min'
js = [
'core.js',
'admin/RelatedObjectLookups.js',
'jquery%s.js' % extra,
'jquery.init.js'
]
if self.actions is not None:
js.append('actions%s.js' % extra)
if self.prepopulated_fields:
js.extend(['urlify.js', 'prepopulate%s.js' % extra])
return forms.Media(js=[static('admin/js/%s' % url) for url in js])
def get_model_perms(self, request):
"""
Returns a dict of all perms for this model. This dict has the keys
``add``, ``change``, and ``delete`` mapping to the True/False for each
of those actions.
"""
return {
'add': self.has_add_permission(request),
'change': self.has_change_permission(request),
'delete': self.has_delete_permission(request),
}
def get_fields(self, request, obj=None):
if self.fields:
return self.fields
form = self.get_form(request, obj, fields=None)
return list(form.base_fields) + list(self.get_readonly_fields(request, obj))
    def get_form(self, request, obj=None, **kwargs):
        """
        Returns a Form class for use in the admin add view. This is used by
        add_view and change_view.
        """
        # An explicit ``fields`` kwarg (even None) takes precedence over the
        # flattened fieldsets.
        if 'fields' in kwargs:
            fields = kwargs.pop('fields')
        else:
            fields = flatten_fieldsets(self.get_fieldsets(request, obj))
        if self.exclude is None:
            exclude = []
        else:
            exclude = list(self.exclude)
        # Read-only fields must not appear on the generated form.
        exclude.extend(self.get_readonly_fields(request, obj))
        if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
            # Take the custom ModelForm's Meta.exclude into account only if the
            # ModelAdmin doesn't define its own.
            exclude.extend(self.form._meta.exclude)
        # if exclude is an empty list we pass None to be consistent with the
        # default on modelform_factory
        exclude = exclude or None
        defaults = {
            "form": self.form,
            "fields": fields,
            "exclude": exclude,
            "formfield_callback": partial(self.formfield_for_dbfield, request=request),
        }
        defaults.update(kwargs)
        if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
            # Neither the admin nor the form pins the field list: let the
            # factory include all editable model fields.
            defaults['fields'] = forms.ALL_FIELDS
        try:
            return modelform_factory(self.model, **defaults)
        except FieldError as e:
            # Re-raise with a hint pointing at this admin class configuration.
            raise FieldError('%s. Check fields/fieldsets/exclude attributes of class %s.'
                             % (e, self.__class__.__name__))
    def get_changelist(self, request, **kwargs):
        """
        Returns the ChangeList class for use on the changelist page.
        """
        # Deferred import; overridable hook for supplying a custom ChangeList.
        from django.contrib.admin.views.main import ChangeList
        return ChangeList
def get_object(self, request, object_id, from_field=None):
"""
Returns an instance matching the field and value provided, the primary
key is used if no field is provided. Returns ``None`` if no match is
found or the object_id fails validation.
"""
queryset = self.get_queryset(request)
model = queryset.model
field = model._meta.pk if from_field is None else model._meta.get_field(from_field)
try:
object_id = field.to_python(object_id)
return queryset.get(**{field.name: object_id})
except (model.DoesNotExist, ValidationError, ValueError):
return None
def get_changelist_form(self, request, **kwargs):
"""
Returns a Form class for use in the Formset on the changelist page.
"""
defaults = {
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
if (defaults.get('fields') is None
and not modelform_defines_fields(defaults.get('form'))):
defaults['fields'] = forms.ALL_FIELDS
return modelform_factory(self.model, **defaults)
def get_changelist_formset(self, request, **kwargs):
"""
Returns a FormSet class for use on the changelist page if list_editable
is used.
"""
defaults = {
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
return modelformset_factory(self.model,
self.get_changelist_form(request), extra=0,
fields=self.list_editable, **defaults)
def _get_formsets(self, request, obj):
"""
Helper function that exists to allow the deprecation warning to be
executed while this function continues to return a generator.
"""
for inline in self.get_inline_instances(request, obj):
yield inline.get_formset(request, obj)
    def get_formsets(self, request, obj=None):
        # Deprecated hook; forwards to _get_formsets() after emitting the
        # deprecation warning (stacklevel=2 attributes it to the caller).
        warnings.warn(
            "ModelAdmin.get_formsets() is deprecated and will be removed in "
            "Django 1.9. Use ModelAdmin.get_formsets_with_inlines() instead.",
            RemovedInDjango19Warning, stacklevel=2
        )
        return self._get_formsets(request, obj)
    def get_formsets_with_inlines(self, request, obj=None):
        """
        Yields formsets and the corresponding inlines.
        """
        # We call get_formsets() [deprecated] and check if it triggers a
        # warning. If it does, then it's ours and we can safely ignore it, but
        # if it doesn't then it has been overridden so we must warn about the
        # deprecation.
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            formsets = self.get_formsets(request, obj)
        if len(w) != 1 or not issubclass(w[0].category, RemovedInDjango19Warning):
            warnings.warn(
                "ModelAdmin.get_formsets() is deprecated and will be removed in "
                "Django 1.9. Use ModelAdmin.get_formsets_with_inlines() instead.",
                RemovedInDjango19Warning, stacklevel=2
            )
        if formsets:
            # ``formsets`` may be the lazy default generator (always truthy)
            # or an overridden sequence; pair entries with inline instances
            # positionally.
            zipped = zip(formsets, self.get_inline_instances(request, None))
            for formset, inline in zipped:
                yield formset, inline
        else:
            for inline in self.get_inline_instances(request, obj):
                yield inline.get_formset(request, obj), inline
    def get_paginator(self, request, queryset, per_page, orphans=0, allow_empty_first_page=True):
        # Instantiate the configured paginator class (``self.paginator``,
        # default Paginator) for the changelist.
        return self.paginator(queryset, per_page, orphans, allow_empty_first_page)
def log_addition(self, request, object):
"""
Log that an object has been successfully added.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, ADDITION
LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(object).pk,
object_id=object.pk,
object_repr=force_text(object),
action_flag=ADDITION
)
def log_change(self, request, object, message):
"""
Log that an object has been successfully changed.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, CHANGE
LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(object).pk,
object_id=object.pk,
object_repr=force_text(object),
action_flag=CHANGE,
change_message=message
)
def log_deletion(self, request, object, object_repr):
"""
Log that an object will be deleted. Note that this method must be
called before the deletion.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, DELETION
LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(object).pk,
object_id=object.pk,
object_repr=object_repr,
action_flag=DELETION
)
def action_checkbox(self, obj):
"""
A list_display column containing a checkbox widget.
"""
return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME, force_text(obj.pk))
action_checkbox.short_description = mark_safe('<input type="checkbox" id="action-toggle" />')
action_checkbox.allow_tags = True
    def get_actions(self, request):
        """
        Return a dictionary mapping the names of all actions for this
        ModelAdmin to a tuple of (callable, name, description) for each action.
        """
        # If self.actions is explicitly set to None that means that we don't
        # want *any* actions enabled on this page.
        if self.actions is None or IS_POPUP_VAR in request.GET:
            return OrderedDict()
        actions = []
        # Gather actions from the admin site first
        for (name, func) in self.admin_site.actions:
            description = getattr(func, 'short_description', name.replace('_', ' '))
            actions.append((func, name, description))
        # Then gather them from the model admin and all parent classes,
        # starting with self and working back up.
        for klass in self.__class__.mro()[::-1]:
            class_actions = getattr(klass, 'actions', [])
            # Avoid trying to iterate over None
            if not class_actions:
                continue
            actions.extend(self.get_action(action) for action in class_actions)
        # get_action might have returned None, so filter any of those out.
        actions = filter(None, actions)
        # Convert the actions into an OrderedDict keyed by name. Later
        # entries win, so subclass actions override same-named parents'.
        actions = OrderedDict(
            (name, (func, name, desc))
            for func, name, desc in actions
        )
        return actions
def get_action_choices(self, request, default_choices=BLANK_CHOICE_DASH):
"""
Return a list of choices for use in a form object. Each choice is a
tuple (name, description).
"""
choices = [] + default_choices
for func, name, description in six.itervalues(self.get_actions(request)):
choice = (name, description % model_format_dict(self.opts))
choices.append(choice)
return choices
    def get_action(self, action):
        """
        Return a given action from a parameter, which can either be a callable,
        or the name of a method on the ModelAdmin. Return is a tuple of
        (callable, name, description); None if the name cannot be resolved.
        """
        # If the action is a callable, just use it.
        if callable(action):
            func = action
            action = action.__name__
        # Next, look for a method. Grab it off self.__class__ to get an unbound
        # method instead of a bound one; this ensures that the calling
        # conventions are the same for functions and methods.
        elif hasattr(self.__class__, action):
            func = getattr(self.__class__, action)
        # Finally, look for a named method on the admin site
        else:
            try:
                func = self.admin_site.get_action(action)
            except KeyError:
                return None
        if hasattr(func, 'short_description'):
            description = func.short_description
        else:
            # Derive a readable description from the function name.
            description = capfirst(action.replace('_', ' '))
        return func, action, description
    def get_list_display(self, request):
        """
        Return a sequence containing the fields to be displayed on the
        changelist. Hook for per-request customization in subclasses.
        """
        return self.list_display
def get_list_display_links(self, request, list_display):
"""
Return a sequence containing the fields to be displayed as links
on the changelist. The list_display parameter is the list of fields
returned by get_list_display().
"""
if self.list_display_links or self.list_display_links is None or not list_display:
return self.list_display_links
else:
# Use only the first item in list_display as link
return list(list_display)[:1]
    def get_list_filter(self, request):
        """
        Returns a sequence containing the fields to be displayed as filters in
        the right sidebar of the changelist page.
        """
        return self.list_filter
    def get_search_fields(self, request):
        """
        Returns a sequence containing the fields to be searched whenever
        somebody submits a search query.
        """
        return self.search_fields
    def get_search_results(self, request, queryset, search_term):
        """
        Returns a tuple containing a queryset to implement the search,
        and a boolean indicating if the results may contain duplicates.
        """
        # Apply keyword searches.
        def construct_search(field_name):
            # Prefix characters select the lookup: ^ -> istartswith,
            # = -> iexact, @ -> search; default is icontains.
            if field_name.startswith('^'):
                return "%s__istartswith" % field_name[1:]
            elif field_name.startswith('='):
                return "%s__iexact" % field_name[1:]
            elif field_name.startswith('@'):
                return "%s__search" % field_name[1:]
            else:
                return "%s__icontains" % field_name
        use_distinct = False
        search_fields = self.get_search_fields(request)
        if search_fields and search_term:
            orm_lookups = [construct_search(str(search_field))
                           for search_field in search_fields]
            # Each whitespace-separated bit must match at least one field.
            for bit in search_term.split():
                or_queries = [models.Q(**{orm_lookup: bit})
                              for orm_lookup in orm_lookups]
                queryset = queryset.filter(reduce(operator.or_, or_queries))
            # Flag DISTINCT when any lookup spans a multi-valued relation.
            if not use_distinct:
                for search_spec in orm_lookups:
                    if lookup_needs_distinct(self.opts, search_spec):
                        use_distinct = True
                        break
        return queryset, use_distinct
def get_preserved_filters(self, request):
"""
Returns the preserved filters querystring.
"""
match = request.resolver_match
if self.preserve_filters and match:
opts = self.model._meta
current_url = '%s:%s' % (match.app_name, match.url_name)
changelist_url = 'admin:%s_%s_changelist' % (opts.app_label, opts.model_name)
if current_url == changelist_url:
preserved_filters = request.GET.urlencode()
else:
preserved_filters = request.GET.get('_changelist_filters')
if preserved_filters:
return urlencode({'_changelist_filters': preserved_filters})
return ''
    def construct_change_message(self, request, form, formsets):
        """
        Construct a change message from a changed object.

        Combines the main form's changed fields with added/changed/deleted
        inline objects into a single human-readable sentence.
        """
        change_message = []
        if form.changed_data:
            change_message.append(_('Changed %s.') % get_text_list(form.changed_data, _('and')))
        if formsets:
            for formset in formsets:
                for added_object in formset.new_objects:
                    change_message.append(_('Added %(name)s "%(object)s".')
                                          % {'name': force_text(added_object._meta.verbose_name),
                                             'object': force_text(added_object)})
                for changed_object, changed_fields in formset.changed_objects:
                    change_message.append(_('Changed %(list)s for %(name)s "%(object)s".')
                                          % {'list': get_text_list(changed_fields, _('and')),
                                             'name': force_text(changed_object._meta.verbose_name),
                                             'object': force_text(changed_object)})
                for deleted_object in formset.deleted_objects:
                    change_message.append(_('Deleted %(name)s "%(object)s".')
                                          % {'name': force_text(deleted_object._meta.verbose_name),
                                             'object': force_text(deleted_object)})
        change_message = ' '.join(change_message)
        return change_message or _('No fields changed.')
    def message_user(self, request, message, level=messages.INFO, extra_tags='',
                     fail_silently=False):
        """
        Send a message to the user. The default implementation
        posts a message using the django.contrib.messages backend.
        Exposes almost the same API as messages.add_message(), but accepts the
        positional arguments in a different order to maintain backwards
        compatibility. For convenience, it accepts the `level` argument as
        a string rather than the usual level number.
        """
        if not isinstance(level, int):
            # attempt to get the level if passed a string
            try:
                level = getattr(messages.constants, level.upper())
            except AttributeError:
                # Build a helpful error listing the accepted level names.
                levels = messages.constants.DEFAULT_TAGS.values()
                levels_repr = ', '.join('`%s`' % l for l in levels)
                raise ValueError('Bad message level string: `%s`. '
                                 'Possible values are: %s' % (level, levels_repr))
        messages.add_message(request, level, message, extra_tags=extra_tags,
                             fail_silently=fail_silently)
    def save_form(self, request, form, change):
        """
        Given a ModelForm return an unsaved instance. ``change`` is True if
        the object is being changed, and False if it's being added.
        """
        # commit=False: the instance is persisted later by save_model().
        return form.save(commit=False)
    def save_model(self, request, obj, form, change):
        """
        Given a model instance save it to the database.
        Hook for pre/post-save customization in subclasses.
        """
        obj.save()
    def delete_model(self, request, obj):
        """
        Given a model instance delete it from the database.
        Hook for pre/post-delete customization in subclasses.
        """
        obj.delete()
    def save_formset(self, request, form, formset, change):
        """
        Given an inline formset save it to the database.
        Hook for customizing how inline objects are persisted.
        """
        formset.save()
    def save_related(self, request, form, formsets, change):
        """
        Given the ``HttpRequest``, the parent ``ModelForm`` instance, the
        list of inline formsets and a boolean value based on whether the
        parent is being added or changed, save the related objects to the
        database. Note that at this point save_form() and save_model() have
        already been called.
        """
        # M2M data first, then each inline formset via the save_formset hook.
        form.save_m2m()
        for formset in formsets:
            self.save_formset(request, form, formset, change=change)
    def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
        """
        Render the add/change form template with the standard admin context
        (permissions, popup/to-field variables, save-button options).
        """
        opts = self.model._meta
        app_label = opts.app_label
        # Carry the changelist filters through the form's action URL.
        preserved_filters = self.get_preserved_filters(request)
        form_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, form_url)
        view_on_site_url = self.get_view_on_site_url(obj)
        context.update({
            'add': add,
            'change': change,
            'has_add_permission': self.has_add_permission(request),
            'has_change_permission': self.has_change_permission(request, obj),
            'has_delete_permission': self.has_delete_permission(request, obj),
            'has_file_field': True,  # FIXME - this should check if form or formsets have a FileField,
            'has_absolute_url': view_on_site_url is not None,
            'absolute_url': view_on_site_url,
            'form_url': form_url,
            'opts': opts,
            'content_type_id': get_content_type_for_model(self.model).pk,
            'save_as': self.save_as,
            'save_on_top': self.save_on_top,
            'to_field_var': TO_FIELD_VAR,
            'is_popup_var': IS_POPUP_VAR,
            'app_label': app_label,
        })
        if add and self.add_form_template is not None:
            form_template = self.add_form_template
        else:
            form_template = self.change_form_template
        request.current_app = self.admin_site.name
        # Fall back on the default template hierarchy when no custom
        # template is configured.
        return TemplateResponse(request, form_template or [
            "admin/%s/%s/change_form.html" % (app_label, opts.model_name),
            "admin/%s/change_form.html" % app_label,
            "admin/change_form.html"
        ], context)
    def response_add(self, request, obj, post_url_continue=None):
        """
        Determines the HttpResponse for the add_view stage.

        Handles the popup case and the "_continue"/"_addanother" submit
        buttons; otherwise defers to response_post_save_add().
        """
        opts = obj._meta
        pk_value = obj._get_pk_val()
        preserved_filters = self.get_preserved_filters(request)
        msg_dict = {'name': force_text(opts.verbose_name), 'obj': force_text(obj)}
        # Here, we distinguish between different save types by checking for
        # the presence of keys in request.POST.
        if IS_POPUP_VAR in request.POST:
            to_field = request.POST.get(TO_FIELD_VAR)
            if to_field:
                attr = str(to_field)
            else:
                attr = obj._meta.pk.attname
            value = obj.serializable_value(attr)
            return SimpleTemplateResponse('admin/popup_response.html', {
                'pk_value': escape(pk_value),  # for possible backwards-compatibility
                'value': escape(value),
                'obj': escapejs(obj)
            })
        elif "_continue" in request.POST:
            msg = _('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % msg_dict
            self.message_user(request, msg, messages.SUCCESS)
            if post_url_continue is None:
                # Redirect to the change view of the newly created object.
                post_url_continue = reverse('admin:%s_%s_change' %
                                            (opts.app_label, opts.model_name),
                                            args=(quote(pk_value),),
                                            current_app=self.admin_site.name)
            post_url_continue = add_preserved_filters(
                {'preserved_filters': preserved_filters, 'opts': opts},
                post_url_continue
            )
            return HttpResponseRedirect(post_url_continue)
        elif "_addanother" in request.POST:
            msg = _('The %(name)s "%(obj)s" was added successfully. You may add another %(name)s below.') % msg_dict
            self.message_user(request, msg, messages.SUCCESS)
            # Reload the (empty) add form at the same URL.
            redirect_url = request.path
            redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
            return HttpResponseRedirect(redirect_url)
        else:
            msg = _('The %(name)s "%(obj)s" was added successfully.') % msg_dict
            self.message_user(request, msg, messages.SUCCESS)
            return self.response_post_save_add(request, obj)
def response_change(self, request, obj):
"""
Determines the HttpResponse for the change_view stage.
"""
if IS_POPUP_VAR in request.POST:
to_field = request.POST.get(TO_FIELD_VAR)
attr = str(to_field) if to_field else obj._meta.pk.attname
# Retrieve the `object_id` from the resolved pattern arguments.
value = request.resolver_match.args[0]
new_value = obj.serializable_value(attr)
return SimpleTemplateResponse('admin/popup_response.html', {
'action': 'change',
'value': escape(value),
'obj': escapejs(obj),
'new_value': escape(new_value),
})
opts = self.model._meta
pk_value = obj._get_pk_val()
preserved_filters = self.get_preserved_filters(request)
msg_dict = {'name': force_text(opts.verbose_name), 'obj': force_text(obj)}
if "_continue" in request.POST:
msg = _('The %(name)s "%(obj)s" was changed successfully. You may edit it again below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
redirect_url = request.path
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
elif "_saveasnew" in request.POST:
msg = _('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
redirect_url = reverse('admin:%s_%s_change' %
(opts.app_label, opts.model_name),
args=(pk_value,),
current_app=self.admin_site.name)
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
elif "_addanother" in request.POST:
msg = _('The %(name)s "%(obj)s" was changed successfully. You may add another %(name)s below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
redirect_url = reverse('admin:%s_%s_add' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
else:
msg = _('The %(name)s "%(obj)s" was changed successfully.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
return self.response_post_save_change(request, obj)
def response_post_save_add(self, request, obj):
"""
Figure out where to redirect after the 'Save' button has been pressed
when adding a new object.
"""
opts = self.model._meta
if self.has_change_permission(request, None):
post_url = reverse('admin:%s_%s_changelist' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, post_url)
else:
post_url = reverse('admin:index',
current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def response_post_save_change(self, request, obj):
"""
Figure out where to redirect after the 'Save' button has been pressed
when editing an existing object.
"""
opts = self.model._meta
if self.has_change_permission(request, None):
post_url = reverse('admin:%s_%s_changelist' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, post_url)
else:
post_url = reverse('admin:index',
current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
    def response_action(self, request, queryset):
        """
        Handle an admin action. This is called if a request is POSTed to the
        changelist; it returns an HttpResponse if the action was handled, and
        None otherwise.
        """
        # There can be multiple action forms on the page (at the top
        # and bottom of the change list, for example). Get the action
        # whose button was pushed.
        try:
            action_index = int(request.POST.get('index', 0))
        except ValueError:
            action_index = 0
        # Construct the action form.
        data = request.POST.copy()
        data.pop(helpers.ACTION_CHECKBOX_NAME, None)
        data.pop("index", None)
        # Use the action whose button was pushed
        try:
            data.update({'action': data.getlist('action')[action_index]})
        except IndexError:
            # If we didn't get an action from the chosen form that's invalid
            # POST data, so by deleting action it'll fail the validation check
            # below. So no need to do anything here
            pass
        action_form = self.action_form(data, auto_id=None)
        action_form.fields['action'].choices = self.get_action_choices(request)
        # If the form's valid we can handle the action.
        if action_form.is_valid():
            action = action_form.cleaned_data['action']
            select_across = action_form.cleaned_data['select_across']
            func = self.get_actions(request)[action][0]
            # Get the list of selected PKs. If nothing's selected, we can't
            # perform an action on it, so bail. Except we want to perform
            # the action explicitly on all objects.
            selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
            if not selected and not select_across:
                # Reminder that something needs to be selected or nothing will happen
                msg = _("Items must be selected in order to perform "
                        "actions on them. No items have been changed.")
                self.message_user(request, msg, messages.WARNING)
                return None
            if not select_across:
                # Perform the action only on the selected objects
                queryset = queryset.filter(pk__in=selected)
            response = func(self, request, queryset)
            # Actions may return an HttpResponse-like object, which will be
            # used as the response from the POST. If not, we'll be a good
            # little HTTP citizen and redirect back to the changelist page.
            if isinstance(response, HttpResponseBase):
                return response
            else:
                return HttpResponseRedirect(request.get_full_path())
        else:
            msg = _("No action selected.")
            self.message_user(request, msg, messages.WARNING)
            return None
    def response_delete(self, request, obj_display, obj_id):
        """
        Determines the HttpResponse for the delete_view stage.

        Handles the popup case, then redirects to the changelist (or the
        admin index when the user lacks change permission).
        """
        opts = self.model._meta
        if IS_POPUP_VAR in request.POST:
            return SimpleTemplateResponse('admin/popup_response.html', {
                'action': 'delete',
                'value': escape(obj_id),
            })
        self.message_user(request,
            _('The %(name)s "%(obj)s" was deleted successfully.') % {
                'name': force_text(opts.verbose_name),
                'obj': force_text(obj_display),
            }, messages.SUCCESS)
        if self.has_change_permission(request, None):
            post_url = reverse('admin:%s_%s_changelist' %
                               (opts.app_label, opts.model_name),
                               current_app=self.admin_site.name)
            preserved_filters = self.get_preserved_filters(request)
            post_url = add_preserved_filters(
                {'preserved_filters': preserved_filters, 'opts': opts}, post_url
            )
        else:
            post_url = reverse('admin:index',
                               current_app=self.admin_site.name)
        return HttpResponseRedirect(post_url)
def render_delete_form(self, request, context):
opts = self.model._meta
app_label = opts.app_label
request.current_app = self.admin_site.name
context.update(
to_field_var=TO_FIELD_VAR,
is_popup_var=IS_POPUP_VAR,
)
return TemplateResponse(request,
self.delete_confirmation_template or [
"admin/{}/{}/delete_confirmation.html".format(app_label, opts.model_name),
"admin/{}/delete_confirmation.html".format(app_label),
"admin/delete_confirmation.html"
], context)
def get_inline_formsets(self, request, formsets, inline_instances,
obj=None):
inline_admin_formsets = []
for inline, formset in zip(inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request, obj))
readonly = list(inline.get_readonly_fields(request, obj))
prepopulated = dict(inline.get_prepopulated_fields(request, obj))
inline_admin_formset = helpers.InlineAdminFormSet(inline, formset,
fieldsets, prepopulated, readonly, model_admin=self)
inline_admin_formsets.append(inline_admin_formset)
return inline_admin_formsets
def get_changeform_initial_data(self, request):
"""
Get the initial form data.
Unless overridden, this populates from the GET params.
"""
initial = dict(request.GET.items())
for k in initial:
try:
f = self.model._meta.get_field(k)
except FieldDoesNotExist:
continue
# We have to special-case M2Ms as a list of comma-separated PKs.
if isinstance(f, models.ManyToManyField):
initial[k] = initial[k].split(",")
return initial
    @csrf_protect_m
    @transaction.atomic
    def changeform_view(self, request, object_id=None, form_url='', extra_context=None):
        """Shared implementation of the admin add (object_id is None) and
        change views; runs inside a database transaction."""
        # Validate any requested to_field before using it for lookups.
        to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
        if to_field and not self.to_field_allowed(request, to_field):
            raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
        model = self.model
        opts = model._meta
        add = object_id is None
        if add:
            if not self.has_add_permission(request):
                raise PermissionDenied
            obj = None
        else:
            obj = self.get_object(request, unquote(object_id), to_field)
            if not self.has_change_permission(request, obj):
                raise PermissionDenied
            if obj is None:
                raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
                    'name': force_text(opts.verbose_name), 'key': escape(object_id)})
            # "_saveasnew" re-dispatches to the add view with this POST data.
            if request.method == 'POST' and "_saveasnew" in request.POST:
                return self.add_view(request, form_url=reverse('admin:%s_%s_add' % (
                    opts.app_label, opts.model_name),
                    current_app=self.admin_site.name))
        ModelForm = self.get_form(request, obj)
        if request.method == 'POST':
            form = ModelForm(request.POST, request.FILES, instance=obj)
            if form.is_valid():
                form_validated = True
                new_object = self.save_form(request, form, change=not add)
            else:
                form_validated = False
                new_object = form.instance
            # Inline formsets are always constructed so errors can be shown
            # even when the main form is invalid.
            formsets, inline_instances = self._create_formsets(request, new_object, change=not add)
            if all_valid(formsets) and form_validated:
                self.save_model(request, new_object, form, not add)
                self.save_related(request, form, formsets, not add)
                if add:
                    self.log_addition(request, new_object)
                    return self.response_add(request, new_object)
                else:
                    change_message = self.construct_change_message(request, form, formsets)
                    self.log_change(request, new_object, change_message)
                    return self.response_change(request, new_object)
        else:
            if add:
                initial = self.get_changeform_initial_data(request)
                form = ModelForm(initial=initial)
                formsets, inline_instances = self._create_formsets(request, self.model(), change=False)
            else:
                form = ModelForm(instance=obj)
                formsets, inline_instances = self._create_formsets(request, obj, change=True)
        # Falls through here on GET, or on POST with validation errors:
        # render the (re)bound form with its inline formsets.
        adminForm = helpers.AdminForm(
            form,
            list(self.get_fieldsets(request, obj)),
            self.get_prepopulated_fields(request, obj),
            self.get_readonly_fields(request, obj),
            model_admin=self)
        media = self.media + adminForm.media
        inline_formsets = self.get_inline_formsets(request, formsets, inline_instances, obj)
        for inline_formset in inline_formsets:
            media = media + inline_formset.media
        context = dict(self.admin_site.each_context(request),
            title=(_('Add %s') if add else _('Change %s')) % force_text(opts.verbose_name),
            adminform=adminForm,
            object_id=object_id,
            original=obj,
            is_popup=(IS_POPUP_VAR in request.POST or
                      IS_POPUP_VAR in request.GET),
            to_field=to_field,
            media=media,
            inline_admin_formsets=inline_formsets,
            errors=helpers.AdminErrorList(form, formsets),
            preserved_filters=self.get_preserved_filters(request),
        )
        context.update(extra_context or {})
        return self.render_change_form(request, context, add=add, change=not add, obj=obj, form_url=form_url)
def add_view(self, request, form_url='', extra_context=None):
return self.changeform_view(request, None, form_url, extra_context)
def change_view(self, request, object_id, form_url='', extra_context=None):
return self.changeform_view(request, object_id, form_url, extra_context)
    @csrf_protect_m
    def changelist_view(self, request, extra_context=None):
        """
        The 'change list' admin view for this model.

        Handles, in order: admin actions posted from the list (with or
        without confirmation), bulk list-editable saves, and finally plain
        rendering of the filtered/paginated change list.
        """
        from django.contrib.admin.views.main import ERROR_FLAG
        opts = self.model._meta
        app_label = opts.app_label
        if not self.has_change_permission(request, None):
            raise PermissionDenied
        list_display = self.get_list_display(request)
        list_display_links = self.get_list_display_links(request, list_display)
        list_filter = self.get_list_filter(request)
        search_fields = self.get_search_fields(request)
        # Check actions to see if any are available on this changelist
        actions = self.get_actions(request)
        if actions:
            # Add the action checkboxes if there are any actions available.
            list_display = ['action_checkbox'] + list(list_display)
        ChangeList = self.get_changelist(request)
        try:
            cl = ChangeList(request, self.model, list_display,
                list_display_links, list_filter, self.date_hierarchy,
                search_fields, self.list_select_related, self.list_per_page,
                self.list_max_show_all, self.list_editable, self)
        except IncorrectLookupParameters:
            # Wacky lookup parameters were given, so redirect to the main
            # changelist page, without parameters, and pass an 'invalid=1'
            # parameter via the query string. If wacky parameters were given
            # and the 'invalid=1' parameter was already in the query string,
            # something is screwed up with the database, so display an error
            # page.
            if ERROR_FLAG in request.GET.keys():
                return SimpleTemplateResponse('admin/invalid_setup.html', {
                    'title': _('Database error'),
                })
            return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1')
        # If the request was POSTed, this might be a bulk action or a bulk
        # edit. Try to look up an action or confirmation first, but if this
        # isn't an action the POST will fall through to the bulk edit check,
        # below.
        action_failed = False
        selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
        # Actions with no confirmation
        if (actions and request.method == 'POST' and
                'index' in request.POST and '_save' not in request.POST):
            if selected:
                response = self.response_action(request, queryset=cl.get_queryset(request))
                if response:
                    return response
                else:
                    action_failed = True
            else:
                msg = _("Items must be selected in order to perform "
                        "actions on them. No items have been changed.")
                self.message_user(request, msg, messages.WARNING)
                action_failed = True
        # Actions with confirmation
        if (actions and request.method == 'POST' and
                helpers.ACTION_CHECKBOX_NAME in request.POST and
                'index' not in request.POST and '_save' not in request.POST):
            if selected:
                response = self.response_action(request, queryset=cl.get_queryset(request))
                if response:
                    return response
                else:
                    action_failed = True
        # If we're allowing changelist editing, we need to construct a formset
        # for the changelist given all the fields to be edited. Then we'll
        # use the formset to validate/process POSTed data.
        formset = cl.formset = None
        # Handle POSTed bulk-edit data.
        if (request.method == "POST" and cl.list_editable and
                '_save' in request.POST and not action_failed):
            FormSet = self.get_changelist_formset(request)
            formset = cl.formset = FormSet(request.POST, request.FILES, queryset=cl.result_list)
            if formset.is_valid():
                changecount = 0
                for form in formset.forms:
                    if form.has_changed():
                        obj = self.save_form(request, form, change=True)
                        self.save_model(request, obj, form, change=True)
                        self.save_related(request, form, formsets=[], change=True)
                        change_msg = self.construct_change_message(request, form, None)
                        self.log_change(request, obj, change_msg)
                        changecount += 1
                if changecount:
                    if changecount == 1:
                        name = force_text(opts.verbose_name)
                    else:
                        name = force_text(opts.verbose_name_plural)
                    msg = ungettext("%(count)s %(name)s was changed successfully.",
                                    "%(count)s %(name)s were changed successfully.",
                                    changecount) % {'count': changecount,
                                                    'name': name,
                                                    'obj': force_text(obj)}
                    self.message_user(request, msg, messages.SUCCESS)
                # Redirect-after-POST so a refresh doesn't resubmit edits.
                return HttpResponseRedirect(request.get_full_path())
        # Handle GET -- construct a formset for display.
        elif cl.list_editable:
            FormSet = self.get_changelist_formset(request)
            formset = cl.formset = FormSet(queryset=cl.result_list)
        # Build the list of media to be used by the formset.
        if formset:
            media = self.media + formset.media
        else:
            media = self.media
        # Build the action form and populate it with available actions.
        if actions:
            action_form = self.action_form(auto_id=None)
            action_form.fields['action'].choices = self.get_action_choices(request)
        else:
            action_form = None
        selection_note_all = ungettext('%(total_count)s selected',
            'All %(total_count)s selected', cl.result_count)
        context = dict(
            self.admin_site.each_context(request),
            module_name=force_text(opts.verbose_name_plural),
            selection_note=_('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)},
            selection_note_all=selection_note_all % {'total_count': cl.result_count},
            title=cl.title,
            is_popup=cl.is_popup,
            to_field=cl.to_field,
            cl=cl,
            media=media,
            has_add_permission=self.has_add_permission(request),
            opts=cl.opts,
            action_form=action_form,
            actions_on_top=self.actions_on_top,
            actions_on_bottom=self.actions_on_bottom,
            actions_selection_counter=self.actions_selection_counter,
            preserved_filters=self.get_preserved_filters(request),
        )
        context.update(extra_context or {})
        request.current_app = self.admin_site.name
        return TemplateResponse(request, self.change_list_template or [
            'admin/%s/%s/change_list.html' % (app_label, opts.model_name),
            'admin/%s/change_list.html' % app_label,
            'admin/change_list.html'
        ], context)
    @csrf_protect_m
    @transaction.atomic
    def delete_view(self, request, object_id, extra_context=None):
        """The 'delete' admin view for this model.

        GET renders a confirmation page listing everything that would be
        cascade-deleted; POST performs the deletion.
        """
        opts = self.model._meta
        app_label = opts.app_label
        to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
        if to_field and not self.to_field_allowed(request, to_field):
            raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
        obj = self.get_object(request, unquote(object_id), to_field)
        if not self.has_delete_permission(request, obj):
            raise PermissionDenied
        if obj is None:
            raise Http404(
                _('%(name)s object with primary key %(key)r does not exist.') %
                {'name': force_text(opts.verbose_name), 'key': escape(object_id)}
            )
        using = router.db_for_write(self.model)
        # Populate deleted_objects, a data structure of all related objects that
        # will also be deleted.
        (deleted_objects, model_count, perms_needed, protected) = get_deleted_objects(
            [obj], opts, request.user, self.admin_site, using)
        if request.POST:  # The user has already confirmed the deletion.
            if perms_needed:
                raise PermissionDenied
            obj_display = force_text(obj)
            attr = str(to_field) if to_field else opts.pk.attname
            obj_id = obj.serializable_value(attr)
            # Log before deleting so the entry can reference the object.
            self.log_deletion(request, obj, obj_display)
            self.delete_model(request, obj)
            return self.response_delete(request, obj_display, obj_id)
        object_name = force_text(opts.verbose_name)
        if perms_needed or protected:
            title = _("Cannot delete %(name)s") % {"name": object_name}
        else:
            title = _("Are you sure?")
        context = dict(
            self.admin_site.each_context(request),
            title=title,
            object_name=object_name,
            object=obj,
            deleted_objects=deleted_objects,
            model_count=dict(model_count).items(),
            perms_lacking=perms_needed,
            protected=protected,
            opts=opts,
            app_label=app_label,
            preserved_filters=self.get_preserved_filters(request),
            is_popup=(IS_POPUP_VAR in request.POST or
                      IS_POPUP_VAR in request.GET),
            to_field=to_field,
        )
        context.update(extra_context or {})
        return self.render_delete_form(request, context)
    def history_view(self, request, object_id, extra_context=None):
        """The 'history' admin view: lists LogEntry records for one object."""
        from django.contrib.admin.models import LogEntry
        # First check if the user can see this history.
        model = self.model
        obj = self.get_object(request, unquote(object_id))
        if obj is None:
            raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
                'name': force_text(model._meta.verbose_name),
                'key': escape(object_id),
            })
        if not self.has_change_permission(request, obj):
            raise PermissionDenied
        # Then get the history for this object.
        opts = model._meta
        app_label = opts.app_label
        # All admin log entries recorded for this object, oldest first.
        action_list = LogEntry.objects.filter(
            object_id=unquote(object_id),
            content_type=get_content_type_for_model(model)
        ).select_related().order_by('action_time')
        context = dict(self.admin_site.each_context(request),
            title=_('Change history: %s') % force_text(obj),
            action_list=action_list,
            module_name=capfirst(force_text(opts.verbose_name_plural)),
            object=obj,
            opts=opts,
            preserved_filters=self.get_preserved_filters(request),
        )
        context.update(extra_context or {})
        request.current_app = self.admin_site.name
        return TemplateResponse(request, self.object_history_template or [
            "admin/%s/%s/object_history.html" % (app_label, opts.model_name),
            "admin/%s/object_history.html" % app_label,
            "admin/object_history.html"
        ], context)
    def _create_formsets(self, request, obj, change):
        """Helper function to generate formsets for add/change_view.

        Returns (formsets, inline_instances) as parallel lists.
        """
        formsets = []
        inline_instances = []
        prefixes = {}
        get_formsets_args = [request]
        if change:
            get_formsets_args.append(obj)
        for FormSet, inline in self.get_formsets_with_inlines(*get_formsets_args):
            prefix = FormSet.get_default_prefix()
            # Uniquify the prefix when the same formset class (or an empty
            # prefix) appears more than once, so POST data doesn't collide.
            prefixes[prefix] = prefixes.get(prefix, 0) + 1
            if prefixes[prefix] != 1 or not prefix:
                prefix = "%s-%s" % (prefix, prefixes[prefix])
            formset_params = {
                'instance': obj,
                'prefix': prefix,
                'queryset': inline.get_queryset(request),
            }
            if request.method == 'POST':
                formset_params.update({
                    'data': request.POST,
                    'files': request.FILES,
                    'save_as_new': '_saveasnew' in request.POST
                })
            formsets.append(FormSet(**formset_params))
            inline_instances.append(inline)
        return formsets, inline_instances
class InlineModelAdmin(BaseModelAdmin):
    """
    Options for inline editing of ``model`` instances.
    Provide ``fk_name`` to specify the attribute name of the ``ForeignKey``
    from ``model`` to its parent. This is required if ``model`` has more than
    one ``ForeignKey`` to its parent.
    """
    # The inline model being edited; subclasses must set this.
    model = None
    fk_name = None
    formset = BaseInlineFormSet
    # Number of extra empty forms shown by default.
    extra = 3
    min_num = None
    max_num = None
    template = None
    verbose_name = None
    verbose_name_plural = None
    can_delete = True
    show_change_link = False
    checks_class = InlineModelAdminChecks
    def __init__(self, parent_model, admin_site):
        # Default verbose names from the model's own meta if not overridden.
        self.admin_site = admin_site
        self.parent_model = parent_model
        self.opts = self.model._meta
        self.has_registered_model = admin_site.is_registered(self.model)
        super(InlineModelAdmin, self).__init__()
        if self.verbose_name is None:
            self.verbose_name = self.model._meta.verbose_name
        if self.verbose_name_plural is None:
            self.verbose_name_plural = self.model._meta.verbose_name_plural
    @property
    def media(self):
        """JS media needed by this inline (minified unless settings.DEBUG)."""
        extra = '' if settings.DEBUG else '.min'
        js = ['jquery%s.js' % extra, 'jquery.init.js', 'inlines%s.js' % extra]
        if self.prepopulated_fields:
            js.extend(['urlify.js', 'prepopulate%s.js' % extra])
        if self.filter_vertical or self.filter_horizontal:
            js.extend(['SelectBox.js', 'SelectFilter2.js'])
        return forms.Media(js=[static('admin/js/%s' % url) for url in js])
    def get_extra(self, request, obj=None, **kwargs):
        """Hook for customizing the number of extra inline forms."""
        return self.extra
    def get_min_num(self, request, obj=None, **kwargs):
        """Hook for customizing the min number of inline forms."""
        return self.min_num
    def get_max_num(self, request, obj=None, **kwargs):
        """Hook for customizing the max number of extra inline forms."""
        return self.max_num
    def get_formset(self, request, obj=None, **kwargs):
        """Returns a BaseInlineFormSet class for use in admin add/change views."""
        if 'fields' in kwargs:
            fields = kwargs.pop('fields')
        else:
            fields = flatten_fieldsets(self.get_fieldsets(request, obj))
        if self.exclude is None:
            exclude = []
        else:
            exclude = list(self.exclude)
        exclude.extend(self.get_readonly_fields(request, obj))
        if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
            # Take the custom ModelForm's Meta.exclude into account only if the
            # InlineModelAdmin doesn't define its own.
            exclude.extend(self.form._meta.exclude)
        # If exclude is an empty list we use None, since that's the actual
        # default.
        exclude = exclude or None
        can_delete = self.can_delete and self.has_delete_permission(request, obj)
        defaults = {
            "form": self.form,
            "formset": self.formset,
            "fk_name": self.fk_name,
            "fields": fields,
            "exclude": exclude,
            "formfield_callback": partial(self.formfield_for_dbfield, request=request),
            "extra": self.get_extra(request, obj, **kwargs),
            "min_num": self.get_min_num(request, obj, **kwargs),
            "max_num": self.get_max_num(request, obj, **kwargs),
            "can_delete": can_delete,
        }
        defaults.update(kwargs)
        base_model_form = defaults['form']
        # Subclass the form so deletion is blocked when it would cascade
        # into PROTECTed related objects.
        class DeleteProtectedModelForm(base_model_form):
            def hand_clean_DELETE(self):
                """
                We don't validate the 'DELETE' field itself because on
                templates it's not rendered using the field information, but
                just using a generic "deletion_field" of the InlineModelAdmin.
                """
                if self.cleaned_data.get(DELETION_FIELD_NAME, False):
                    using = router.db_for_write(self._meta.model)
                    collector = NestedObjects(using=using)
                    if self.instance.pk is None:
                        return
                    collector.collect([self.instance])
                    if collector.protected:
                        objs = []
                        for p in collector.protected:
                            objs.append(
                                # Translators: Model verbose name and instance representation,
                                # suitable to be an item in a list.
                                _('%(class_name)s %(instance)s') % {
                                    'class_name': p._meta.verbose_name,
                                    'instance': p}
                            )
                        params = {'class_name': self._meta.model._meta.verbose_name,
                                  'instance': self.instance,
                                  'related_objects': get_text_list(objs, _('and'))}
                        msg = _("Deleting %(class_name)s %(instance)s would require "
                                "deleting the following protected related objects: "
                                "%(related_objects)s")
                        raise ValidationError(msg, code='deleting_protected', params=params)
            def is_valid(self):
                result = super(DeleteProtectedModelForm, self).is_valid()
                self.hand_clean_DELETE()
                return result
        defaults['form'] = DeleteProtectedModelForm
        if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
            defaults['fields'] = forms.ALL_FIELDS
        return inlineformset_factory(self.parent_model, self.model, **defaults)
    def get_fields(self, request, obj=None):
        """Fields to display: explicit self.fields, else the formset's form
        fields plus any readonly fields."""
        if self.fields:
            return self.fields
        form = self.get_formset(request, obj, fields=None).form
        return list(form.base_fields) + list(self.get_readonly_fields(request, obj))
    def get_queryset(self, request):
        # Users without change permission get an empty queryset rather than
        # a permission error, so the inline simply renders nothing.
        queryset = super(InlineModelAdmin, self).get_queryset(request)
        if not self.has_change_permission(request):
            queryset = queryset.none()
        return queryset
    def has_add_permission(self, request):
        if self.opts.auto_created:
            # We're checking the rights to an auto-created intermediate model,
            # which doesn't have its own individual permissions. The user needs
            # to have the change permission for the related model in order to
            # be able to do anything with the intermediate model.
            return self.has_change_permission(request)
        return super(InlineModelAdmin, self).has_add_permission(request)
    def has_change_permission(self, request, obj=None):
        opts = self.opts
        if opts.auto_created:
            # The model was auto-created as intermediary for a
            # ManyToMany-relationship, find the target model
            for field in opts.fields:
                if field.rel and field.rel.to != self.parent_model:
                    opts = field.rel.to._meta
                    break
        codename = get_permission_codename('change', opts)
        return request.user.has_perm("%s.%s" % (opts.app_label, codename))
    def has_delete_permission(self, request, obj=None):
        if self.opts.auto_created:
            # We're checking the rights to an auto-created intermediate model,
            # which doesn't have its own individual permissions. The user needs
            # to have the change permission for the related model in order to
            # be able to do anything with the intermediate model.
            return self.has_change_permission(request, obj)
        return super(InlineModelAdmin, self).has_delete_permission(request, obj)
class StackedInline(InlineModelAdmin):
    """Inline editor that renders each form as a full stacked fieldset."""
    template = 'admin/edit_inline/stacked.html'
class TabularInline(InlineModelAdmin):
    """Inline editor that renders the forms as rows of a compact table."""
    template = 'admin/edit_inline/tabular.html'
| mit |
XDEMOND/PracticaTouchmove | plugins/ti.alloy/plugin.py | 1729 | 5251 | import os, sys, subprocess, hashlib
import subprocess
def check_output(*popenargs, **kwargs):
	r"""Run command with arguments and return its output as a byte string.

	Backported from Python 2.7 as it's implemented as pure python on stdlib.

	>>> check_output(['/usr/bin/python', '--version'])
	Python 2.6.2
	"""
	proc = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
	stdout_data, _unused_err = proc.communicate()
	status = proc.poll()
	if not status:
		return stdout_data
	# Non-zero exit: mirror subprocess.check_output and raise with the
	# failing command attached (explicit 'args' kwarg wins over positional).
	failed_cmd = kwargs.get("args")
	if failed_cmd is None:
		failed_cmd = popenargs[0]
	exc = subprocess.CalledProcessError(status, failed_cmd)
	exc.output = stdout_data
	raise exc
def compile(config):
	"""Titanium build-plugin entry point (Python 2).

	Locates the `alloy` and `node` binaries (env var, PATH, then a few
	well-known install locations) and runs `alloy compile` over the
	project's app directory if one exists. Exits the process on failure.
	"""
	paths = {}
	binaries = ["alloy","node"]
	# Marker written by the newer Alloy CLI: if present, the CLI already
	# compiled and this plugin must not run again.
	dotAlloy = os.path.abspath(os.path.join(config['project_dir'], 'build', '.alloynewcli'))
	if os.path.exists(dotAlloy):
		print "[DEBUG] build/.alloynewcli file found, skipping plugin..."
		os.remove(dotAlloy)
	else:
		for binary in binaries:
			try:
				# see if the environment variable is defined
				paths[binary] = os.environ["ALLOY_" + ("NODE_" if binary == "node" else "") + "PATH"]
			except KeyError as ex:
				# next try PATH, and then our guess paths
				if sys.platform == "darwin" or sys.platform.startswith('linux'):
					userPath = os.environ["HOME"]
					guessPaths = [
						"/usr/local/bin/"+binary,
						"/opt/local/bin/"+binary,
						userPath+"/local/bin/"+binary,
						"/opt/bin/"+binary,
						"/usr/bin/"+binary,
						"/usr/local/share/npm/bin/"+binary
					]
					try:
						binaryPath = check_output(["which",binary], stderr=subprocess.STDOUT).strip()
						print "[DEBUG] %s installed at '%s'" % (binary,binaryPath)
					except:
						print "[WARN] Couldn't find %s on your PATH:" % binary
						print "[WARN] %s" % os.environ["PATH"]
						print "[WARN]"
						print "[WARN] Checking for %s in a few default locations:" % binary
						for p in guessPaths:
							sys.stdout.write("[WARN] %s -> " % p)
							if os.path.exists(p):
								binaryPath = p
								print "FOUND"
								break
							else:
								print "not found"
								binaryPath = None
						if binaryPath is None:
							print "[ERROR] Couldn't find %s" % binary
							sys.exit(1)
						else:
							paths[binary] = binaryPath
				# no guesses on windows, just use the PATH
				elif sys.platform == "win32":
					paths["alloy"] = "alloy.cmd"
		f = os.path.abspath(os.path.join(config['project_dir'], 'app'))
		if os.path.exists(f):
			print "[INFO] alloy app found at %s" % f
			rd = os.path.abspath(os.path.join(config['project_dir'], 'Resources'))
			devicefamily = 'none'
			simtype = 'none'
			version = '0'
			deploytype = 'development'
			# Per-platform build settings passed through to `alloy compile`.
			if config['platform']==u'ios':
				version = config['iphone_version']
				devicefamily = config['devicefamily']
				deploytype = config['deploytype']
			if config['platform']==u'android':
				builder = config['android_builder']
				version = builder.tool_api_level
				deploytype = config['deploy_type']
			if config['platform']==u'mobileweb':
				builder = config['mobileweb_builder']
				deploytype = config['deploytype']
			cfg = "platform=%s,version=%s,simtype=%s,devicefamily=%s,deploytype=%s," % (config['platform'],version,simtype,devicefamily,deploytype)
			# On Windows the .cmd shim launches node itself.
			if sys.platform == "win32":
				cmd = [paths["alloy"], "compile", f, "--no-colors", "--config", cfg]
			else:
				cmd = [paths["node"], paths["alloy"], "compile", f, "--no-colors", "--config", cfg]
			print "[INFO] Executing Alloy compile:"
			print "[INFO] %s" % " ".join(cmd)
			try:
				print check_output(cmd, stderr=subprocess.STDOUT)
			except subprocess.CalledProcessError as ex:
				if hasattr(ex, 'output'):
					print ex.output
				print "[ERROR] Alloy compile failed"
				retcode = 1
				if hasattr(ex, 'returncode'):
					retcode = ex.returncode
				sys.exit(retcode)
			except EnvironmentError as ex:
				print "[ERROR] Unexpected error with Alloy compiler plugin: %s" % ex.strerror
				sys.exit(2)
invisiblek/android_kernel_oppo_n3 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
	"""Main wx window drawing the scheduler trace as scrollable, zoomable
	rectangles (one row per CPU/task rectangle reported by sched_tracer).

	NOTE(review): this is Python 2 code — several divisions below
	(e.g. us_to_px, rect_from_ypixel) rely on py2 integer-division
	semantics for int inputs.
	"""
	Y_OFFSET = 100
	RECT_HEIGHT = 100
	RECT_SPACE = 50
	EVENT_MARKING_WIDTH = 5
	def __init__(self, sched_tracer, title, parent = None, id = -1):
		wx.Frame.__init__(self, parent, id, title)
		(self.screen_width, self.screen_height) = wx.GetDisplaySize()
		self.screen_width -= 10
		self.screen_height -= 10
		self.zoom = 0.5
		self.scroll_scale = 20
		self.sched_tracer = sched_tracer
		self.sched_tracer.set_root_win(self)
		(self.ts_start, self.ts_end) = sched_tracer.interval()
		self.update_width_virtual()
		self.nr_rects = sched_tracer.nr_rectangles() + 1
		self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
		# whole window panel
		self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
		# scrollable container
		self.scroll = wx.ScrolledWindow(self.panel)
		self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
		self.scroll.EnableScrolling(True, True)
		self.scroll.SetFocus()
		# scrollable drawing area
		self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
		self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
		self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
		self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
		self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
		self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
		self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
		self.scroll.Fit()
		self.Fit()
		self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
		self.txt = None
		self.Show(True)
	def us_to_px(self, val):
		# Time (presumably microseconds — values come from sched_tracer) to pixels.
		return val / (10 ** 3) * self.zoom
	def px_to_us(self, val):
		# Inverse of us_to_px.
		return (val / self.zoom) * (10 ** 3)
	def scroll_start(self):
		# Current scroll origin in pixels (GetViewStart is in scroll units).
		(x, y) = self.scroll.GetViewStart()
		return (x * self.scroll_scale, y * self.scroll_scale)
	def scroll_start_us(self):
		(x, y) = self.scroll_start()
		return self.px_to_us(x)
	def paint_rectangle_zone(self, nr, color, top_color, start, end):
		offset_px = self.us_to_px(start - self.ts_start)
		width_px = self.us_to_px(end - self.ts_start)
		offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
		width_py = RootFrame.RECT_HEIGHT
		dc = self.dc
		if top_color is not None:
			# Draw a thin marker strip along the top edge of the rectangle.
			(r, g, b) = top_color
			top_color = wx.Colour(r, g, b)
			brush = wx.Brush(top_color, wx.SOLID)
			dc.SetBrush(brush)
			dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
			width_py -= RootFrame.EVENT_MARKING_WIDTH
			offset_py += RootFrame.EVENT_MARKING_WIDTH
		(r ,g, b) = color
		color = wx.Colour(r, g, b)
		brush = wx.Brush(color, wx.SOLID)
		dc.SetBrush(brush)
		dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
	def update_rectangles(self, dc, start, end):
		start += self.ts_start
		end += self.ts_start
		self.sched_tracer.fill_zone(start, end)
	def on_paint(self, event):
		# Repaint only the currently visible time window.
		dc = wx.PaintDC(self.scroll_panel)
		self.dc = dc
		width = min(self.width_virtual, self.screen_width)
		(x, y) = self.scroll_start()
		start = self.px_to_us(x)
		end = self.px_to_us(x + width)
		self.update_rectangles(dc, start, end)
	def rect_from_ypixel(self, y):
		# Map a y pixel to a rectangle row index; -1 when in the spacing gap
		# or outside the drawn area.
		y -= RootFrame.Y_OFFSET
		rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
		height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
		if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
			return -1
		return rect
	def update_summary(self, txt):
		if self.txt:
			self.txt.Destroy()
		self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
	def on_mouse_down(self, event):
		(x, y) = event.GetPositionTuple()
		rect = self.rect_from_ypixel(y)
		if rect == -1:
			return
		t = self.px_to_us(x) + self.ts_start
		self.sched_tracer.mouse_down(rect, t)
	def update_width_virtual(self):
		self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
	def __zoom(self, x):
		# Recompute the virtual width and keep time `x` at the left edge.
		self.update_width_virtual()
		(xpos, ypos) = self.scroll.GetViewStart()
		xpos = self.us_to_px(x) / self.scroll_scale
		self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
		self.Refresh()
	def zoom_in(self):
		x = self.scroll_start_us()
		self.zoom *= 2
		self.__zoom(x)
	def zoom_out(self):
		x = self.scroll_start_us()
		self.zoom /= 2
		self.__zoom(x)
	def on_key_press(self, event):
		# +/- zoom, arrow keys scroll one unit.
		key = event.GetRawKeyCode()
		if key == ord("+"):
			self.zoom_in()
			return
		if key == ord("-"):
			self.zoom_out()
			return
		key = event.GetKeyCode()
		(x, y) = self.scroll.GetViewStart()
		if key == wx.WXK_RIGHT:
			self.scroll.Scroll(x + 1, y)
		elif key == wx.WXK_LEFT:
			self.scroll.Scroll(x - 1, y)
		elif key == wx.WXK_DOWN:
			self.scroll.Scroll(x, y + 1)
		elif key == wx.WXK_UP:
			self.scroll.Scroll(x, y - 1)
segsignal/bitcoin | qa/rpc-tests/import-rescan.py | 38 | 9103 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test rescan behavior of importaddress, importpubkey, importprivkey, and
importmulti RPCs with different types of keys and rescan options.
In the first part of the test, node 1 creates an address for each type of
import RPC call and node 0 sends BTC to it. Then other nodes import the
addresses, and the test makes listtransactions and getbalance calls to confirm
that the importing node either did or did not execute rescans picking up the
send transactions.
In the second part of the test, node 0 sends more BTC to each address, and the
test makes more listtransactions and getbalance calls to confirm that the
importing nodes pick up the new transactions regardless of whether rescans
happened previously.
"""
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (start_nodes, connect_nodes, sync_blocks, assert_equal, set_node_times)
from decimal import Decimal
import collections
import enum
import itertools
# How the key is imported: a dedicated single-key RPC, or importmulti.
Call = enum.Enum("Call", "single multi")
# What is imported: a bare address, a public key, or a private key.
Data = enum.Enum("Data", "address pub priv")
# Whether a rescan is requested at import time ("late_timestamp" requests
# one with a key timestamp past the rescan window).
Rescan = enum.Enum("Rescan", "no yes late_timestamp")
class Variant(collections.namedtuple("Variant", "call data rescan prune")):
    """Helper for importing one key and verifying scanned transactions.

    The test driver attaches extra attributes before use: node, address,
    key, label, expect_disabled, expected_balance, expected_txs.
    """
    def do_import(self, timestamp):
        """Call one key import RPC."""
        # NOTE(review): try_rpc appears to be a helper defined elsewhere in
        # this file returning a (result, error) pair — confirm before reuse.
        if self.call == Call.single:
            if self.data == Data.address:
                response, error = try_rpc(self.node.importaddress, self.address["address"], self.label,
                                          self.rescan == Rescan.yes)
            elif self.data == Data.pub:
                response, error = try_rpc(self.node.importpubkey, self.address["pubkey"], self.label,
                                          self.rescan == Rescan.yes)
            elif self.data == Data.priv:
                response, error = try_rpc(self.node.importprivkey, self.key, self.label, self.rescan == Rescan.yes)
            assert_equal(response, None)
            # Single-key imports with rescan must fail on pruned nodes.
            assert_equal(error, {'message': 'Rescan is disabled in pruned mode',
                                 'code': -4} if self.expect_disabled else None)
        elif self.call == Call.multi:
            response = self.node.importmulti([{
                "scriptPubKey": {
                    "address": self.address["address"]
                },
                "timestamp": timestamp + RESCAN_WINDOW + (1 if self.rescan == Rescan.late_timestamp else 0),
                "pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [],
                "keys": [self.key] if self.data == Data.priv else [],
                "label": self.label,
                "watchonly": self.data != Data.priv
            }], {"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)})
            assert_equal(response, [{"success": True}])
    def check(self, txid=None, amount=None, confirmations=None):
        """Verify that getbalance/listtransactions return expected values."""
        balance = self.node.getbalance(self.label, 0, True)
        assert_equal(balance, self.expected_balance)
        txs = self.node.listtransactions(self.label, 10000, 0, True)
        assert_equal(len(txs), self.expected_txs)
        if txid is not None:
            tx, = [tx for tx in txs if tx["txid"] == txid]
            assert_equal(tx["account"], self.label)
            assert_equal(tx["address"], self.address["address"])
            assert_equal(tx["amount"], amount)
            assert_equal(tx["category"], "receive")
            assert_equal(tx["label"], self.label)
            assert_equal(tx["txid"], txid)
            assert_equal(tx["confirmations"], confirmations)
            assert_equal("trusted" not in tx, True)
            # Verify the transaction is correctly marked watchonly depending on
            # whether the transaction pays to an imported public key or
            # imported private key. The test setup ensures that transaction
            # inputs will not be from watchonly keys (important because
            # involvesWatchonly will be true if either the transaction output
            # or inputs are watchonly).
            if self.data != Data.priv:
                assert_equal(tx["involvesWatchonly"], True)
            else:
                assert_equal("involvesWatchonly" not in tx, True)
# List of Variants for each way a key or address could be imported.
IMPORT_VARIANTS = [Variant(*variants) for variants in itertools.product(Call, Data, Rescan, (False, True))]
# List of nodes to import keys to. Half the nodes will have pruning disabled,
# half will have it enabled. Different nodes will be used for imports that are
# expected to cause rescans, and imports that are not expected to cause
# rescans, in order to prevent rescans during later imports picking up
# transactions associated with earlier imports. This makes it easier to keep
# track of expected balances and transactions.
ImportNode = collections.namedtuple("ImportNode", "prune rescan")
IMPORT_NODES = [ImportNode(*fields) for fields in itertools.product((False, True), repeat=2)]
# Rescans start at the earliest block up to 2 hours before the key timestamp.
# (Expressed in seconds, matching the node's block/key timestamps.)
RESCAN_WINDOW = 2 * 60 * 60
class ImportRescanTest(BitcoinTestFramework):
    """Functional test driving every import variant and checking balances.

    Two service nodes (0: miner/sender, 1: key source) are followed by one
    node per entry in IMPORT_NODES, each receiving only the imports whose
    prune/rescan expectations match that node.
    """

    def __init__(self):
        super().__init__()
        # Nodes 0 and 1 are fixed; the rest are one per ImportNode combination.
        self.num_nodes = 2 + len(IMPORT_NODES)

    def setup_network(self):
        """Start all nodes (pruning enabled on half the import nodes) and connect them to node 0."""
        extra_args = [["-debug=1"] for _ in range(self.num_nodes)]
        # Import nodes start at index 2, hence enumerate(..., 2).
        for i, import_node in enumerate(IMPORT_NODES, 2):
            if import_node.prune:
                extra_args[i] += ["-prune=1"]
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
        for i in range(1, self.num_nodes):
            connect_nodes(self.nodes[i], 0)

    def run_test(self):
        """Exercise each import variant before and after new payments arrive."""
        # Create one transaction on node 0 with a unique amount and label for
        # each possible type of wallet import RPC.
        for i, variant in enumerate(IMPORT_VARIANTS):
            variant.label = "label {} {}".format(i, variant)
            variant.address = self.nodes[1].validateaddress(self.nodes[1].getnewaddress(variant.label))
            variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
            # Unique amount per variant so payments are distinguishable.
            variant.initial_amount = 10 - (i + 1) / 4.0
            variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount)
        # Generate a block containing the initial transactions, then another
        # block further in the future (past the rescan window).
        self.nodes[0].generate(1)
        assert_equal(self.nodes[0].getrawmempool(), [])
        timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"]
        set_node_times(self.nodes, timestamp + RESCAN_WINDOW + 1)
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        # For each variation of wallet key import, invoke the import RPC and
        # check the results from getbalance and listtransactions.
        for variant in IMPORT_VARIANTS:
            # Single-call imports with rescan on a pruned node are expected to
            # be rejected (disabled) rather than performed.
            variant.expect_disabled = variant.rescan == Rescan.yes and variant.prune and variant.call == Call.single
            expect_rescan = variant.rescan == Rescan.yes and not variant.expect_disabled
            variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))]
            variant.do_import(timestamp)
            if expect_rescan:
                variant.expected_balance = variant.initial_amount
                variant.expected_txs = 1
                variant.check(variant.initial_txid, variant.initial_amount, 2)
            else:
                variant.expected_balance = 0
                variant.expected_txs = 0
                variant.check()
        # Create new transactions sending to each address.
        # NOTE(review): `fee` is fetched but never used below -- confirm intent.
        fee = self.nodes[0].getnetworkinfo()["relayfee"]
        for i, variant in enumerate(IMPORT_VARIANTS):
            variant.sent_amount = 10 - (2 * i + 1) / 8.0
            variant.sent_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.sent_amount)
        # Generate a block containing the new transactions.
        self.nodes[0].generate(1)
        assert_equal(self.nodes[0].getrawmempool(), [])
        sync_blocks(self.nodes)
        # Check the latest results from getbalance and listtransactions.
        for variant in IMPORT_VARIANTS:
            if not variant.expect_disabled:
                variant.expected_balance += variant.sent_amount
                variant.expected_txs += 1
                variant.check(variant.sent_txid, variant.sent_amount, 1)
            else:
                variant.check()
def try_rpc(func, *args, **kwargs):
    """Invoke *func* and normalize the outcome to a ``(result, error)`` pair.

    On success returns ``(return_value, None)``; if the call raises
    JSONRPCException, returns ``(None, error_dict)`` instead of propagating.
    """
    result, error = None, None
    try:
        result = func(*args, **kwargs)
    except JSONRPCException as exc:
        error = exc.error
    return result, error
# Script entry point: run the functional test when executed directly.
if __name__ == "__main__":
    ImportRescanTest().main()
| mit |
shingonoide/odoo | addons/base_action_rule/__init__.py | 438 | 1098 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base_action_rule
import test_models
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rbramwell/pulp | server/test/unit/server/managers/repo/group/test_cud.py | 2 | 14632 | import traceback
import unittest
import mock
from .....base import PulpServerTests
from pulp.common.plugins import distributor_constants
from pulp.devel import mock_plugins
from pulp.server import exceptions as pulp_exceptions
from pulp.server.controllers import repository as repo_controller
from pulp.server.db import model
from pulp.server.db.model.criteria import Criteria
from pulp.server.db.model.repo_group import RepoGroup, RepoGroupDistributor
from pulp.server.managers import factory as managers_factory
from pulp.server.managers.repo.group import cud
class RepoGroupManagerInstantiationTests(unittest.TestCase):
    """Smoke tests verifying the repo group model and manager can be instantiated."""

    def test_constructor(self):
        """RepoGroup model objects can be constructed directly."""
        try:
            RepoGroup('contructor_group')
        # Catch Exception rather than a bare except so SystemExit and
        # KeyboardInterrupt are not swallowed and misreported as test failures.
        except Exception:
            # Fail with the full traceback so the root cause is visible.
            self.fail(traceback.format_exc())

    def test_factory(self):
        """The managers factory can build a repo group manager."""
        try:
            managers_factory.repo_group_manager()
        except Exception:
            self.fail(traceback.format_exc())
class RepoGroupTests(PulpServerTests):
    """Shared fixture for repo group tests: manager + collection setup and DB cleanup."""

    def setUp(self):
        super(RepoGroupTests, self).setUp()
        # Handle to the repo group collection and the manager under test.
        self.collection = RepoGroup.get_collection()
        self.manager = cud.RepoGroupManager()

    def tearDown(self):
        super(RepoGroupTests, self).tearDown()
        self.manager = None
        # Wipe all repo, group, and group-distributor documents between tests.
        model.Repository.drop_collection()
        RepoGroup.get_collection().remove(safe=True)
        RepoGroupDistributor.get_collection().remove(safe=True)

    def _create_repo(self, repo_id):
        # Convenience helper: create a repo through the controller.
        return repo_controller.create_repo(repo_id)
class RepoGroupCUDTests(RepoGroupTests):
    """Create/update/delete tests for repo groups, run against mock plugins."""

    def setUp(self):
        super(RepoGroupCUDTests, self).setUp()
        # Install mock plugins so distributor operations need no real plugin.
        mock_plugins.install()

    def tearDown(self):
        super(RepoGroupCUDTests, self).tearDown()
        mock_plugins.reset()

    def test_create(self):
        """A created group is persisted and retrievable by id."""
        group_id = 'create_group'
        self.manager.create_repo_group(group_id)
        group = self.collection.find_one({'id': group_id})
        self.assertFalse(group is None)

    def test_create_duplicate_id(self):
        """Creating a group with an existing id raises DuplicateResource."""
        group_id = 'already_exists'
        self.manager.create_repo_group(group_id)
        self.assertRaises(pulp_exceptions.DuplicateResource,
                          self.manager.create_repo_group,
                          group_id)

    def test_create_with_repo_ids(self):
        """Repo ids supplied at creation time are stored on the group."""
        repo_ids = ['test-repo-1', 'test-repo-2']
        group_id = 'test-group-id'
        self._create_repo(repo_ids[0])
        self._create_repo(repo_ids[1])
        self.manager.create_repo_group(group_id=group_id, repo_ids=repo_ids)
        group = self.collection.find_one({'id': group_id})
        self.assertFalse(group is None)
        self.assertEqual(group['repo_ids'], repo_ids)

    def test_create_with_invalid_repo_id(self):
        """A nonexistent repo id in the list raises MissingResource."""
        repo_ids = ['valid-repo-id', 'invalid-repo-id']
        group_id = 'test-group-id'
        self._create_repo('valid-repo-id')
        self.assertRaises(pulp_exceptions.MissingResource,
                          self.manager.create_repo_group,
                          group_id=group_id, repo_ids=repo_ids)

    @mock.patch('pulp.server.managers.repo.group.cud.RepoGroupManager.create_repo_group',
                spec_set=cud.RepoGroupManager.create_repo_group, return_value='potato')
    def test_create_and_config_no_distributors(self, create_repo_group):
        """
        Tests creating a repo group using create_and_configure_repo_group using only the
        required arguments.
        """
        # Test that create_repo_group is called with the correct arguments
        result = self.manager.create_and_configure_repo_group('group_id1')
        self.assertEqual(1, self.manager.create_repo_group.call_count)
        self.assertEqual((('group_id1', None, None, None, None),),
                         self.manager.create_repo_group.call_args)
        self.assertEqual('potato', result)

    @mock.patch('pulp.server.managers.repo.group.cud.RepoGroupManager.create_repo_group',
                spec_set=cud.RepoGroupManager.create_repo_group, return_value='potato')
    @mock.patch(
        'pulp.server.managers.repo.group.distributor.RepoGroupDistributorManager.add_distributor')
    def test_create_and_config_distributors(self, mock_add_distributor, create_repo_group):
        """
        Tests creating a repo group and adding distributors
        """
        group_id = 'group_id1'
        display_name = 'A display name'
        description = 'A test repo group'
        notes = {'key': 'value'}
        distributor_list = [{distributor_constants.DISTRIBUTOR_TYPE_ID_KEY: 'fake_distributor',
                             distributor_constants.DISTRIBUTOR_CONFIG_KEY: {'a': 1},
                             distributor_constants.DISTRIBUTOR_ID_KEY: 'fake_id'}]
        repo_ids = ['repo1', 'repo2']
        # Assert that create_repo_group was called with all the correct arguments
        result = self.manager.create_and_configure_repo_group(group_id, display_name, description,
                                                              repo_ids, notes, distributor_list)
        self.assertEqual(1, self.manager.create_repo_group.call_count)
        self.assertEqual(((group_id, display_name, description, repo_ids, notes),),
                         self.manager.create_repo_group.call_args)
        self.assertEqual('potato', result)
        # Assert add_distributor was called with all the correct arguments
        self.assertEqual(1, mock_add_distributor.call_count)
        self.assertEqual(group_id, mock_add_distributor.call_args[0][0])
        self.assertEqual('fake_distributor', mock_add_distributor.call_args[0][1])
        self.assertEqual({'a': 1}, mock_add_distributor.call_args[0][2])
        self.assertEqual('fake_id', mock_add_distributor.call_args[0][3])

    # Mock out delete because we expect it to be called when distributor validation fails
    @mock.patch('pulp.server.managers.repo.group.cud.RepoGroupManager.delete_repo_group',
                spec_set=cud.RepoGroupManager.delete_repo_group)
    @mock.patch('pulp.server.managers.repo.group.cud.RepoGroupManager.create_repo_group',
                spec_set=cud.RepoGroupManager.create_repo_group, return_value='potato')
    def test_create_and_config_bad_distributor_list(self, create_repo_group, delete_repo_group):
        """
        Test creating a repo group with a distributor_list that isn't a list, tuple or None
        """
        # Test that an exception is raised and a group is not created
        self.assertRaises(
            pulp_exceptions.InvalidValue,
            self.manager.create_and_configure_repo_group, group_id='id', distributor_list='string')
        self.assertEqual(0, self.manager.create_repo_group.call_count)

    @mock.patch('pulp.server.managers.repo.group.cud.RepoGroupManager.create_repo_group',
                spec_set=cud.RepoGroupManager.create_repo_group, return_value='potato')
    def test_create_and_config_bad_distributor(self, create_repo_group):
        """
        Test creating a repo group with a distributor that is not a dictionary
        """
        # Test that an exception is raised and a group is not created
        self.assertRaises(
            pulp_exceptions.InvalidValue, self.manager.create_and_configure_repo_group,
            group_id='id', distributor_list=['not a dict'])
        self.assertEqual(0, self.manager.create_repo_group.call_count)

    @mock.patch('pulp.server.managers.repo.group.cud.RepoGroupManager.delete_repo_group',
                spec_set=cud.RepoGroupManager.delete_repo_group)
    @mock.patch('pulp.server.managers.repo.group.cud.RepoGroupManager.create_repo_group',
                spec_set=cud.RepoGroupManager.create_repo_group, return_value='potato')
    @mock.patch(
        'pulp.server.managers.repo.group.distributor.RepoGroupDistributorManager.add_distributor')
    def test_create_and_config_failed_dist_add(self, mock_add_distributor, create_repo_group,
                                               delete_repo_group):
        """
        Test creating a repo group which results the distributor manager raising an InvalidValue
        """
        mock_add_distributor.side_effect = pulp_exceptions.InvalidValue(['everything'])
        # Test that if add_distributor fails, an exception is raised and the created group is
        # cleaned up
        self.assertRaises(
            pulp_exceptions.InvalidValue, self.manager.create_and_configure_repo_group,
            group_id='id', distributor_list=[{}])
        self.manager.create_repo_group.assert_called_once_with('id', None, None, None, None)
        self.manager.delete_repo_group.assert_called_once_with('id')

    def test_update_display_name(self):
        """update_repo_group replaces the stored display name."""
        group_id = 'update_me'
        original_display_name = 'Update Me'
        self.manager.create_repo_group(group_id, display_name=original_display_name)
        group = self.collection.find_one({'id': group_id})
        self.assertTrue(group['display_name'] == original_display_name)
        new_display_name = 'Updated!'
        self.manager.update_repo_group(group_id, display_name=new_display_name)
        group = self.collection.find_one({'id': group_id})
        self.assertFalse(group['display_name'] == original_display_name)
        self.assertTrue(group['display_name'] == new_display_name)

    def test_update_description(self):
        """update_repo_group replaces the stored description."""
        group_id = 'update_me'
        original_description = 'This is a repo group that needs to be updated :P'
        self.manager.create_repo_group(group_id, description=original_description)
        group = self.collection.find_one({'id': group_id})
        self.assertTrue(group['description'] == original_description)
        new_description = 'This repo group has been updated! :D'
        self.manager.update_repo_group(group_id, description=new_description)
        group = self.collection.find_one({'id': group_id})
        self.assertFalse(group['description'] == original_description)
        self.assertTrue(group['description'] == new_description)

    def test_update_notes(self):
        """Notes updates merge: new keys added, empty-string values remove keys."""
        group_id = 'notes'
        original_notes = {'key_1': 'blonde', 'key_3': 'brown'}
        self.manager.create_repo_group(group_id, notes=original_notes)
        group = self.collection.find_one({'id': group_id})
        self.assertTrue(group['notes'] == original_notes)
        delta = {'key_2': 'ginger', 'key_3': ''}
        self.manager.update_repo_group(group_id, notes=delta)
        group = self.collection.find_one({'id': group_id})
        self.assertEqual(group['notes'].get('key_1', None), 'blonde')
        self.assertEqual(group['notes'].get('key_2', None), 'ginger')
        self.assertTrue('key_3' not in group['notes'])

    def test_set_note(self):
        """set_note stores a single key/value pair in the group's notes."""
        group_id = 'noteworthy'
        self.manager.create_repo_group(group_id)
        key = 'package'
        value = ['package_dependencies']
        note = {key: value}
        self.manager.set_note(group_id, key, value)
        group = self.collection.find_one({'id': group_id})
        self.assertTrue(group['notes'] == note)

    def test_unset_note(self):
        """unset_note removes a key from the group's notes."""
        group_id = 'not_noteworthy'
        notes = {'marital_status': 'polygamist'}
        self.manager.create_repo_group(group_id, notes=notes)
        group = self.collection.find_one({'id': group_id})
        self.assertTrue(group['notes'] == notes)
        self.manager.unset_note(group_id, 'marital_status')
        group = self.collection.find_one({'id': group_id})
        self.assertFalse(group['notes'])

    def test_delete(self):
        """delete_repo_group removes the group document."""
        # Setup
        group_id = 'delete_me'
        self.manager.create_repo_group(group_id)
        group = self.collection.find_one({'id': group_id})
        self.assertFalse(group is None)
        # Test
        self.manager.delete_repo_group(group_id)
        # Verify
        group = self.collection.find_one({'id': group_id})
        self.assertTrue(group is None)

    def test_delete_with_distributor(self):
        """Deleting a group also removes its associated distributors."""
        # Setup
        group_id = 'doomed'
        self.manager.create_repo_group(group_id)
        distributor_id = 'doomed-dist'
        dist_manager = managers_factory.repo_group_distributor_manager()
        dist_manager.add_distributor(group_id, 'mock-group-distributor', {},
                                     distributor_id=distributor_id)
        distributor = RepoGroupDistributor.get_collection().find_one({'id': distributor_id})
        self.assertTrue(distributor is not None)
        # Test
        self.manager.delete_repo_group(group_id)
        # Verify
        distributor = RepoGroupDistributor.get_collection().find_one({'id': distributor_id})
        self.assertTrue(distributor is None)
class RepoGroupMembershipTests(RepoGroupTests):
    """Tests for associating/unassociating repos with groups via Criteria."""

    def test_add_single(self):
        """A repo matched by criteria is added to the group's repo_ids."""
        group_id = 'test_group'
        self.manager.create_repo_group(group_id)
        repo = self._create_repo('test_repo')
        criteria = Criteria(filters={'repo_id': repo.repo_id}, fields=['repo_id'])
        self.manager.associate(group_id, criteria)
        group = self.collection.find_one({'id': group_id})
        self.assertTrue(repo.repo_id in group['repo_ids'])

    def test_remove_single(self):
        """A repo matched by criteria is removed from the group's repo_ids."""
        group_id = 'test_group'
        repo = self._create_repo('test_repo')
        self.manager.create_repo_group(group_id, repo_ids=[repo['repo_id']])
        group = self.collection.find_one({'id': group_id})
        self.assertTrue(repo.repo_id in group['repo_ids'])
        criteria = Criteria(filters={'id': repo['id']}, fields=['id'])
        self.manager.unassociate(group_id, criteria)
        group = self.collection.find_one({'id': group_id})
        self.assertFalse(repo['id'] in group['repo_ids'])

    def test_delete_repo(self):
        """Deleting a repo removes its id from any group that referenced it."""
        group_id = 'delete_from_me'
        repo = self._create_repo('delete_me')
        self.manager.create_repo_group(group_id, repo_ids=[repo.repo_id])
        group = self.collection.find_one({'id': group_id})
        self.assertTrue(repo.repo_id in group['repo_ids'])
        repo_controller.delete(repo.repo_id)
        group = self.collection.find_one({'id': group_id})
        self.assertFalse(repo.repo_id in group['repo_ids'])

    def test_associate_id_regex(self):
        """Criteria with a $regex filter associates all matching repos."""
        group_id = 'associate_by_regex'
        self.manager.create_repo_group(group_id)
        repo_1 = self._create_repo('repo_1')
        repo_2 = self._create_repo('repo_2')
        criteria = Criteria(filters={'repo_id': {'$regex': 'repo_[12]'}})
        self.manager.associate(group_id, criteria)
        group = self.collection.find_one({'id': group_id})
        self.assertTrue(repo_1.repo_id in group['repo_ids'])
        self.assertTrue(repo_2.repo_id in group['repo_ids'])
| gpl-2.0 |
joostvanzwieten/nutils | docs/sphinx_mods.py | 1 | 14556 | # Copyright (c) 2014 Evalf
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import inspect, pathlib, shutil, os, runpy, urllib.parse, shlex, doctest, re, io, hashlib, base64, treelog, stringly
import docutils.nodes, docutils.parsers.rst, docutils.statemachine
import sphinx.util.logging, sphinx.util.docutils, sphinx.addnodes
import nutils.matrix, nutils.testing
import numpy
project_root = pathlib.Path(__file__).parent.parent.resolve()
def process_signature(self, objtype, fullname, object, options, args, retann):
    """Sphinx ``autodoc-process-signature`` handler.

    For functions, classes, and methods, return an ``(args, retann)`` pair
    whose signature string has all annotations removed; return None (let
    autodoc decide) for every other object type or when no signature can be
    retrieved.
    """
    if objtype not in ('function', 'class', 'method'):
        return
    try:
        signature = inspect.signature(object)
    except ValueError:
        # Some builtins have no signature.
        return
    # Strip parameter annotations and the return annotation so the rendered
    # signature stays compact.
    bare_params = [param.replace(annotation=param.empty)
                   for param in signature.parameters.values()]
    signature = signature.replace(parameters=bare_params,
                                  return_annotation=inspect.Signature.empty)
    # Escape backslashes for reST; the return annotation slot is left empty.
    return str(signature).replace('\\', '\\\\'), ''
def print_rst_autogen_header(*, file, src=None):
print('..', file=file)
print(' Automatically generated. Edits are futile.', file=file)
print(file=file)
print(':autogenerated:', file=file)
if src is not None:
abssrc = src.resolve().relative_to(project_root)
print(':autogeneratedfrom: {}'.format(abssrc), file=file)
print(file=file)
def print_rst_h1(text, *, file):
    """Write *text* as a reST level-1 heading (underlined with '='), padded by blank lines."""
    assert '\n' not in text
    underline = '=' * len(text)
    for line in ('', text, underline, ''):
        print(line, file=file)
def print_rst_label(name, *, file):
    """Write a reST cross-reference target ``.. _name:`` surrounded by blank lines."""
    for line in ('', '.. _{}:'.format(name), ''):
        print(line, file=file)
def copy_utime(src, dst):
    """Copy the access and modification timestamps (ns precision) from *src* to *dst*."""
    src_stat = os.stat(str(src))
    times_ns = (src_stat.st_atime_ns, src_stat.st_mtime_ns)
    os.utime(str(dst), ns=times_ns)
def generate_examples(app):
    """Emit one reST page under ``<srcdir>/examples`` per example script.

    Each page gets the autogen header, a referenceable label, a heading, and
    an ``exampledoc`` directive pointing at the script; the page mtime is
    synced to the script so rebuilds are skipped when nothing changed.
    """
    out_dir = pathlib.Path(app.srcdir)/'examples'
    out_dir.mkdir(parents=True, exist_ok=True)
    scripts = tuple(f for f in sorted(project_root.glob('examples/*.py')) if f.name != '__init__.py')
    progress = sphinx.util.status_iterator(scripts, 'generating examples... ', 'purple', len(scripts), app.verbosity)
    for script in progress:
        name = script.name
        page = out_dir/(script.with_suffix('.rst').name)
        with page.open('w', encoding='utf-8') as stream:
            print_rst_autogen_header(file=stream, src=script)
            # Label so an example can be referenced as :ref:`examples/laplace.py`.
            print_rst_label('examples/{}'.format(name), file=stream)
            print_rst_h1(name, file=stream)
            print('.. exampledoc:: {}'.format(script.relative_to(project_root).as_posix()), file=stream)
        copy_utime(script, page)
class LineIter:
    """Iterate over lines as ``(index, line)`` pairs with one-line lookahead.

    Truthiness reflects whether more lines remain; ``peek`` exposes the next
    line without consuming it and raises ValueError once exhausted.
    """

    _EXHAUSTED = object()  # private sentinel marking the end of input

    def __init__(self, lines):
        self._source = iter(lines)
        self._index = -1
        self._advance()

    def _advance(self):
        """Pull the next line into the lookahead slot, or mark exhaustion."""
        try:
            self._pending = next(self._source)
            self._index += 1
        except StopIteration:
            self._pending = self._EXHAUSTED

    def __bool__(self):
        return self._pending is not self._EXHAUSTED

    def __iter__(self):
        return self

    def __next__(self):
        if self._pending is self._EXHAUSTED:
            raise StopIteration
        value = self._index, self._pending
        self._advance()
        return value

    @property
    def peek(self):
        """The next line without consuming it; ValueError when exhausted."""
        if self._pending is self._EXHAUSTED:
            raise ValueError
        return self._pending
class ExampleDocDirective(docutils.parsers.rst.Directive):
    """Directive ``.. exampledoc:: path`` rendering an example script.

    The script is split into alternating spans: comment lines starting with
    '# ' become reST prose, everything else becomes highlighted literal
    blocks with correct line numbers.
    """
    has_content = False
    required_arguments = 1
    options_arguments = 0

    @staticmethod
    def _isdocline(line):
        # A "doc line" is a bare '#' or a '# ...' comment (after indentation).
        line = line.lstrip()
        return line.rstrip() == '#' or line.startswith('# ')

    def run(self):
        logger = sphinx.util.logging.getLogger(__name__)
        nodes = []
        src = project_root/self.arguments[0]
        with src.open('r', encoding='utf-8') as f:
            prevtype = None
            lines = LineIter(f)
            # Skip a shebang line if present.
            if lines and lines.peek.startswith('#!'):
                next(lines)
            while lines:
                if lines.peek.rstrip('\n') == '':
                    next(lines)
                elif self._isdocline(lines.peek):
                    # Collect all doc lines.
                    contents = docutils.statemachine.ViewList()
                    while lines and self._isdocline(lines.peek):
                        i, line = next(lines)
                        contents.append(line.lstrip()[2:], self.arguments[0], i)
                    # Parse as rst into `node`.
                    with sphinx.util.docutils.switch_source_input(self.state, contents):
                        node = docutils.nodes.container()
                        self.state.nested_parse(contents, 0, node)
                    # Process sh roles. Add links to logs.
                    for sh_node in node.traverse(docutils.nodes.literal):
                        if 'nutils_sh' not in sh_node:
                            continue
                        cmdline = sh_node.get('nutils_sh')
                        cmdline_parts = tuple(shlex.split(cmdline))
                        # Only commands running this very script get a log link.
                        if cmdline_parts[:2] != ('python3', src.name):
                            logger.warning('Not creating a log for {}.'.format(cmdline))
                            continue
                        log_link = sphinx.addnodes.only(expr='html')
                        log_link.append(docutils.nodes.inline('', ' '))
                        # The pending_xref is resolved later by create_log().
                        xref = sphinx.addnodes.pending_xref('', reftype='nutils-log', refdomain='std', reftarget=cmdline_parts[2:], script=src)
                        xref += docutils.nodes.inline('', '(view log)', classes=['nutils-log-link'])
                        log_link += xref
                        sh_node.parent.insert(sh_node.parent.index(sh_node)+1, log_link)
                    nodes.extend(node.children)
                else:
                    # Collect all source lines.
                    istart, line = next(lines)
                    contents = [line]
                    while lines and not self._isdocline(lines.peek):
                        i, line = next(lines)
                        contents.append(line)
                    # Remove trailing empty lines.
                    while contents and contents[-1].rstrip('\n') == '':
                        del contents[-1]
                    contents = ''.join(contents)
                    # Create literal block.
                    literal = docutils.nodes.literal_block(contents, contents)
                    literal['language'] = 'python3'
                    literal['linenos'] = True
                    literal['highlight_args'] = dict(linenostart=istart+1)
                    sphinx.util.nodes.set_source_info(self, literal)
                    nodes.append(literal)
        return nodes
def role_sh(name, rawtext, text, lineno, inliner, options={}, context=[]):
    """reST role ``:sh:`` — render *text* as a literal node tagged with
    ``nutils_sh`` so ExampleDocDirective can attach a log link to it.

    The mutable default arguments follow the docutils role-function calling
    convention and are never mutated here.
    """
    node = docutils.nodes.literal('', text, nutils_sh=text)
    return [node], []
def create_log(app, env, node, contnode):
    """Sphinx ``missing-reference`` handler resolving ``nutils-log`` xrefs.

    Runs the referenced example script (unless a log newer than the script
    already exists), writes an HTML log under ``_logs/<quoted cmdline>/``,
    and returns a reference node pointing at it.
    """
    logger = sphinx.util.logging.getLogger(__name__)
    if node['reftype'] == 'nutils-log':
        script = node.get('script')
        scriptname = str(script.relative_to(project_root))
        cmdline_args = node['reftarget']
        cmdline = ' '.join(map(shlex.quote, [scriptname, *cmdline_args]))
        # Percent-escapes are swapped for '+' to keep the directory name tame.
        target = '_logs/{}/index'.format(urllib.parse.quote(cmdline, safe='').replace('%', '+'))
        dst_log = (pathlib.Path(app.builder.outdir)/target).parent
        if dst_log.exists() and dst_log.stat().st_mtime > script.stat().st_mtime:
            logger.debug('Skip building log of {cmdline} because it already exists and '
                         'is newer than {script}. Please touch {script} to force a rebuild.'
                         .format(script=scriptname, cmdline=cmdline))
        else:
            if dst_log.exists():
                logger.debug('purging old log files... {}'.format(dst_log))
                shutil.rmtree(str(dst_log))
            else:
                dst_log.parent.mkdir(parents=True, exist_ok=True)
            logger.info('creating log... {}'.format(cmdline))
            # Execute the example module to obtain its main() entry point.
            script_dict = runpy.run_path(str(script), run_name='__log_builder__')
            # Parse cmdline.
            func = script_dict['main']
            params = inspect.signature(func).parameters
            doc = stringly.util.DocString(func)
            kwargs = doc.defaults.copy()
            kwargs.update(arg.split('=', 1) for arg in cmdline_args if arg)
            # Run script.
            import matplotlib.testing
            matplotlib.testing.setup()
            with nutils.cli._htmllog(outdir=str(dst_log), scriptname=scriptname, kwargs=[(name, kwargs[name], doc.argdocs[name]) for name in params]) as log, treelog.set(log), nutils.matrix.backend('scipy'), nutils.warnings.via(treelog.warning):
                func(**{name: stringly.loads(params[name].annotation, kwargs[name]) for name in params})
            (dst_log/'log.html').rename(dst_log/'index.html')
        refnode = docutils.nodes.reference('', '', internal=False, refuri=app.builder.get_relative_uri(env.docname, target))
        refnode.append(contnode)
        return refnode
def generate_api(app):
    """Emit one autodoc stub page under ``<srcdir>/nutils`` per public module.

    Private modules (leading underscore) and the top-level package
    ``__init__.py`` are skipped; package ``__init__.py`` files map to the
    package name itself.
    """
    pkg_dir = project_root/'nutils'
    out_dir = pathlib.Path(app.srcdir)/'nutils'
    out_dir.mkdir(parents=True, exist_ok=True)

    def _is_public(f):
        # One-liner: keep sub-package inits and non-underscore modules.
        return f != pkg_dir/'__init__.py' and (f.name == '__init__.py' or not f.name.startswith('_'))

    sources = tuple(f for f in sorted(pkg_dir.glob('**/*.py')) if _is_public(f))
    for source in sphinx.util.status_iterator(sources, 'generating api... ', 'purple', len(sources), app.verbosity):
        base = source.parent if source.name == '__init__.py' else source.with_suffix('')
        module = '.'.join(base.relative_to(pkg_dir).parts)
        page = out_dir/(module+'.rst')
        with page.open('w', encoding='utf-8') as stream:
            print_rst_autogen_header(file=stream, src=source)
            print_rst_h1(module, file=stream)
            print('.. automodule:: {}'.format('nutils.{}'.format(module)), file=stream)
        copy_utime(source, page)
def remove_generated(app, exception):
    """Sphinx ``build-finished`` handler: delete the generated source trees.

    Removal failures are logged as warnings rather than raised.
    """
    logger = sphinx.util.logging.getLogger(__name__)

    def _warn(function, path, excinfo):
        logger.warning('failed to remove {}'.format(path))

    for name in ('nutils', 'examples'):
        generated = pathlib.Path(app.srcdir)/name
        shutil.rmtree(str(generated), onerror=_warn)
class RequiresNode(docutils.nodes.Admonition, docutils.nodes.TextElement): pass
def html_visit_requires(self, node):
    # Open a <div class="requires"> wrapper; the node's children render inside.
    self.body.append(self.starttag(node, 'div', CLASS='requires'))
def html_depart_requires(self, node):
    # Close the wrapper opened by html_visit_requires.
    self.body.append('</div>\n')
def text_visit_requires(self, node):
    # Text builder: start a new unindented state for the admonition body.
    self.new_state(0)
def text_depart_requires(self, node):
    # Text builder: close the state opened by text_visit_requires.
    self.end_state()
class RequiresDirective(docutils.parsers.rst.Directive):
    """Directive ``.. requires:: a, b`` producing a "Requires a, b." admonition."""
    has_content = False
    required_arguments = 1
    optional_arguments = 0

    def run(self):
        names = [part.strip() for part in self.arguments[0].split(',')]
        msg = 'Requires {}.'.format(', '.join(names))
        node = RequiresNode('requires')
        node.document = self.state.document
        sphinx.util.nodes.set_source_info(self, node)
        node.append(docutils.nodes.paragraph('', docutils.nodes.Text(msg, msg), translatable=False))
        return [node]
class ConsoleDirective(docutils.parsers.rst.Directive):
    """Directive ``.. console::`` rendering and *executing* a doctest session.

    The content is shown as a literal block (with spread markers stripped),
    run through doctest with shared per-document globals, and any matplotlib
    figures produced are embedded inline as SVG data URIs.
    """
    has_content = True
    required_arguments = 0
    options_arguments = 0
    # Compatibility shim: treelog grew a `proto` module at some version.
    info = treelog.proto.Level.info if hasattr(treelog, 'proto') else 1
    _console_log = treelog.FilterLog(treelog.StdoutLog(), minlevel=info)

    def run(self):
        document = self.state.document
        env = document.settings.env
        nodes = []
        # Dedent the directive content by its common indentation.
        indent = min(len(line)-len(line.lstrip()) for line in self.content)
        code = ''.join(line[indent:]+'\n' for line in self.content)
        # Strip the '±spread' part of float matches for display purposes only.
        code_wo_spread = nutils.testing.FloatNeighborhoodOutputChecker.re_spread.sub(lambda m: m.group(0).split('±', 1)[0], code)
        literal = docutils.nodes.literal_block(code_wo_spread, code_wo_spread, classes=['console'])
        literal['language'] = 'python3'
        literal['linenos'] = False
        sphinx.util.nodes.set_source_info(self, literal)
        nodes.append(literal)
        import matplotlib.testing
        matplotlib.testing.setup()
        import matplotlib.pyplot
        parser = doctest.DocTestParser()
        runner = doctest.DocTestRunner(checker=nutils.testing.FloatNeighborhoodOutputChecker(), optionflags=doctest.ELLIPSIS)
        # Globals persist across console blocks in the same document; they are
        # dropped again by remove_console_globs after the doctree is read.
        globs = getattr(document, '_console_globs', {})
        test = parser.get_doctest(code, globs, 'test', env.docname, self.lineno)
        with treelog.set(self._console_log):
            failures, tries = runner.run(test, clear_globs=False)
        # Embed every figure the executed code created as an inline SVG.
        for fignum in matplotlib.pyplot.get_fignums():
            fig = matplotlib.pyplot.figure(fignum)
            with io.BytesIO() as f:
                fig.savefig(f, format='svg')
                name = hashlib.sha1(f.getvalue()).hexdigest()+'.svg'
                uri = 'data:image/svg+xml;base64,{}'.format(base64.b64encode(f.getvalue()).decode())
                nodes.append(docutils.nodes.image('', uri=uri, alt='image generated by matplotlib'))
        matplotlib.pyplot.close('all')
        if failures:
            document.reporter.warning('doctest failed', line=self.lineno)
        document._console_globs = test.globs
        return nodes
def remove_console_globs(app, doctree):
    """Drop the per-document doctest namespace accumulated by console blocks, if any."""
    try:
        del doctree._console_globs
    except AttributeError:
        pass
def fix_testcase_reference(app, env, node, contnode):
    """Redirect xrefs to the private path ``unittest.case.TestCase``.

    Rewrites the target to the public ``unittest.TestCase`` on a copy of the
    node and re-emits ``missing-reference`` so another handler can resolve it.
    Returns None (no resolution) for any other target.
    """
    if node['reftarget'] != 'unittest.case.TestCase':
        return
    patched = node.deepcopy()
    patched['reftarget'] = 'unittest.TestCase'
    return app.emit_firstresult('missing-reference', env, patched, contnode)
def setup(app):
    """Sphinx extension entry point: register handlers, directives, and nodes."""
    app.connect('autodoc-process-signature', process_signature)
    app.connect('builder-inited', generate_api)
    app.connect('builder-inited', generate_examples)
    app.add_directive('exampledoc', ExampleDocDirective)
    app.add_role('sh', role_sh)
    # create_log is connected before fix_testcase_reference; both listen on
    # 'missing-reference' and Sphinx uses the first non-None result.
    app.connect('missing-reference', create_log)
    app.add_node(RequiresNode,
                 html=(html_visit_requires, html_depart_requires),
                 text=(text_visit_requires, text_depart_requires))
    app.add_directive('requires', RequiresDirective)
    app.add_directive('console', ConsoleDirective)
    app.connect('doctree-read', remove_console_globs)
    app.connect('build-finished', remove_generated)
    app.connect('missing-reference', fix_testcase_reference)
    # add_css_file replaced add_stylesheet in Sphinx 1.8.
    if sphinx.version_info >= (1,8):
        app.add_css_file('mods.css')
    else:
        app.add_stylesheet('mods.css')
# vim: sts=2:sw=2:et
| mit |
pplatek/odoo | addons/pos_restaurant/__init__.py | 332 | 1074 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import restaurant
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
albertomurillo/ansible | lib/ansible/module_utils/facts/hardware/base.py | 172 | 2736 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.facts.collector import BaseFactCollector
class Hardware:
    """Base class for platform-specific hardware fact gatherers.

    Subclasses override ``platform`` and ``populate``; this generic base
    collects nothing.
    """

    platform = 'Generic'

    # FIXME: remove load_on_init when we can
    def __init__(self, module, load_on_init=False):
        self.module = module

    def populate(self, collected_facts=None):
        """Return a dict of hardware facts; the generic base has none."""
        return {}
class HardwareCollector(BaseFactCollector):
    """Fact collector that delegates to a platform ``Hardware`` class."""

    name = 'hardware'
    _fact_ids = set(['processor',
                     'processor_cores',
                     'processor_count',
                     # TODO: mounts isnt exactly hardware
                     'mounts',
                     'devices'])
    _fact_class = Hardware

    def collect(self, module=None, collected_facts=None):
        cached = collected_facts or {}
        if not module:
            return {}

        # NOTE(review): the upstream comment says Network munges
        # cached_facts by side effect and should get a copy, but no copy
        # is actually taken here — confirm whether that matters.
        gatherer = self._fact_class(module)
        return gatherer.populate(collected_facts=cached)
| gpl-3.0 |
shaufi10/odoo | addons/hr_recruitment/res_config.py | 352 | 3627 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-Today OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
class hr_applicant_settings(osv.TransientModel):
    # Transient (wizard) model extending the HR configuration settings with
    # recruitment-specific options: resume indexation and the mail alias
    # used to create applicants from incoming email.
    _name = 'hr.config.settings'
    _inherit = ['hr.config.settings', 'fetchmail.config.settings']

    _columns = {
        'module_document': fields.boolean('Allow the automatic indexation of resumes',
            help='Manage your CV\'s and motivation letter related to all applicants.\n'
                 '-This installs the module document_ftp. This will install the knowledge management module in order to allow you to search using specific keywords through the content of all documents (PDF, .DOCx...)'),
        'alias_prefix': fields.char('Default Alias Name for Jobs'),
        'alias_domain': fields.char('Alias Domain'),
    }

    _defaults = {
        # Default alias domain comes from the global mail.alias configuration
        # (looked up as superuser; [1] picks the domain out of the pair).
        'alias_domain': lambda self, cr, uid, context: self.pool['mail.alias']._get_alias_domain(cr, SUPERUSER_ID, [1], None, None)[1],
    }

    def _find_default_job_alias_id(self, cr, uid, context=None):
        # Prefer the alias shipped as XML data; fall back to searching for a
        # generic hr.applicant alias that is not bound to a specific record.
        alias_id = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'hr_recruitment.mail_alias_jobs')
        if not alias_id:
            alias_ids = self.pool['mail.alias'].search(
                cr, uid, [
                    ('alias_model_id.model', '=', 'hr.applicant'),
                    ('alias_force_thread_id', '=', False),
                    ('alias_parent_model_id.model', '=', 'hr.job'),
                    ('alias_parent_thread_id', '=', False),
                    ('alias_defaults', '=', '{}')
                ], context=context)
            # Old-style `and/or` conditional: first match, or False if none.
            alias_id = alias_ids and alias_ids[0] or False
        return alias_id

    def get_default_alias_prefix(self, cr, uid, ids, context=None):
        # Settings getter: report the current alias name (False when unset).
        alias_name = False
        alias_id = self._find_default_job_alias_id(cr, uid, context=context)
        if alias_id:
            alias_name = self.pool['mail.alias'].browse(cr, uid, alias_id, context=context).alias_name
        return {'alias_prefix': alias_name}

    def set_default_alias_prefix(self, cr, uid, ids, context=None):
        # Settings setter: create the default job alias if missing,
        # otherwise rename the existing one.
        mail_alias = self.pool.get('mail.alias')
        for record in self.browse(cr, uid, ids, context=context):
            alias_id = self._find_default_job_alias_id(cr, uid, context=context)
            if not alias_id:
                # NOTE(review): dict(context, ...) raises TypeError when
                # context is None — presumably callers always pass a dict;
                # confirm against the settings framework.
                create_ctx = dict(context, alias_model_name='hr.applicant', alias_parent_model_name='hr.job')
                alias_id = self.pool['mail.alias'].create(cr, uid, {'alias_name': record.alias_prefix}, context=create_ctx)
            else:
                mail_alias.write(cr, uid, alias_id, {'alias_name': record.alias_prefix}, context=context)
        return True
| agpl-3.0 |
mapbased/vitess | py/vtctl/grpc_vtctl_client.py | 4 | 1655 | # Copyright 2015 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""This file contains the grpc implementation of the vtctl client.
"""
import datetime
from urlparse import urlparse
from vtdb import prefer_vtroot_imports # pylint: disable=unused-import
import grpc
import vtctl_client
from vtproto import vtctldata_pb2
from vtproto import vtctlservice_pb2
class GRPCVtctlClient(vtctl_client.VtctlClient):
  """gRPC implementation of VtctlClient, registered as the 'grpc' protocol."""

  def __init__(self, addr, timeout):
    self.addr = addr
    self.timeout = timeout
    self.stub = None

  def __str__(self):
    return '<GRPCVtctlClient %s>' % self.addr

  def dial(self):
    # Re-dialing closes any previous stub before opening a new channel.
    if self.stub:
      self.stub.close()

    parsed = urlparse('http://' + self.addr)
    channel = grpc.insecure_channel('%s:%s' % (parsed.hostname, parsed.port))
    self.stub = vtctlservice_pb2.VtctlStub(channel)

  def close(self):
    self.stub = None

  def is_closed(self):
    return self.stub is None

  def execute_vtctl_command(self, args, action_timeout=30.0):
    # action_timeout is seconds client-side, nanoseconds on the wire.
    request = vtctldata_pb2.ExecuteVtctlCommandRequest(
        args=args,
        action_timeout=long(action_timeout * 1e9))
    for response in self.stub.ExecuteVtctlCommand(request, action_timeout):
      event = response.event
      stamp = datetime.datetime.utcfromtimestamp(event.time.seconds)
      yield vtctl_client.Event(stamp, event.level, event.file,
                               event.line, event.value)


vtctl_client.register_conn_class('grpc', GRPCVtctlClient)
| bsd-3-clause |
wolfskaempf/ga_statistics | lib/python2.7/site-packages/django/contrib/gis/geos/mutable_list.py | 118 | 10979 | # Copyright (c) 2008-2009 Aryeh Leib Taurog, all rights reserved.
# Released under the New BSD license.
"""
This module contains a base type which provides list-style mutations
without specific data storage methods.
See also http://www.aryehleib.com/MutableLists.html
Author: Aryeh Leib Taurog.
"""
from django.utils import six
from django.utils.functional import total_ordering
from django.utils.six.moves import range
@total_ordering
class ListMixin(object):
    """
    A base class which provides complete list interface.
    Derived classes must call ListMixin's __init__() function
    and implement the following:

    function _get_single_external(self, i):
        Return single item with index i for general use.
        The index i will always satisfy 0 <= i < len(self).

    function _get_single_internal(self, i):
        Same as above, but for use within the class [Optional]
        Note that if _get_single_internal and _get_single_internal return
        different types of objects, _set_list must distinguish
        between the two and handle each appropriately.

    function _set_list(self, length, items):
        Recreate the entire object.

        NOTE: items may be a generator which calls _get_single_internal.
        Therefore, it is necessary to cache the values in a temporary:
            temp = list(items)
        before clobbering the original storage.

    function _set_single(self, i, value):
        Set the single item at index i to value [Optional]
        If left undefined, all mutations will result in rebuilding
        the object using _set_list.

    function __len__(self):
        Return the length

    int _minlength:
        The minimum legal length [Optional]

    int _maxlength:
        The maximum legal length [Optional]

    type or tuple _allowed:
        A type or tuple of allowed item types [Optional]

    class _IndexError:
        The type of exception to be raise on invalid index [Optional]
    """
    _minlength = 0
    _maxlength = None
    _IndexError = IndexError

    # ### Python initialization and special list interface methods ###

    def __init__(self, *args, **kwargs):
        # Default the optional hooks: reads fall back to the external
        # accessor; writes fall back to rebuilding the whole object.
        if not hasattr(self, '_get_single_internal'):
            self._get_single_internal = self._get_single_external

        if not hasattr(self, '_set_single'):
            self._set_single = self._set_single_rebuild
            self._assign_extended_slice = self._assign_extended_slice_rebuild

        super(ListMixin, self).__init__(*args, **kwargs)

    def __getitem__(self, index):
        "Get the item(s) at the specified index/slice."
        if isinstance(index, slice):
            return [self._get_single_external(i) for i in range(*index.indices(len(self)))]
        else:
            index = self._checkindex(index)
            return self._get_single_external(index)

    def __delitem__(self, index):
        "Delete the item(s) at the specified index/slice."
        if not isinstance(index, six.integer_types + (slice,)):
            raise TypeError("%s is not a legal index" % index)

        # calculate new length and dimensions
        origLen = len(self)
        if isinstance(index, six.integer_types):
            index = self._checkindex(index)
            indexRange = [index]
        else:
            indexRange = range(*index.indices(origLen))

        newLen = origLen - len(indexRange)
        # Generator keeps only the surviving items; _rebuild caches it.
        newItems = (self._get_single_internal(i)
                    for i in range(origLen)
                    if i not in indexRange)

        self._rebuild(newLen, newItems)

    def __setitem__(self, index, val):
        "Set the item(s) at the specified index/slice."
        if isinstance(index, slice):
            self._set_slice(index, val)
        else:
            index = self._checkindex(index)
            self._check_allowed((val,))
            self._set_single(index, val)

    def __iter__(self):
        "Iterate over the items in the list"
        for i in range(len(self)):
            yield self[i]

    # ### Special methods for arithmetic operations ###
    def __add__(self, other):
        'add another list-like object'
        return self.__class__(list(self) + list(other))

    def __radd__(self, other):
        'add to another list-like object'
        return other.__class__(list(other) + list(self))

    def __iadd__(self, other):
        'add another list-like object to self'
        self.extend(list(other))
        return self

    def __mul__(self, n):
        'multiply'
        return self.__class__(list(self) * n)

    def __rmul__(self, n):
        'multiply'
        return self.__class__(list(self) * n)

    def __imul__(self, n):
        'multiply'
        if n <= 0:
            del self[:]
        else:
            cache = list(self)
            for i in range(n - 1):
                self.extend(cache)
        return self

    def __eq__(self, other):
        # Element-wise comparison; a shorter self compares unequal.
        olen = len(other)
        for i in range(olen):
            try:
                c = self[i] == other[i]
            except self._IndexError:
                # self must be shorter
                return False
            if not c:
                return False
        return len(self) == olen

    def __lt__(self, other):
        # Lexicographic ordering; total_ordering derives the rest from
        # __eq__ and __lt__.
        olen = len(other)
        for i in range(olen):
            try:
                c = self[i] < other[i]
            except self._IndexError:
                # self must be shorter
                return True
            if c:
                return c
            elif other[i] < self[i]:
                return False
        return len(self) < olen

    # ### Public list interface Methods ###
    # ## Non-mutating ##
    def count(self, val):
        "Standard list count method"
        count = 0
        for i in self:
            if val == i:
                count += 1
        return count

    def index(self, val):
        "Standard list index method"
        for i in range(0, len(self)):
            if self[i] == val:
                return i
        raise ValueError('%s not found in object' % str(val))

    # ## Mutating ##
    def append(self, val):
        "Standard list append method"
        self[len(self):] = [val]

    def extend(self, vals):
        "Standard list extend method"
        self[len(self):] = vals

    def insert(self, index, val):
        "Standard list insert method"
        if not isinstance(index, six.integer_types):
            raise TypeError("%s is not a legal index" % index)
        self[index:index] = [val]

    def pop(self, index=-1):
        "Standard list pop method"
        result = self[index]
        del self[index]
        return result

    def remove(self, val):
        "Standard list remove method"
        del self[self.index(val)]

    def reverse(self):
        "Standard list reverse method"
        self[:] = self[-1::-1]

    def sort(self, cmp=None, key=None, reverse=False):
        "Standard list sort method"
        if key:
            # Decorate-sort-undecorate so key() runs once per item.
            temp = [(key(v), v) for v in self]
            temp.sort(key=lambda x: x[0], reverse=reverse)
            self[:] = [v[1] for v in temp]
        else:
            temp = list(self)
            if cmp is not None:
                # Python 2 only: list.sort(cmp=...) was removed in 3.x.
                temp.sort(cmp=cmp, reverse=reverse)
            else:
                temp.sort(reverse=reverse)
            self[:] = temp

    # ### Private routines ###
    def _rebuild(self, newLen, newItems):
        # Enforce length bounds, then hand off to the subclass storage.
        if newLen < self._minlength:
            raise ValueError('Must have at least %d items' % self._minlength)
        if self._maxlength is not None and newLen > self._maxlength:
            raise ValueError('Cannot have more than %d items' % self._maxlength)

        self._set_list(newLen, newItems)

    def _set_single_rebuild(self, index, value):
        # Fallback single-item assignment: rewrite a one-element slice.
        self._set_slice(slice(index, index + 1, 1), [value])

    def _checkindex(self, index, correct=True):
        # Validate an integer index, optionally normalizing negatives.
        length = len(self)
        if 0 <= index < length:
            return index
        if correct and -length <= index < 0:
            return index + length
        raise self._IndexError('invalid index: %s' % str(index))

    def _check_allowed(self, items):
        # Optional type enforcement via the _allowed class attribute.
        if hasattr(self, '_allowed'):
            if False in [isinstance(val, self._allowed) for val in items]:
                raise TypeError('Invalid type encountered in the arguments.')

    def _set_slice(self, index, values):
        "Assign values to a slice of the object"
        try:
            iter(values)
        except TypeError:
            raise TypeError('can only assign an iterable to a slice')

        self._check_allowed(values)

        origLen = len(self)
        valueList = list(values)
        start, stop, step = index.indices(origLen)

        # CAREFUL: index.step and step are not the same!
        # step will never be None
        if index.step is None:
            self._assign_simple_slice(start, stop, valueList)
        else:
            self._assign_extended_slice(start, stop, step, valueList)

    def _assign_extended_slice_rebuild(self, start, stop, step, valueList):
        'Assign an extended slice by rebuilding entire list'
        indexList = range(start, stop, step)
        # extended slice, only allow assigning slice of same size
        if len(valueList) != len(indexList):
            raise ValueError('attempt to assign sequence of size %d '
                             'to extended slice of size %d'
                             % (len(valueList), len(indexList)))

        # we're not changing the length of the sequence
        newLen = len(self)
        newVals = dict(zip(indexList, valueList))

        def newItems():
            for i in range(newLen):
                if i in newVals:
                    yield newVals[i]
                else:
                    yield self._get_single_internal(i)

        self._rebuild(newLen, newItems())

    def _assign_extended_slice(self, start, stop, step, valueList):
        'Assign an extended slice by re-assigning individual items'
        indexList = range(start, stop, step)
        # extended slice, only allow assigning slice of same size
        if len(valueList) != len(indexList):
            raise ValueError('attempt to assign sequence of size %d '
                             'to extended slice of size %d'
                             % (len(valueList), len(indexList)))

        for i, val in zip(indexList, valueList):
            self._set_single(i, val)

    def _assign_simple_slice(self, start, stop, valueList):
        'Assign a simple slice; Can assign slice of any length'
        origLen = len(self)
        stop = max(start, stop)
        newLen = origLen - stop + start + len(valueList)

        def newItems():
            # Splice valueList in at `start`, skipping the replaced span.
            for i in range(origLen + 1):
                if i == start:
                    for val in valueList:
                        yield val

                if i < origLen:
                    if i < start or i >= stop:
                        yield self._get_single_internal(i)

        self._rebuild(newLen, newItems())
| mit |
CaptainHayashi/lass | laconia/views.py | 1 | 7097 | from django.db.models.loading import get_model
from metadata.utils.date_range import in_range
from django.shortcuts import render
from django.utils import simplejson
from django.http import Http404, HttpResponse
from django.conf import settings
from schedule.utils import range as s_range
import csv
import json
# This is used to limit range_XYZ requests to prevent them from
# DoSing URY accidentally.
MAX_RANGE_LENGTH = 10 * 24 * 60 * 60 # Ten days
def laconia_error(request, message, status=403):
    """
    Throws an error from the laconia interface.

    The default status code emitted is 403 Forbidden.
    """
    context = {'message': message}
    return render(
        request,
        'laconia/error.txt',
        context,
        content_type='text/plain',
        status=status,
    )
def current_show_location_and_time(request):
    """Send the current show location, time and show ID as plain text."""
    # The current show is supplied by context processors, so the view
    # itself adds no extra context.
    template = 'laconia/current-show-location-and-time.txt'
    return render(request, template, content_type="text/plain")
def current_show_and_next(request):
    """Send info about the current and following shows as JSON."""
    # Be very cautious about the size of the slice in case the schedule
    # misbehaves and doesn't come back with exactly two items.
    shows = list(s_range.day(limit=2))
    payload = {}

    if len(shows) >= 1:
        current = shows[0]
        if current.player_image:
            image = current.player_image.url
        else:
            image = settings.STATIC_URL + "img/default_show_player.png"
        payload.update({
            "onAir": current.title,
            "onAirDesc": current.description,
            "onAirPres": current.by_line(),
            "onAirTime": '{:%H:%M} - {:%H:%M}'.format(
                current.start_time, current.end_time),
            "onAirImg": image,
        })

    if len(shows) >= 2:
        following = shows[1]
        payload.update({
            "upNext": following.title,
            "upNextDesc": following.description,
            "upNextPres": following.by_line(),
            "upNextTime": '{:%H:%M} - {:%H:%M}'.format(
                following.start_time, following.end_time),
        })

    return HttpResponse(simplejson.dumps(payload),
                        content_type="application/json")
def range_querystring(request, appname, modelname, format='json'):
    """
    Wrapper to `range` that expects its date range in the query string.

    Since this view mainly exists to accommodate FullCalendar, which
    expects its output in JSON, the default format is JSON rather than
    CSV.
    """
    params = request.GET
    if 'start' not in params or 'end' not in params:
        raise Http404
    return range(request, appname, modelname,
                 params['start'], params['end'], format)
def range(request, appname, modelname, start, end, format='csv'):
    """
    Retrieves a summary about any items in the given model that fall
    within the given time range.

    Items are returned if any time within their own time range falls
    within the requested range.

    With format 'csv' the result is a CSV attachment; with format 'json'
    it is a JSON list matching the FullCalendar event schema.  A model
    that does not exist or cannot be queried by range yields HTTP 404; a
    negative or over-long range yields an error response.

    If the model supports metadata queries, the 'title' and
    'description' metadata will be pulled if they exist; if it supports
    credit queries, the by-line is also included (CSV only).
    """
    model = get_model(appname, modelname)
    if model is None:
        raise Http404

    start = int(start)
    end = int(end)
    length = end - start

    # Request sanity checking: reject negative or abusive ranges early.
    if length < 0:
        return laconia_error(request, 'Requested range is negative.')
    if length > MAX_RANGE_LENGTH:
        return laconia_error(
            request,
            'Requested range is too long (max: {0} seconds)'.format(
                MAX_RANGE_LENGTH
            )
        )

    try:
        items = in_range(model, start, end)
    except AttributeError:
        # Assuming this means the model can't do range-based ops.
        raise Http404

    filename = u'{0}-{1}-{2}-{3}'.format(appname, modelname, start, end)
    if format == 'csv':
        return range_csv(filename, items)
    if format == 'json':
        return range_json(filename, items)
    raise ValueError('Invalid format specifier.')
def range_csv(filename, items):
    """
    Returns a range query result in CSV format.

    The order of items in the CSV rows are:

    1) Primary key
    2) Start time as UNIX timestamp
    3) End time as UNIX timestamp
    4) 'title' from default metadata strand, if metadata exists;
       else blank
    5) 'description' from default metadata strand, if metadata exists;
       else blank
    6) By-line, if credits exist; else blank
    """
    response = HttpResponse(mimetype='text/csv')
    response['Content-Disposition'] = (
        u'attachment; filename="{0}.csv"'.format(filename)
    )

    writer = csv.writer(response)
    for item in items:
        writer.writerow([
            item.pk,
            item.range_start_unix(),
            item.range_end_unix(),
            getattr(item, 'title', ''),
            getattr(item, 'description', ''),
            # Bug fix: the fallback used to be `lambda x: ''`, which
            # raised TypeError when invoked with no arguments for items
            # without a by_line; a zero-argument lambda matches how the
            # bound method is actually called.
            getattr(item, 'by_line', lambda: '')()
        ])
    return response
def range_item_title(item):
    """
    Returns the most sensible human-readable title for the item.

    This is the item's 'title' attribute when it supports metadata, or
    the empty string otherwise (kept for logging compatibility
    purposes, primarily).
    """
    fallback = ''
    return getattr(item, 'title', fallback)
def range_item_dict(item):
    """
    Returns a dictionary representing the information from a given
    range item that is pertinent to a range query.
    """
    # Title lookup is the inlined equivalent of range_item_title: the
    # 'title' attribute when present, else the empty string.
    title = getattr(item, 'title', '')
    return {
        'id': item.pk,
        'title': title,
        'start': item.range_start_unix(),
        'end': item.range_end_unix(),
    }
def range_json(filename, items):
    """
    Returns a range query in JSON (full-calendar) format.

    The format used is described in
    http://arshaw.com/fullcalendar/docs/event_data/Event_Object

    The `filename` argument is accepted for parity with `range_csv` but
    does not affect the response body.
    """
    events = [range_item_dict(item) for item in items]
    return HttpResponse(json.dumps(events), mimetype='application/json')
| gpl-2.0 |
LaharlMontogmmery/-tg-station | bot/C_sarcasticball.py | 36 | 1509 | from random import choice as fsample
sarcastic_responses = ["Yeah right","What do I look like to you?","Are you kidding me?",#UsF
"As much as you","You don't believe that yourself","When pigs fly",#UsF
"Like your grandma","You would like to know, wouldn't you?", #UsF
"Like your mom", #Spectre
"Totally","Not at all", #Spectre
"AHAHAHahahaha, No.", #Strumpetplaya
"Not as much as USER","As much as USER",
"Really, you expect me to tell you that?",
"Right, and you've been building NOUNs for those USERs in the LOCATION, haven't you?" ] #Richard
locations = ["woods","baystation","ditch"]
nouns = ["bomb","toilet","robot","cyborg",
"garbage can","gun","cake",
"missile"]
def sarcasticball(data,debug,sender,users,prefix):
arg = data.lower().replace(prefix+"sarcasticball ","")
arg = arg.replace(prefix+"sball ","")
if debug:
print sender+":"+prefix+"sarcasticball", arg
choice = fsample(sarcastic_responses)
if "USER" in choice:
choice = choice.replace("USER",fsample(users),1)
choice = choice.replace("USER",fsample(users),1)
if "NOUN" in choice:
choice = choice.replace("NOUN",fsample(nouns),1)
if "LOCATION" in choice:
choice = choice.replace("LOCATION",fsample(locations),1)
if debug:
print "Responded with", choice
return(choice)
| gpl-3.0 |
leonzhu/shooter-player | Test/Update_Unittest/web/scripts/gen.py | 18 | 2227 | #!/usr/bin/python
#create patch files for ShooterPlayer
#
#
#
import glob
import optparse
import os
import re
import shutil
import sys
import hashlib
import subprocess
import gzip
import shutil
def createGzipFile(inname, outname):
    """Compress the file at `inname` into a new gzip archive at `outname`.

    The source file is streamed in chunks, so arbitrarily large inputs do
    not need to fit in memory.
    """
    # Context managers guarantee both handles are closed even on error;
    # the original leaked both when writelines() raised.
    with open(inname, 'rb') as f_in:
        with gzip.open(outname, 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
def filemd5(fileName):
    """Return the hex MD5 digest of the file at `fileName`.

    On an unreadable file, report the problem and return None.
    """
    m = hashlib.md5()
    try:
        fd = open(fileName, "rb")
    except IOError:
        # Bug fix: the original printed the undefined name `filename`
        # (parameter is `fileName`), so this path raised NameError
        # instead of reporting the error.
        print("Unable to open the file in readmode: %s" % fileName)
        return None
    try:
        # Hash in chunks so large files are not read into memory at once.
        while True:
            chunk = fd.read(1 << 20)
            if not chunk:
                break
            m.update(chunk)
    finally:
        fd.close()
    return m.hexdigest()
def main(options, args):
    """Create a gzipped copy of the latest release and a desc.txt manifest.

    Returns 0 on success; used as the process exit code.
    """
    latestMD5 = filemd5(options.latest)
    gzfile = os.path.join(options.outputdir, "out.gz")
    if not os.path.exists(options.outputdir):
        os.mkdir(options.outputdir)
    # Compress next to the script first, then move into the output dir
    # (order kept from the original implementation).
    createGzipFile(options.latest, "out.gz")
    shutil.move("out.gz", gzfile)

    gzmd5 = filemd5(gzfile)
    latestsize = os.path.getsize(options.latest)
    gzsize = os.path.getsize(gzfile)

    # The TEMP PATH for the unzipped file is the MD5 of the source path.
    temp_name = hashlib.md5(options.latest).hexdigest()

    # Record layout:
    # PATH; MD5 of uncompressed; ID; TEMP PATH for unzipped file;
    # MD5 of compressed; METHOD; LENGTH; GZ LENGTH
    record = "%s;%s;1;%s;%s;default;%d;%d;\n" % (
        options.latest, latestMD5, temp_name, gzmd5, latestsize, gzsize)

    # Bug fix: the original opened with the invalid mode string "wa" and
    # never closed the handle; plain "w" (truncate + write) matches what
    # that actually did under Python 2, and `with` guarantees the close.
    with open(os.path.join(options.outputdir, "desc.txt"), "w") as descfile:
        descfile.write(record)
    return 0
if '__main__' == __name__:
    # Command-line entry point: gather options and hand off to main().
    parser = optparse.OptionParser()
    parser.add_option('', '--latest', default='splayer.exe',
                      help='path to latest release')
    parser.add_option('', '--outputdir', default='.\\output',
                      help='output path')
    opts, extra_args = parser.parse_args()
    sys.exit(main(opts, extra_args))
etashjian/ECE757-final | configs/ruby/MESI_Two_Level.py | 14 | 9228 | # Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology
from Ruby import send_evicts
#
# Note: the L1 Cache latency is only used by the sequencer on fast path hits
#
class L1Cache(RubyCache):
    # L1 hit latency in cycles; only consumed by the sequencer fast path.
    latency = 3
#
# Note: the L2 Cache latency is not currently used
#
class L2Cache(RubyCache):
    # L2 latency in cycles; per the upstream note, not currently used.
    latency = 15
def define_options(parser):
    """MESI_Two_Level adds no protocol-specific command-line options."""
    pass
def create_system(options, full_system, system, dma_ports, ruby_system):
    """Build the MESI_Two_Level Ruby memory system.

    Creates per-CPU L1 controllers/sequencers, banked L2 controllers,
    directory controllers, and DMA controllers, wires them all to the
    Ruby network, and returns (cpu_sequencers, dir_cntrl_nodes, topology).
    """
    if buildEnv['PROTOCOL'] != 'MESI_Two_Level':
        fatal("This script requires the MESI_Two_Level protocol to be built.")

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list. Therefore the l1 controller nodes must be
    # listed before the directory nodes and directory nodes before dma nodes, etc.
    #
    l1_cntrl_nodes = []
    l2_cntrl_nodes = []
    dir_cntrl_nodes = []
    dma_cntrl_nodes = []

    #
    # Must create the individual controllers before the network to ensure the
    # controller constructors are called before the network constructor
    #
    # NOTE(review): int(math.log(..., 2)) truncates — this presumably
    # assumes num_l2caches and cacheline_size are powers of two; confirm.
    l2_bits = int(math.log(options.num_l2caches, 2))
    block_size_bits = int(math.log(options.cacheline_size, 2))

    for i in xrange(options.num_cpus):
        #
        # First create the Ruby objects associated with this cpu
        #
        l1i_cache = L1Cache(size = options.l1i_size,
                            assoc = options.l1i_assoc,
                            start_index_bit = block_size_bits,
                            is_icache = True)
        l1d_cache = L1Cache(size = options.l1d_size,
                            assoc = options.l1d_assoc,
                            start_index_bit = block_size_bits,
                            is_icache = False)

        prefetcher = RubyPrefetcher.Prefetcher()

        l1_cntrl = L1Cache_Controller(version = i,
                                      L1Icache = l1i_cache,
                                      L1Dcache = l1d_cache,
                                      l2_select_num_bits = l2_bits,
                                      send_evictions = send_evicts(options),
                                      prefetcher = prefetcher,
                                      ruby_system = ruby_system,
                                      clk_domain=system.cpu[i].clk_domain,
                                      transitions_per_cycle=options.ports,
                                      enable_prefetch = False)

        cpu_seq = RubySequencer(version = i,
                                icache = l1i_cache,
                                dcache = l1d_cache,
                                clk_domain=system.cpu[i].clk_domain,
                                ruby_system = ruby_system)

        l1_cntrl.sequencer = cpu_seq
        # exec() is used because the attribute name is built dynamically.
        exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)

        # Add controllers and sequencers to the appropriate lists
        cpu_sequencers.append(cpu_seq)
        l1_cntrl_nodes.append(l1_cntrl)

        # Connect the L1 controllers and the network
        l1_cntrl.requestFromL1Cache = ruby_system.network.slave
        l1_cntrl.responseFromL1Cache = ruby_system.network.slave
        l1_cntrl.unblockFromL1Cache = ruby_system.network.slave

        l1_cntrl.requestToL1Cache = ruby_system.network.master
        l1_cntrl.responseToL1Cache = ruby_system.network.master

    # L2 banks index above the block offset and bank-select bits.
    l2_index_start = block_size_bits + l2_bits

    for i in xrange(options.num_l2caches):
        #
        # First create the Ruby objects associated with this cpu
        #
        l2_cache = L2Cache(size = options.l2_size,
                           assoc = options.l2_assoc,
                           start_index_bit = l2_index_start)

        l2_cntrl = L2Cache_Controller(version = i,
                                      L2cache = l2_cache,
                                      transitions_per_cycle=options.ports,
                                      ruby_system = ruby_system)

        exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
        l2_cntrl_nodes.append(l2_cntrl)

        # Connect the L2 controllers and the network
        l2_cntrl.DirRequestFromL2Cache = ruby_system.network.slave
        l2_cntrl.L1RequestFromL2Cache = ruby_system.network.slave
        l2_cntrl.responseFromL2Cache = ruby_system.network.slave

        l2_cntrl.unblockToL2Cache = ruby_system.network.master
        l2_cntrl.L1RequestToL2Cache = ruby_system.network.master
        l2_cntrl.responseToL2Cache = ruby_system.network.master

    # Physical memory is split evenly across the directories.
    phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
    assert(phys_mem_size % options.num_dirs == 0)
    mem_module_size = phys_mem_size / options.num_dirs

    # Run each of the ruby memory controllers at a ratio of the frequency of
    # the ruby system
    # clk_divider value is a fix to pass regression.
    ruby_system.memctrl_clk_domain = DerivedClockDomain(
        clk_domain=ruby_system.clk_domain,
        clk_divider=3)

    for i in xrange(options.num_dirs):
        dir_size = MemorySize('0B')
        dir_size.value = mem_module_size
        dir_cntrl = Directory_Controller(version = i,
                                         directory = RubyDirectoryMemory(
                                             version = i, size = dir_size),
                                         transitions_per_cycle = options.ports,
                                         ruby_system = ruby_system)

        exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
        dir_cntrl_nodes.append(dir_cntrl)

        # Connect the directory controllers and the network
        dir_cntrl.requestToDir = ruby_system.network.master
        dir_cntrl.responseToDir = ruby_system.network.master
        dir_cntrl.responseFromDir = ruby_system.network.slave

    for i, dma_port in enumerate(dma_ports):
        # Create the Ruby objects associated with the dma controller
        dma_seq = DMASequencer(version = i,
                               ruby_system = ruby_system,
                               slave = dma_port)

        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq,
                                   transitions_per_cycle = options.ports,
                                   ruby_system = ruby_system)

        exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
        dma_cntrl_nodes.append(dma_cntrl)

        # Connect the dma controller to the network
        dma_cntrl.responseFromDir = ruby_system.network.master
        dma_cntrl.requestToDir = ruby_system.network.slave

    all_cntrls = l1_cntrl_nodes + \
                 l2_cntrl_nodes + \
                 dir_cntrl_nodes + \
                 dma_cntrl_nodes

    # Create the io controller and the sequencer
    if full_system:
        io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system)
        ruby_system._io_port = io_seq
        io_controller = DMA_Controller(version = len(dma_ports),
                                       dma_sequencer = io_seq,
                                       ruby_system = ruby_system)
        ruby_system.io_controller = io_controller

        # Connect the dma controller to the network
        io_controller.responseFromDir = ruby_system.network.master
        io_controller.requestToDir = ruby_system.network.slave

        all_cntrls = all_cntrls + [io_controller]

    topology = create_topology(all_cntrls, options)
    return (cpu_sequencers, dir_cntrl_nodes, topology)
| bsd-3-clause |
catalpainternational/django-endless-pagination | tests/settings.py | 6 | 1145 | """Settings file for the Django project used for tests."""
import os
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
PROJECT_NAME = 'project'
# Base paths.
ROOT = os.path.abspath(os.path.dirname(__file__))
PROJECT = os.path.join(ROOT, PROJECT_NAME)
# Django configuration.
DATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3'}}
DEBUG = TEMPLATE_DEBUG = True
INSTALLED_APPS = (
'django.contrib.staticfiles',
'endless_pagination',
PROJECT_NAME,
)
LANGUAGE_CODE = os.getenv('ENDLESS_PAGINATION_LANGUAGE_CODE', 'en-us')
ROOT_URLCONF = PROJECT_NAME + '.urls'
SECRET_KEY = os.getenv('ENDLESS_PAGINATION_SECRET_KEY', 'secret')
SITE_ID = 1
STATIC_ROOT = os.path.join(PROJECT, 'static')
STATIC_URL = '/static/'
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.request',
PROJECT_NAME + '.context_processors.navbar',
PROJECT_NAME + '.context_processors.versions',
)
TEMPLATE_DIRS = os.path.join(PROJECT, 'templates')
# Testing.
NOSE_ARGS = (
'--verbosity=2',
'--with-coverage',
'--cover-package=endless_pagination',
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
| mit |
galaxy001/libtorrent | BitTorrent-4.4.0/BitTorrent/PeerID.py | 1 | 1041 | # The contents of this file are subject to the BitTorrent Open Source License
# Version 1.0 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by Matt Chisholm
import os
try:
from hashlib import sha1 as sha
except ImportError:
from sha import sha
from time import time
try:
getpid = os.getpid
except AttributeError:
def getpid():
return 1
from BitTorrent import version
def make_id():
myid = 'M' + version.split()[0].replace('.', '-')
myid = myid + ('-' * (8-len(myid)))+sha(repr(time())+ ' ' +
str(getpid())).digest()[-6:].encode('hex')
return myid
| mit |
reddit/cabot | cabot/cabotapp/migrations/0001_initial.py | 16 | 19487 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Service'
db.create_table('cabotapp_service', (
('id', self.gf('django.db.models.fields.AutoField')
(primary_key=True)),
('name', self.gf('django.db.models.fields.TextField')()),
('url', self.gf('django.db.models.fields.TextField')(blank=True)),
('last_alert_sent', self.gf('django.db.models.fields.DateTimeField')
(null=True, blank=True)),
('email_alert', self.gf('django.db.models.fields.BooleanField')
(default=False)),
('hipchat_alert',
self.gf('django.db.models.fields.BooleanField')(default=True)),
('sms_alert', self.gf('django.db.models.fields.BooleanField')
(default=False)),
('telephone_alert',
self.gf('django.db.models.fields.BooleanField')(default=False)),
('alerts_enabled',
self.gf('django.db.models.fields.BooleanField')(default=True)),
('overall_status', self.gf('django.db.models.fields.TextField')
(default='PASSING')),
('old_overall_status',
self.gf('django.db.models.fields.TextField')(default='PASSING')),
('hackpad_id', self.gf('django.db.models.fields.TextField')
(null=True, blank=True)),
))
db.send_create_signal('cabotapp', ['Service'])
# Adding M2M table for field users_to_notify on 'Service'
db.create_table('cabotapp_service_users_to_notify', (
('id', models.AutoField(verbose_name='ID',
primary_key=True, auto_created=True)),
('service',
models.ForeignKey(orm['cabotapp.service'], null=False)),
('user', models.ForeignKey(orm['auth.user'], null=False))
))
db.create_unique('cabotapp_service_users_to_notify',
['service_id', 'user_id'])
# Adding M2M table for field status_checks on 'Service'
db.create_table('cabotapp_service_status_checks', (
('id', models.AutoField(verbose_name='ID',
primary_key=True, auto_created=True)),
('service',
models.ForeignKey(orm['cabotapp.service'], null=False)),
('statuscheck',
models.ForeignKey(orm['cabotapp.statuscheck'], null=False))
))
db.create_unique('cabotapp_service_status_checks',
['service_id', 'statuscheck_id'])
# Adding model 'ServiceStatusSnapshot'
db.create_table('cabotapp_servicestatussnapshot', (
('id', self.gf('django.db.models.fields.AutoField')
(primary_key=True)),
('service', self.gf('django.db.models.fields.related.ForeignKey')
(related_name='snapshots', to=orm['cabotapp.Service'])),
('time', self.gf('django.db.models.fields.DateTimeField')()),
('num_checks_active',
self.gf('django.db.models.fields.IntegerField')(default=0)),
('num_checks_passing',
self.gf('django.db.models.fields.IntegerField')(default=0)),
('num_checks_failing',
self.gf('django.db.models.fields.IntegerField')(default=0)),
('overall_status', self.gf('django.db.models.fields.TextField')
(default='PASSING')),
('did_send_alert',
self.gf('django.db.models.fields.IntegerField')(default=False)),
))
db.send_create_signal('cabotapp', ['ServiceStatusSnapshot'])
# Adding model 'StatusCheck'
db.create_table('cabotapp_statuscheck', (
('id', self.gf('django.db.models.fields.AutoField')
(primary_key=True)),
('polymorphic_ctype', self.gf('django.db.models.fields.related.ForeignKey')
(related_name='polymorphic_cabotapp.statuscheck_set', null=True, to=orm['contenttypes.ContentType'])),
('name', self.gf('django.db.models.fields.TextField')()),
('active', self.gf('django.db.models.fields.BooleanField')
(default=True)),
('importance', self.gf('django.db.models.fields.CharField')
(default='ERROR', max_length=30)),
('frequency', self.gf('django.db.models.fields.IntegerField')
(default=5)),
('debounce', self.gf('django.db.models.fields.IntegerField')
(default=0, null=True)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')
(to=orm['auth.User'])),
('calculated_status', self.gf('django.db.models.fields.CharField')
(default='passing', max_length=50, blank=True)),
('last_run', self.gf('django.db.models.fields.DateTimeField')
(null=True)),
('cached_health',
self.gf('django.db.models.fields.TextField')(null=True)),
('metric', self.gf('django.db.models.fields.TextField')
(null=True)),
('check_type', self.gf('django.db.models.fields.CharField')
(max_length=100, null=True)),
('value', self.gf('django.db.models.fields.TextField')(null=True)),
('expected_num_hosts', self.gf('django.db.models.fields.IntegerField')
(default=0, null=True)),
('endpoint', self.gf('django.db.models.fields.TextField')
(null=True)),
('username', self.gf('django.db.models.fields.TextField')
(null=True, blank=True)),
('password', self.gf('django.db.models.fields.TextField')
(null=True, blank=True)),
('text_match', self.gf('django.db.models.fields.TextField')
(null=True, blank=True)),
('status_code', self.gf('django.db.models.fields.TextField')
(default=200, null=True)),
('timeout', self.gf('django.db.models.fields.IntegerField')
(default=30, null=True)),
('max_queued_build_time',
self.gf(
'django.db.models.fields.IntegerField')(null=True, blank=True)),
))
db.send_create_signal('cabotapp', ['StatusCheck'])
# Adding model 'StatusCheckResult'
db.create_table('cabotapp_statuscheckresult', (
('id', self.gf('django.db.models.fields.AutoField')
(primary_key=True)),
('check', self.gf('django.db.models.fields.related.ForeignKey')
(to=orm['cabotapp.StatusCheck'])),
('time', self.gf('django.db.models.fields.DateTimeField')()),
('time_complete',
self.gf('django.db.models.fields.DateTimeField')(null=True)),
('raw_data', self.gf('django.db.models.fields.TextField')
(null=True)),
('succeeded', self.gf('django.db.models.fields.BooleanField')
(default=False)),
('error', self.gf('django.db.models.fields.TextField')(null=True)),
))
db.send_create_signal('cabotapp', ['StatusCheckResult'])
# Adding model 'UserProfile'
db.create_table('cabotapp_userprofile', (
('id', self.gf('django.db.models.fields.AutoField')
(primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')
(related_name='profile', unique=True, to=orm['auth.User'])),
('mobile_number', self.gf('django.db.models.fields.CharField')
(default='', max_length=20, blank=True)),
('hipchat_alias', self.gf('django.db.models.fields.CharField')
(default='', max_length=50, blank=True)),
('fallback_alert_user',
self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('cabotapp', ['UserProfile'])
# Adding model 'Shift'
db.create_table('cabotapp_shift', (
('id', self.gf('django.db.models.fields.AutoField')
(primary_key=True)),
('start', self.gf('django.db.models.fields.DateTimeField')()),
('end', self.gf('django.db.models.fields.DateTimeField')()),
('user', self.gf('django.db.models.fields.related.ForeignKey')
(to=orm['auth.User'])),
('uid', self.gf('django.db.models.fields.TextField')()),
('deleted', self.gf('django.db.models.fields.BooleanField')
(default=False)),
))
db.send_create_signal('cabotapp', ['Shift'])
def backwards(self, orm):
# Deleting model 'Service'
db.delete_table('cabotapp_service')
# Removing M2M table for field users_to_notify on 'Service'
db.delete_table('cabotapp_service_users_to_notify')
# Removing M2M table for field status_checks on 'Service'
db.delete_table('cabotapp_service_status_checks')
# Deleting model 'ServiceStatusSnapshot'
db.delete_table('cabotapp_servicestatussnapshot')
# Deleting model 'StatusCheck'
db.delete_table('cabotapp_statuscheck')
# Deleting model 'StatusCheckResult'
db.delete_table('cabotapp_statuscheckresult')
# Deleting model 'UserProfile'
db.delete_table('cabotapp_userprofile')
# Deleting model 'Shift'
db.delete_table('cabotapp_shift')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cabotapp.service': {
'Meta': {'ordering': "['name']", 'object_name': 'Service'},
'alerts_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'email_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hackpad_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'hipchat_alert': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_alert_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'old_overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
'overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
'sms_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status_checks': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cabotapp.StatusCheck']", 'symmetrical': 'False', 'blank': 'True'}),
'telephone_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'users_to_notify': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
},
'cabotapp.servicestatussnapshot': {
'Meta': {'object_name': 'ServiceStatusSnapshot'},
'did_send_alert': ('django.db.models.fields.IntegerField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num_checks_active': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'num_checks_failing': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'num_checks_passing': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'snapshots'", 'to': "orm['cabotapp.Service']"}),
'time': ('django.db.models.fields.DateTimeField', [], {})
},
'cabotapp.shift': {
'Meta': {'object_name': 'Shift'},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'end': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'uid': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'cabotapp.statuscheck': {
'Meta': {'ordering': "['name']", 'object_name': 'StatusCheck'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'cached_health': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'calculated_status': ('django.db.models.fields.CharField', [], {'default': "'passing'", 'max_length': '50', 'blank': 'True'}),
'check_type': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'debounce': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'endpoint': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'expected_num_hosts': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'frequency': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.CharField', [], {'default': "'ERROR'", 'max_length': '30'}),
'last_run': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'max_queued_build_time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'metric': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'password': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_cabotapp.statuscheck_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'status_code': ('django.db.models.fields.TextField', [], {'default': '200', 'null': 'True'}),
'text_match': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'timeout': ('django.db.models.fields.IntegerField', [], {'default': '30', 'null': 'True'}),
'username': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'cabotapp.statuscheckresult': {
'Meta': {'object_name': 'StatusCheckResult'},
'check': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabotapp.StatusCheck']"}),
'error': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'raw_data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'succeeded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'time_complete': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'cabotapp.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'fallback_alert_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hipchat_alias': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['cabotapp']
| mit |
epfl-lts2/pygsp | pygsp/graphs/nngraphs/cube.py | 1 | 3294 | # -*- coding: utf-8 -*-
import numpy as np
from pygsp.graphs import NNGraph # prevent circular import in Python < 3.5
class Cube(NNGraph):
r"""Hyper-cube (NN-graph).
Parameters
----------
radius : float
Edge lenght (default = 1)
nb_pts : int
Number of vertices (default = 300)
nb_dim : int
Dimension (default = 3)
sampling : string
Variance of the distance kernel (default = 'random')
(Can now only be 'random')
seed : int
Seed for the random number generator (for reproducible graphs).
Examples
--------
>>> import matplotlib.pyplot as plt
>>> G = graphs.Cube(seed=42)
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(121)
>>> ax2 = fig.add_subplot(122, projection='3d')
>>> _ = ax1.spy(G.W, markersize=0.5)
>>> _ = G.plot(ax=ax2)
"""
def __init__(self,
radius=1,
nb_pts=300,
nb_dim=3,
sampling='random',
seed=None,
**kwargs):
self.radius = radius
self.nb_pts = nb_pts
self.nb_dim = nb_dim
self.sampling = sampling
self.seed = seed
rs = np.random.RandomState(seed)
if self.nb_dim > 3:
raise NotImplementedError("Dimension > 3 not supported yet!")
if self.sampling == "random":
if self.nb_dim == 2:
pts = rs.rand(self.nb_pts, self.nb_dim)
elif self.nb_dim == 3:
n = self.nb_pts // 6
pts = np.zeros((n*6, 3))
pts[:n, 1:] = rs.rand(n, 2)
pts[n:2*n, :] = np.concatenate((np.ones((n, 1)),
rs.rand(n, 2)),
axis=1)
pts[2*n:3*n, :] = np.concatenate((rs.rand(n, 1),
np.zeros((n, 1)),
rs.rand(n, 1)),
axis=1)
pts[3*n:4*n, :] = np.concatenate((rs.rand(n, 1),
np.ones((n, 1)),
rs.rand(n, 1)),
axis=1)
pts[4*n:5*n, :2] = rs.rand(n, 2)
pts[5*n:6*n, :] = np.concatenate((rs.rand(n, 2),
np.ones((n, 1))),
axis=1)
else:
raise ValueError("Unknown sampling !")
plotting = {
'vertex_size': 80,
'elevation': 15,
'azimuth': 0,
'distance': 9,
}
super(Cube, self).__init__(Xin=pts, k=10,
center=False, rescale=False,
plotting=plotting, **kwargs)
def _get_extra_repr(self):
attrs = {'radius': '{:.2f}'.format(self.radius),
'nb_pts': self.nb_pts,
'nb_dim': self.nb_dim,
'sampling': self.sampling,
'seed': self.seed}
attrs.update(super(Cube, self)._get_extra_repr())
return attrs
| bsd-3-clause |
runt18/mojo | third_party/cython/src/Cython/Compiler/TreeFragment.py | 1 | 9033 | #
# TreeFragments - parsing of strings to trees
#
import re
from StringIO import StringIO
from Scanning import PyrexScanner, StringSourceDescriptor
from Symtab import ModuleScope
import PyrexTypes
from Visitor import VisitorTransform
from Nodes import Node, StatListNode
from ExprNodes import NameNode
import Parsing
import Main
import UtilNodes
"""
Support for parsing strings into code trees.
"""
class StringParseContext(Main.Context):
def __init__(self, name, include_directories=None):
if include_directories is None: include_directories = []
Main.Context.__init__(self, include_directories, {},
create_testscope=False)
self.module_name = name
def find_module(self, module_name, relative_to = None, pos = None, need_pxd = 1):
if module_name not in (self.module_name, 'cython'):
raise AssertionError("Not yet supporting any cimports/includes from string code snippets")
return ModuleScope(module_name, parent_module = None, context = self)
def parse_from_strings(name, code, pxds=None, level=None, initial_pos=None,
context=None, allow_struct_enum_decorator=False):
"""
Utility method to parse a (unicode) string of code. This is mostly
used for internal Cython compiler purposes (creating code snippets
that transforms should emit, as well as unit testing).
code - a unicode string containing Cython (module-level) code
name - a descriptive name for the code source (to use in error messages etc.)
RETURNS
The tree, i.e. a ModuleNode. The ModuleNode's scope attribute is
set to the scope used when parsing.
"""
if pxds is None:
pxds = {}
if context is None:
context = StringParseContext(name)
# Since source files carry an encoding, it makes sense in this context
# to use a unicode string so that code fragments don't have to bother
# with encoding. This means that test code passed in should not have an
# encoding header.
assert isinstance(code, unicode), "unicode code snippets only please"
encoding = "UTF-8"
module_name = name
if initial_pos is None:
initial_pos = (name, 1, 0)
code_source = StringSourceDescriptor(name, code)
scope = context.find_module(module_name, pos = initial_pos, need_pxd = 0)
buf = StringIO(code)
scanner = PyrexScanner(buf, code_source, source_encoding = encoding,
scope = scope, context = context, initial_pos = initial_pos)
ctx = Parsing.Ctx(allow_struct_enum_decorator=allow_struct_enum_decorator)
if level is None:
tree = Parsing.p_module(scanner, 0, module_name, ctx=ctx)
tree.scope = scope
tree.is_pxd = False
else:
tree = Parsing.p_code(scanner, level=level, ctx=ctx)
tree.scope = scope
return tree
class TreeCopier(VisitorTransform):
def visit_Node(self, node):
if node is None:
return node
else:
c = node.clone_node()
self.visitchildren(c)
return c
class ApplyPositionAndCopy(TreeCopier):
def __init__(self, pos):
super(ApplyPositionAndCopy, self).__init__()
self.pos = pos
def visit_Node(self, node):
copy = super(ApplyPositionAndCopy, self).visit_Node(node)
copy.pos = self.pos
return copy
class TemplateTransform(VisitorTransform):
"""
Makes a copy of a template tree while doing substitutions.
A dictionary "substitutions" should be passed in when calling
the transform; mapping names to replacement nodes. Then replacement
happens like this:
- If an ExprStatNode contains a single NameNode, whose name is
a key in the substitutions dictionary, the ExprStatNode is
replaced with a copy of the tree given in the dictionary.
It is the responsibility of the caller that the replacement
node is a valid statement.
- If a single NameNode is otherwise encountered, it is replaced
if its name is listed in the substitutions dictionary in the
same way. It is the responsibility of the caller to make sure
that the replacement nodes is a valid expression.
Also a list "temps" should be passed. Any names listed will
be transformed into anonymous, temporary names.
Currently supported for tempnames is:
NameNode
(various function and class definition nodes etc. should be added to this)
Each replacement node gets the position of the substituted node
recursively applied to every member node.
"""
temp_name_counter = 0
def __call__(self, node, substitutions, temps, pos):
self.substitutions = substitutions
self.pos = pos
tempmap = {}
temphandles = []
for temp in temps:
TemplateTransform.temp_name_counter += 1
handle = UtilNodes.TempHandle(PyrexTypes.py_object_type)
tempmap[temp] = handle
temphandles.append(handle)
self.tempmap = tempmap
result = super(TemplateTransform, self).__call__(node)
if temps:
result = UtilNodes.TempsBlockNode(self.get_pos(node),
temps=temphandles,
body=result)
return result
def get_pos(self, node):
if self.pos:
return self.pos
else:
return node.pos
def visit_Node(self, node):
if node is None:
return None
else:
c = node.clone_node()
if self.pos is not None:
c.pos = self.pos
self.visitchildren(c)
return c
def try_substitution(self, node, key):
sub = self.substitutions.get(key)
if sub is not None:
pos = self.pos
if pos is None: pos = node.pos
return ApplyPositionAndCopy(pos)(sub)
else:
return self.visit_Node(node) # make copy as usual
def visit_NameNode(self, node):
temphandle = self.tempmap.get(node.name)
if temphandle:
# Replace name with temporary
return temphandle.ref(self.get_pos(node))
else:
return self.try_substitution(node, node.name)
def visit_ExprStatNode(self, node):
# If an expression-as-statement consists of only a replaceable
# NameNode, we replace the entire statement, not only the NameNode
if isinstance(node.expr, NameNode):
return self.try_substitution(node, node.expr.name)
else:
return self.visit_Node(node)
def copy_code_tree(node):
return TreeCopier()(node)
INDENT_RE = re.compile(ur"^ *")
def strip_common_indent(lines):
"Strips empty lines and common indentation from the list of strings given in lines"
# TODO: Facilitate textwrap.indent instead
lines = [x for x in lines if x.strip() != u""]
minindent = min([len(INDENT_RE.match(x).group(0)) for x in lines])
lines = [x[minindent:] for x in lines]
return lines
class TreeFragment(object):
def __init__(self, code, name="(tree fragment)", pxds=None, temps=None, pipeline=None, level=None, initial_pos=None):
if pxds is None:
pxds = {}
if temps is None:
temps = []
if pipeline is None:
pipeline = []
if isinstance(code, unicode):
def fmt(x): return u"\n".join(strip_common_indent(x.split(u"\n")))
fmt_code = fmt(code)
fmt_pxds = {}
for key, value in pxds.iteritems():
fmt_pxds[key] = fmt(value)
mod = t = parse_from_strings(name, fmt_code, fmt_pxds, level=level, initial_pos=initial_pos)
if level is None:
t = t.body # Make sure a StatListNode is at the top
if not isinstance(t, StatListNode):
t = StatListNode(pos=mod.pos, stats=[t])
for transform in pipeline:
if transform is None:
continue
t = transform(t)
self.root = t
elif isinstance(code, Node):
if pxds != {}: raise NotImplementedError()
self.root = code
else:
raise ValueError("Unrecognized code format (accepts unicode and Node)")
self.temps = temps
def copy(self):
return copy_code_tree(self.root)
def substitute(self, nodes=None, temps=None, pos = None):
if nodes is None:
nodes = {}
if temps is None:
temps = []
return TemplateTransform()(self.root,
substitutions = nodes,
temps = self.temps + temps, pos = pos)
class SetPosTransform(VisitorTransform):
def __init__(self, pos):
super(SetPosTransform, self).__init__()
self.pos = pos
def visit_Node(self, node):
node.pos = self.pos
self.visitchildren(node)
return node
| bsd-3-clause |
nijel/weblate | weblate/addons/git.py | 1 | 9204 | #
# Copyright © 2012 - 2021 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from collections import defaultdict
from itertools import chain
from django.utils.translation import gettext_lazy as _
from weblate.addons.base import BaseAddon
from weblate.addons.events import EVENT_POST_COMMIT
from weblate.addons.forms import GitSquashForm
from weblate.utils.errors import report_error
from weblate.vcs.base import RepositoryException
class GitSquashAddon(BaseAddon):
name = "weblate.git.squash"
verbose = _("Squash Git commits")
description = _("Squash Git commits prior to pushing changes.")
settings_form = GitSquashForm
compat = {
"vcs": {
"git",
"gerrit",
"subversion",
"github",
"pagure",
"gitlab",
"git-force-push",
}
}
events = (EVENT_POST_COMMIT,)
icon = "compress.svg"
repo_scope = True
def squash_all(self, component, repository, base=None, author=None):
remote = base if base else repository.get_remote_branch_name()
message = self.get_squash_commit_message(repository, "%B", remote)
repository.execute(["reset", "--mixed", remote])
# Can happen for added and removed translation
if repository.needs_commit():
repository.commit(message, author)
def get_filenames(self, component):
languages = defaultdict(list)
for origin in [component] + list(component.linked_childs):
for translation in origin.translation_set.prefetch_related("language"):
code = translation.language.code
if not translation.filename:
continue
languages[code].extend(translation.filenames)
return languages
def get_git_commit_messages(self, repository, log_format, remote, filenames):
command = [
"log",
f"--format={log_format}",
f"{remote}..HEAD",
]
if filenames:
command += ["--"] + filenames
return repository.execute(command)
def get_squash_commit_message(self, repository, log_format, remote, filenames=None):
commit_message = self.instance.configuration.get("commit_message")
if self.instance.configuration.get("append_trailers", True):
command = [
"log",
"--format=%(trailers)%nCo-authored-by: %an <%ae>",
f"{remote}..HEAD",
]
if filenames:
command += ["--"] + filenames
trailer_lines = {
trailer
for trailer in repository.execute(command).split("\n")
if trailer.strip()
}
if commit_message:
# Predefined commit message
body = [commit_message]
else:
# Extract commit messages from the log
body = [
line
for line in self.get_git_commit_messages(
repository, log_format, remote, filenames
).split("\n")
if line not in trailer_lines
]
commit_message = "\n".join(
chain(
# Body
body,
# Blank line
[""],
# Trailers
sorted(trailer_lines),
)
).strip("\n")
elif not commit_message:
commit_message = self.get_git_commit_messages(
repository, log_format, remote, filenames
)
return commit_message
def squash_language(self, component, repository):
remote = repository.get_remote_branch_name()
languages = self.get_filenames(component)
messages = {}
for code, filenames in languages.items():
if not filenames:
continue
messages[code] = self.get_squash_commit_message(
repository, "%B", remote, filenames
)
repository.execute(["reset", "--mixed", remote])
for code, message in messages.items():
if not message:
continue
repository.commit(message, files=languages[code])
def squash_file(self, component, repository):
remote = repository.get_remote_branch_name()
languages = self.get_filenames(component)
messages = {}
for filenames in languages.values():
for filename in filenames:
messages[filename] = self.get_squash_commit_message(
repository, "%B", remote, [filename]
)
repository.execute(["reset", "--mixed", remote])
for filename, message in messages.items():
if not message:
continue
repository.commit(message, files=[filename])
def squash_author(self, component, repository):
remote = repository.get_remote_branch_name()
# Get list of pending commits with authors
commits = [
x.split(None, 1)
for x in reversed(
repository.execute(
["log", "--format=%H %aE", f"{remote}..HEAD"]
).splitlines()
)
]
gpg_sign = repository.get_gpg_sign_args()
tmp = "weblate-squash-tmp"
repository.delete_branch(tmp)
try:
# Create local branch for upstream
repository.execute(["branch", tmp, remote])
# Checkout upstream branch
repository.execute(["checkout", tmp])
while commits:
commit, author = commits.pop(0)
# Remember current revision for final squash
base = repository.get_last_revision()
# Cherry pick current commit (this should work
# unless something is messed up)
repository.execute(["cherry-pick", commit] + gpg_sign)
handled = []
# Pick other commits by same author
for i, other in enumerate(commits):
if other[1] != author:
continue
try:
repository.execute(["cherry-pick", other[0]] + gpg_sign)
handled.append(i)
except RepositoryException:
# If fails, continue to another author, we will
# pick this commit later (it depends on some other)
repository.execute(["cherry-pick", "--abort"])
break
# Remove processed commits from list
for i in reversed(handled):
del commits[i]
# Squash all current commits from one author
self.squash_all(component, repository, base, author)
# Update working copy with squashed commits
repository.execute(["checkout", repository.branch])
repository.execute(["reset", "--hard", tmp])
repository.delete_branch(tmp)
except RepositoryException:
report_error(cause="Failed squash")
# Revert to original branch without any changes
repository.execute(["reset", "--hard"])
repository.execute(["checkout", repository.branch])
repository.delete_branch(tmp)
def post_commit(self, component):
    """Addon hook: squash pending commits after a commit on *component*.

    Dispatches to squash_<mode> based on the addon configuration
    ("squash" key), then commits any leftover files generated by the
    addon itself.
    """
    repository = component.repository
    with repository.lock:
        # Ensure repository is rebased on current remote prior to squash, otherwise
        # we might be squashing upstream changes as well due to reset.
        if component.repo_needs_merge() and not component.update_branch(
            method="rebase", skip_push=True
        ):
            return
        if not repository.needs_push():
            return
        # e.g. squash_all / squash_file / squash_author, selected by config
        method = getattr(
            self, "squash_{}".format(self.instance.configuration["squash"])
        )
        method(component, repository)
        # Commit any left files, those were most likely generated
        # by addon and do not exactly match patterns above
        component.commit_files(
            template=component.addon_message,
            extra_context={"addon_name": self.verbose},
            signals=False,
            skip_push=True,
        )
| gpl-3.0 |
campagnola/acq4 | acq4/analysis/modules/AtlasBuilder/ctrlTemplate.py | 3 | 3715 | # -*- coding: utf-8 -*-
from __future__ import print_function
# Form implementation generated from reading ui file './acq4/analysis/modules/AtlasBuilder/ctrlTemplate.ui'
#
# Created: Tue Dec 24 01:49:12 2013
# by: PyQt4 UI code generator 4.10
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Compatibility shim: QString.fromUtf8 only exists under PyQt4 sip API v1;
# with API v2 (plain str), fall back to the identity function.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
# Compatibility shim: older PyQt4 translate() takes an encoding argument;
# newer versions dropped it, so provide both call forms.
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
    """pyuic4-generated layout for the AtlasBuilder label-editing panel.

    Do not edit by hand: regenerate from ctrlTemplate.ui instead (see the
    header warning).
    """

    def setupUi(self, Form):
        """Build the widget tree and layouts onto *Form*."""
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(170, 179)
        self.gridLayout = QtGui.QGridLayout(Form)
        self.gridLayout.setMargin(0)
        self.gridLayout.setSpacing(0)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        # "Labels" group: value / label text / color rows
        self.groupBox = QtGui.QGroupBox(Form)
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.gridLayout_2 = QtGui.QGridLayout(self.groupBox)
        self.gridLayout_2.setMargin(0)
        self.gridLayout_2.setSpacing(0)
        self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
        self.label = QtGui.QLabel(self.groupBox)
        self.label.setObjectName(_fromUtf8("label"))
        self.gridLayout_2.addWidget(self.label, 0, 0, 1, 1)
        self.valueSpin = QtGui.QSpinBox(self.groupBox)
        self.valueSpin.setMaximum(255)
        self.valueSpin.setObjectName(_fromUtf8("valueSpin"))
        self.gridLayout_2.addWidget(self.valueSpin, 0, 1, 1, 1)
        self.label_2 = QtGui.QLabel(self.groupBox)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.gridLayout_2.addWidget(self.label_2, 1, 0, 1, 1)
        self.labelText = QtGui.QLineEdit(self.groupBox)
        self.labelText.setObjectName(_fromUtf8("labelText"))
        self.gridLayout_2.addWidget(self.labelText, 1, 1, 1, 1)
        self.label_3 = QtGui.QLabel(self.groupBox)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.gridLayout_2.addWidget(self.label_3, 2, 0, 1, 1)
        self.colorBtn = ColorButton(self.groupBox)
        self.colorBtn.setText(_fromUtf8(""))
        self.colorBtn.setObjectName(_fromUtf8("colorBtn"))
        self.gridLayout_2.addWidget(self.colorBtn, 2, 1, 1, 1)
        self.gridLayout.addWidget(self.groupBox, 0, 0, 1, 2)
        # Pen size row below the group box
        self.label_4 = QtGui.QLabel(Form)
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.gridLayout.addWidget(self.label_4, 1, 0, 1, 1)
        self.penSizeSpin = QtGui.QSpinBox(Form)
        self.penSizeSpin.setMinimum(1)
        self.penSizeSpin.setProperty("value", 1)
        self.penSizeSpin.setObjectName(_fromUtf8("penSizeSpin"))
        self.gridLayout.addWidget(self.penSizeSpin, 1, 1, 1, 1)
        # Spacer pushes the controls to the top of the panel
        spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.gridLayout.addItem(spacerItem, 2, 0, 1, 1)

        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Apply (re-)translated display strings to all widgets."""
        Form.setWindowTitle(_translate("Form", "Form", None))
        self.groupBox.setTitle(_translate("Form", "Labels", None))
        self.label.setText(_translate("Form", "Value", None))
        self.label_2.setText(_translate("Form", "Label", None))
        self.label_3.setText(_translate("Form", "Color", None))
        self.label_4.setText(_translate("Form", "Pen Size", None))
from acq4.pyqtgraph.ColorButton import ColorButton
| mit |
makermade/arm_android-19_arm-linux-androideabi-4.8 | lib/python2.7/encodings/punycode.py | 586 | 6813 | # -*- coding: iso-8859-1 -*-
""" Codec for the Punicode encoding, as specified in RFC 3492
Written by Martin v. Löwis.
"""
import codecs
##################### Encoding #####################################
def segregate(str):
    """3.1 Basic code point segregation.

    Split *str* into (base, extended): the ASCII code points as an
    ASCII-encoded byte string (in original order) and the sorted list of
    distinct non-ASCII characters.
    """
    base = []
    extended = set()  # a real set instead of a dict used as one
    for c in str:
        if ord(c) < 128:
            base.append(c)
        else:
            extended.add(c)
    # sorted() works on both Python 2 and 3; dict.keys()/.sort() does not.
    return "".join(base).encode("ascii"), sorted(extended)
def selective_len(str, max):
    """Return the length of str, considering only characters below max."""
    return sum(1 for ch in str if ord(ch) < max)
def selective_find(str, char, index, pos):
    """Return a pair (index, pos), indicating the next occurrence of
    char in str. index is the position of the character considering
    only ordinals up to and including char, and pos is the position in
    the full string. index/pos is the starting position in the full
    string; (-1, -1) is returned when char does not occur again."""
    end = len(str)
    while True:
        pos += 1
        if pos == end:
            return (-1, -1)
        current = str[pos]
        if current == char:
            return index + 1, pos
        if current < char:
            # counts toward the selective index but is not a match
            index += 1
def insertion_unsort(str, extended):
    """3.2 Insertion unsort coding.

    Encode the insertion positions of the *extended* (sorted non-ASCII)
    characters of *str* as the list of deltas defined by RFC 3492: each
    delta is the number of (index, code point) states the decoder must
    skip before the next insertion.
    """
    oldchar = 0x80
    result = []
    oldindex = -1
    for c in extended:
        index = pos = -1
        char = ord(c)
        curlen = selective_len(str, char)
        # Cost of advancing the decoder's code-point counter from the
        # previous character value to this one, across curlen+1 slots.
        delta = (curlen+1) * (char - oldchar)
        while 1:
            index, pos = selective_find(str, c, index, pos)
            if index == -1:
                break
            delta += index - oldindex
            result.append(delta-1)
            oldindex = index
            delta = 0  # subsequent occurrences only pay the index gap
        oldchar = char
    return result
def T(j, bias):
    """Threshold function t(j): clamp 36*(j+1) - bias into [tmin, tmax]."""
    # Punycode parameters: tmin = 1, tmax = 26, base = 36
    return max(1, min(26, 36 * (j + 1) - bias))

# Digit alphabet: 'a'-'z' encode 0-25, '0'-'9' encode 26-35.
digits = "abcdefghijklmnopqrstuvwxyz0123456789"

def generate_generalized_integer(N, bias):
    """3.3 Generalized variable-length integers"""
    encoded = []
    j = 0
    t = T(0, bias)
    # Emit continuation digits while N is at or above the threshold.
    while N >= t:
        encoded.append(digits[t + ((N - t) % (36 - t))])
        N = (N - t) // (36 - t)
        j += 1
        t = T(j, bias)
    # Final (terminating) digit is simply the remaining value.
    encoded.append(digits[N])
    return encoded
def adapt(delta, first, numchars):
    """Bias adaptation (RFC 3492 section 6.1).

    Scale *delta* (damp by 700 on the very first adaptation, halve
    otherwise), spread it over *numchars*, then compute the new bias.
    """
    delta = delta // 700 if first else delta // 2
    delta += delta // numchars
    divisions = 0
    # ((base - tmin) * tmax) // 2 == 455
    while delta > 455:
        delta //= 35  # base - tmin
        divisions += 36
    return divisions + (36 * delta // (delta + 38))
def generate_integers(baselen, deltas):
    """Encode the list of *deltas* as a string of generalized integers,
    adapting the bias after each one.

    (The original docstring said "3.4 Bias adaptation", which actually
    describes adapt() above; this is the encoder's main digit loop.)
    """
    # Punycode parameters: initial bias = 72, damp = 700, skew = 38
    result = []
    bias = 72
    for points, delta in enumerate(deltas):
        s = generate_generalized_integer(delta, bias)
        result.extend(s)
        bias = adapt(delta, points==0, baselen+points+1)
    return "".join(result)
def punycode_encode(text):
    """Encode *text* as punycode: ASCII base, '-', then encoded deltas."""
    base, extended = segregate(text)
    # NOTE: segregate() already returns the base as an ASCII-encoded
    # string (see its return statement), so the former
    # "base = base.encode('ascii')" here was a redundant no-op on
    # Python 2 (and would crash on Python 3); it has been removed.
    deltas = insertion_unsort(text, extended)
    extended = generate_integers(len(base), deltas)
    if base:
        return base + "-" + extended
    return extended
##################### Decoding #####################################
def decode_generalized_number(extended, extpos, bias, errors):
    """3.3 Generalized variable-length integers (decoding side).

    Decode one generalized integer from *extended* starting at *extpos*.
    Returns (new extpos, value); value is None when decoding failed and
    *errors* is not "strict".
    """
    result = 0
    w = 1
    j = 0
    while 1:
        try:
            char = ord(extended[extpos])
        except IndexError:
            if errors == "strict":
                # raise-as-call form works on both Python 2 and 3
                # (the old "raise UnicodeError, msg" is Py2-only syntax)
                raise UnicodeError("incomplete punicode string")
            return extpos + 1, None
        extpos += 1
        if 0x41 <= char <= 0x5A: # A-Z
            digit = char - 0x41
        elif 0x30 <= char <= 0x39:
            digit = char - 22 # 0x30-26
        elif errors == "strict":
            # Report the character we just read: extpos has already been
            # advanced, so the original "extended[extpos]" named the *next*
            # character and could itself raise IndexError at end of input.
            raise UnicodeError("Invalid extended code point '%s'"
                               % extended[extpos - 1])
        else:
            return extpos, None
        t = T(j, bias)
        result += digit * w
        if digit < t:
            return extpos, result
        w = w * (36 - t)
        j += 1
def insertion_sort(base, extended, errors):
    """3.2 Insertion sort coding (decoding side).

    Re-insert the characters encoded in *extended* into *base*, the
    inverse of insertion_unsort().  (The original docstring said
    "Insertion unsort coding", copied from the encoder.)
    """
    char = 0x80
    pos = -1
    bias = 72
    extpos = 0
    while extpos < len(extended):
        newpos, delta = decode_generalized_number(extended, extpos,
                                                  bias, errors)
        if delta is None:
            # There was an error in decoding. We can't continue because
            # synchronization is lost.
            return base
        pos += delta+1
        char += pos // (len(base) + 1)
        if char > 0x10FFFF:
            if errors == "strict":
                raise UnicodeError, ("Invalid character U+%x" % char)
            char = ord('?')
        pos = pos % (len(base) + 1)
        base = base[:pos] + unichr(char) + base[pos:]
        bias = adapt(delta, (extpos == 0), len(base))
        extpos = newpos
    return base
def punycode_decode(text, errors):
    """Decode punycode *text*: split on the last '-' into base/extended,
    then re-insert the extended characters via insertion_sort()."""
    pos = text.rfind("-")
    if pos == -1:
        base = ""
        extended = text
    else:
        base = text[:pos]
        extended = text[pos+1:]
    # Python 2: decode the ASCII base into a unicode object
    base = unicode(base, "ascii", errors)
    # digits are case-insensitive; normalize before decoding
    extended = extended.upper()
    return insertion_sort(base, extended, errors)
### Codec APIs
class Codec(codecs.Codec):
    """Stateless punycode codec (codecs module API)."""

    def encode(self,input,errors='strict'):
        res = punycode_encode(input)
        return res, len(input)

    def decode(self,input,errors='strict'):
        # Only these three error policies are meaningful for punycode.
        if errors not in ('strict', 'replace', 'ignore'):
            raise UnicodeError, "Unsupported error handling "+errors
        res = punycode_decode(input, errors)
        return res, len(input)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Punycode cannot be encoded incrementally; each call encodes whole input.
    def encode(self, input, final=False):
        return punycode_encode(input)

class IncrementalDecoder(codecs.IncrementalDecoder):
    # Likewise, decoding is all-at-once.
    def decode(self, input, final=False):
        if self.errors not in ('strict', 'replace', 'ignore'):
            raise UnicodeError, "Unsupported error handling "+self.errors
        return punycode_decode(input, self.errors)

class StreamWriter(Codec,codecs.StreamWriter):
    # Stream wrappers just reuse the stateless Codec implementation.
    pass

class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry used by the codecs registry."""
    return codecs.CodecInfo(
        name='punycode',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
| gpl-2.0 |
bfontecc007/osbs-client | tests/build_/test_arrangements.py | 1 | 35719 | """
Copyright (c) 2017 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals
import shutil
import os
import json
from osbs.api import OSBS
from osbs.constants import (DEFAULT_ARRANGEMENT_VERSION,
ORCHESTRATOR_INNER_TEMPLATE,
WORKER_INNER_TEMPLATE,
SECRETS_PATH,
ORCHESTRATOR_OUTER_TEMPLATE)
from osbs import utils
from osbs.repo_utils import RepoInfo
from osbs.build.build_request import BuildRequest
from tests.constants import (TEST_GIT_URI,
TEST_GIT_REF,
TEST_GIT_BRANCH,
TEST_COMPONENT,
TEST_VERSION,
TEST_FILESYSTEM_KOJI_TASK_ID,
INPUTS_PATH)
from tests.fake_api import openshift, osbs, osbs_with_pulp # noqa:F401
from tests.test_api import request_as_response
from tests.build_.test_build_request import (get_plugins_from_build_json,
get_plugin,
plugin_value_get,
NoSuchPluginException)
from flexmock import flexmock
import pytest
class ArrangementBase(object):
    """Shared scaffolding for per-arrangement-version test classes.

    Subclasses set ARRANGEMENT_VERSION, the *_PARAMS dicts and
    DEFAULT_PLUGINS; this base provides mocking and request helpers.
    """

    COMMON_PARAMS = {}
    ORCHESTRATOR_ADD_PARAMS = {}
    WORKER_ADD_PARAMS = {}

    def mock_env(self, base_image='fedora23/python'):
        """Mock repo inspection and make create_*_build return the request JSON."""
        class MockParser(object):
            labels = {
                'name': 'fedora23/something',
                'com.redhat.component': TEST_COMPONENT,
                'version': TEST_VERSION,
            }
            baseimage = base_image

        (flexmock(utils)
            .should_receive('get_repo_info')
            .with_args(TEST_GIT_URI, TEST_GIT_REF, git_branch=TEST_GIT_BRANCH)
            .and_return(RepoInfo(MockParser())))

        # Trick create_orchestrator_build into return the *request* JSON
        flexmock(OSBS, _create_build_config_and_build=request_as_response)
        flexmock(OSBS, _create_scratch_build=request_as_response)

    @pytest.mark.parametrize('template', [  # noqa:F811
        ORCHESTRATOR_INNER_TEMPLATE,
        WORKER_INNER_TEMPLATE,
    ])
    def test_running_order(self, osbs, template):
        """
        Verify the plugin running order.

        This is to catch tests missing from these test classes when a
        plugin is added.
        """
        inner_template = template.format(
            arrangement_version=self.ARRANGEMENT_VERSION,
        )
        build_request = osbs.get_build_request(inner_template=inner_template)
        inner = build_request.inner_template
        phases = ('prebuild_plugins',
                  'buildstep_plugins',
                  'prepublish_plugins',
                  'postbuild_plugins',
                  'exit_plugins')
        actual = {}
        for phase in phases:
            actual[phase] = [plugin['name']
                             for plugin in inner.get(phase, {})]

        assert actual == self.DEFAULT_PLUGINS[template]

    def get_build_request(self, build_type, osbs,  # noqa:F811
                          additional_params=None):
        """Build request params for the given build type and return its JSON."""
        self.mock_env(base_image=additional_params.get('base_image'))
        params = self.COMMON_PARAMS.copy()
        assert build_type in ('orchestrator', 'worker')
        if build_type == 'orchestrator':
            params.update(self.ORCHESTRATOR_ADD_PARAMS)
            fn = osbs.create_orchestrator_build
        elif build_type == 'worker':
            params.update(self.WORKER_ADD_PARAMS)
            fn = osbs.create_worker_build

        params.update(additional_params or {})
        params['arrangement_version'] = self.ARRANGEMENT_VERSION
        return params, fn(**params).json

    def get_orchestrator_build_request(self, osbs,  # noqa:F811
                                       additional_params=None):
        """Convenience wrapper: orchestrator-type build request."""
        return self.get_build_request('orchestrator', osbs, additional_params)

    def get_worker_build_request(self, osbs,  # noqa:F811
                                 additional_params=None):
        """Convenience wrapper: worker-type build request."""
        return self.get_build_request('worker', osbs, additional_params)

    def assert_plugin_not_present(self, build_json, phase, name):
        """Assert the named plugin is absent from the given phase."""
        plugins = get_plugins_from_build_json(build_json)
        with pytest.raises(NoSuchPluginException):
            get_plugin(plugins, phase, name)

    def get_pulp_sync_registry(self, conf):
        """Return the docker registry used by pulp content sync."""
        for registry_uri in conf.get_registry_uris():
            registry = utils.RegistryURI(registry_uri)
            if registry.version == 'v2':
                return registry.docker_uri
class TestArrangementV1(ArrangementBase):
    """
    This class tests support for the oldest supported arrangement
    version, 1.

    NOTE! When removing this test class, *make sure* that any methods
    it provides for the test class for the next oldest supported
    arrangement version are copied across to that test class.
    """

    ARRANGEMENT_VERSION = 1

    COMMON_PARAMS = {
        'git_uri': TEST_GIT_URI,
        'git_ref': TEST_GIT_REF,
        'git_branch': TEST_GIT_BRANCH,
        'user': 'john-foo',
        'component': TEST_COMPONENT,
        'openshift_uri': 'http://openshift/',
    }

    ORCHESTRATOR_ADD_PARAMS = {
        'platforms': ['x86_64'],
    }

    WORKER_ADD_PARAMS = {
        'platform': 'x86_64',
        'release': 1,
    }

    # Expected plugin running order per template (checked by
    # ArrangementBase.test_running_order).
    DEFAULT_PLUGINS = {
        # Changing this? Add test methods
        ORCHESTRATOR_INNER_TEMPLATE: {
            'prebuild_plugins': [
                'pull_base_image',
                'bump_release',
                'add_labels_in_dockerfile',
                'reactor_config',
            ],

            'buildstep_plugins': [
                'orchestrate_build',
            ],

            'prepublish_plugins': [
            ],

            'postbuild_plugins': [
            ],

            'exit_plugins': [
                'store_metadata_in_osv3',
                'remove_built_image',
            ],
        },

        # Changing this? Add test methods
        WORKER_INNER_TEMPLATE: {
            'prebuild_plugins': [
                'add_filesystem',
                'pull_base_image',
                'add_labels_in_dockerfile',
                'change_from_in_dockerfile',
                'add_help',
                'add_dockerfile',
                'distgit_fetch_artefacts',
                'fetch_maven_artifacts',
                'koji',
                'add_yum_repo_by_url',
                'inject_yum_repo',
                'distribution_scope',
            ],

            'buildstep_plugins': [
            ],

            'prepublish_plugins': [
                'squash',
            ],

            'postbuild_plugins': [
                'all_rpm_packages',
                'tag_by_labels',
                'tag_from_config',
                'tag_and_push',
                'pulp_push',
                'pulp_sync',
                'compress',
                'pulp_pull',
            ],

            'exit_plugins': [
                'delete_from_registry',  # not tested
                'koji_promote',  # not tested
                'store_metadata_in_osv3',  # not tested
                'koji_tag_build',  # not tested
                'sendmail',  # not tested
                'remove_built_image',  # not tested
            ],
        },
    }

    @pytest.mark.parametrize('build_type', [  # noqa:F811
        'orchestrator',
        'worker',
    ])
    @pytest.mark.parametrize('scratch', [False, True])
    @pytest.mark.parametrize('base_image, expect_plugin', [
        ('koji/image-build', False),
        ('foo', True),
    ])
    def test_pull_base_image(self, osbs, build_type, scratch,
                             base_image, expect_plugin):
        """pull_base_image runs only for non-filesystem base images."""
        phase = 'prebuild_plugins'
        plugin = 'pull_base_image'
        additional_params = {
            'base_image': base_image,
        }
        if scratch:
            additional_params['scratch'] = True

        (params, build_json) = self.get_build_request(build_type,
                                                      osbs,
                                                      additional_params)
        plugins = get_plugins_from_build_json(build_json)

        if not expect_plugin:
            with pytest.raises(NoSuchPluginException):
                get_plugin(plugins, phase, plugin)
        else:
            args = plugin_value_get(plugins, phase, plugin, 'args')

            allowed_args = set([
                'parent_registry',
                'parent_registry_insecure',
            ])
            assert set(args.keys()) <= allowed_args

    @pytest.mark.parametrize('scratch', [False, True])  # noqa:F811
    @pytest.mark.parametrize('base_image', ['koji/image-build', 'foo'])
    def test_delete_from_registry(self, osbs_with_pulp, base_image, scratch):
        """delete_from_registry (worker) only receives a 'registries' arg."""
        phase = 'exit_plugins'
        plugin = 'delete_from_registry'
        additional_params = {
            'base_image': base_image,
        }
        if scratch:
            additional_params['scratch'] = True

        (params, build_json) = self.get_build_request('worker',
                                                      osbs_with_pulp,
                                                      additional_params)
        plugins = get_plugins_from_build_json(build_json)

        args = plugin_value_get(plugins, phase, plugin, 'args')
        allowed_args = set([
            'registries',
        ])
        assert set(args.keys()) <= allowed_args

    @pytest.mark.parametrize('scratch', [False, True])  # noqa:F811
    @pytest.mark.parametrize('base_image, expect_plugin', [
        ('koji/image-build', True),
        ('foo', False)
    ])
    def test_add_filesystem_in_worker(self, osbs, base_image, scratch,
                                      expect_plugin):
        """add_filesystem runs in the worker only for koji/image-build bases."""
        additional_params = {
            'base_image': base_image,
            'yum_repourls': ['https://example.com/my.repo'],
        }
        if scratch:
            additional_params['scratch'] = True

        params, build_json = self.get_worker_build_request(osbs,
                                                           additional_params)
        plugins = get_plugins_from_build_json(build_json)

        if not expect_plugin:
            with pytest.raises(NoSuchPluginException):
                get_plugin(plugins, 'prebuild_plugins', 'add_filesystem')
        else:
            args = plugin_value_get(plugins, 'prebuild_plugins',
                                    'add_filesystem', 'args')

            allowed_args = set([
                'koji_hub',
                'repos',
            ])
            assert set(args.keys()) <= allowed_args
            assert 'koji_hub' in args
            assert args['repos'] == params['yum_repourls']
class TestArrangementV2(TestArrangementV1):
    """
    Differences from arrangement version 1:
    - add_filesystem runs with different parameters
    - add_filesystem also runs in orchestrator build
    - koji_parent runs in orchestrator build
    """

    ARRANGEMENT_VERSION = 2

    WORKER_ADD_PARAMS = {
        'platform': 'x86_64',
        'release': 1,
        'filesystem_koji_task_id': TEST_FILESYSTEM_KOJI_TASK_ID,
    }

    # Expected plugin running order per template (checked by
    # ArrangementBase.test_running_order).
    DEFAULT_PLUGINS = {
        # Changing this? Add test methods
        ORCHESTRATOR_INNER_TEMPLATE: {
            'prebuild_plugins': [
                'add_filesystem',
                'pull_base_image',
                'bump_release',
                'add_labels_in_dockerfile',
                'koji_parent',
                'reactor_config',
            ],

            'buildstep_plugins': [
                'orchestrate_build',
            ],

            'prepublish_plugins': [
            ],

            'postbuild_plugins': [
            ],

            'exit_plugins': [
                'store_metadata_in_osv3',
                'remove_built_image',
            ],
        },

        # Changing this? Add test methods
        WORKER_INNER_TEMPLATE: {
            'prebuild_plugins': [
                'add_filesystem',
                'pull_base_image',
                'add_labels_in_dockerfile',
                'change_from_in_dockerfile',
                'add_help',
                'add_dockerfile',
                'distgit_fetch_artefacts',
                'fetch_maven_artifacts',
                'koji',
                'add_yum_repo_by_url',
                'inject_yum_repo',
                'distribution_scope',
            ],

            'buildstep_plugins': [
            ],

            'prepublish_plugins': [
                'squash',
            ],

            'postbuild_plugins': [
                'all_rpm_packages',
                'tag_by_labels',
                'tag_from_config',
                'tag_and_push',
                'pulp_push',
                'pulp_sync',
                'compress',
                'pulp_pull',
            ],

            'exit_plugins': [
                'delete_from_registry',
                'koji_promote',
                'store_metadata_in_osv3',
                'koji_tag_build',
                'sendmail',
                'remove_built_image',
            ],
        },
    }

    @pytest.mark.parametrize('scratch', [False, True])  # noqa:F811
    @pytest.mark.parametrize('base_image, expect_plugin', [
        ('koji/image-build', True),
        ('foo', False)
    ])
    def test_add_filesystem_in_orchestrator(self, osbs, base_image, scratch,
                                            expect_plugin):
        """add_filesystem in the orchestrator also receives 'architectures'."""
        additional_params = {
            'base_image': base_image,
            'yum_repourls': ['https://example.com/my.repo'],
        }
        if scratch:
            additional_params['scratch'] = True

        (params,
         build_json) = self.get_orchestrator_build_request(osbs,
                                                           additional_params)
        plugins = get_plugins_from_build_json(build_json)

        if not expect_plugin:
            with pytest.raises(NoSuchPluginException):
                get_plugin(plugins, 'prebuild_plugins', 'add_filesystem')
        else:
            args = plugin_value_get(plugins, 'prebuild_plugins',
                                    'add_filesystem', 'args')

            allowed_args = set([
                'koji_hub',
                'repos',
                'architectures',
            ])
            assert set(args.keys()) <= allowed_args
            assert 'koji_hub' in args
            assert args['repos'] == params['yum_repourls']
            assert args['architectures'] == params['platforms']

    @pytest.mark.parametrize('scratch', [False, True])  # noqa:F811
    @pytest.mark.parametrize('base_image, expect_plugin', [
        ('koji/image-build', True),
        ('foo', False)
    ])
    def test_add_filesystem_in_worker(self, osbs, base_image, scratch,
                                      expect_plugin):
        """Worker add_filesystem now takes 'from_task_id' (v2 difference)."""
        additional_params = {
            'base_image': base_image,
            'yum_repourls': ['https://example.com/my.repo'],
        }
        if scratch:
            additional_params['scratch'] = True

        params, build_json = self.get_worker_build_request(osbs,
                                                           additional_params)
        plugins = get_plugins_from_build_json(build_json)

        if not expect_plugin:
            with pytest.raises(NoSuchPluginException):
                get_plugin(plugins, 'prebuild_plugins', 'add_filesystem')
        else:
            args = plugin_value_get(plugins, 'prebuild_plugins',
                                    'add_filesystem', 'args')

            allowed_args = set([
                'koji_hub',
                'repos',
                'from_task_id',
            ])
            assert set(args.keys()) <= allowed_args
            assert 'koji_hub' in args
            assert args['repos'] == params['yum_repourls']
            assert args['from_task_id'] == params['filesystem_koji_task_id']

    @pytest.mark.parametrize(('scratch', 'base_image', 'expect_plugin'), [  # noqa:F811
        (True, 'koji/image-build', False),
        (True, 'foo', False),
        (False, 'koji/image-build', False),
        (False, 'foo', True),
    ])
    def test_koji_parent_in_orchestrator(self, osbs, base_image, scratch,
                                         expect_plugin):
        """koji_parent runs only for non-scratch, non-filesystem builds."""
        additional_params = {
            'base_image': base_image,
        }
        if scratch:
            additional_params['scratch'] = True

        params, build_json = self.get_orchestrator_build_request(osbs, additional_params)
        plugins = get_plugins_from_build_json(build_json)

        if not expect_plugin:
            with pytest.raises(NoSuchPluginException):
                get_plugin(plugins, 'prebuild_plugins', 'koji_parent')
        else:
            args = plugin_value_get(plugins, 'prebuild_plugins',
                                    'koji_parent', 'args')

            allowed_args = set([
                'koji_hub',
            ])
            assert set(args.keys()) <= allowed_args
            assert 'koji_hub' in args
class TestArrangementV3(TestArrangementV2):
    """
    Differences from arrangement version 2:
    - fetch_worker_metadata, koji_import, koji_tag_build, sendmail,
      check_and_set_rebuild, run in the orchestrator build
    - koji_upload runs in the worker build
    - koji_promote does not run
    """

    ARRANGEMENT_VERSION = 3

    # Expected plugin running order per template (checked by
    # ArrangementBase.test_running_order).
    DEFAULT_PLUGINS = {
        # Changing this? Add test methods
        ORCHESTRATOR_INNER_TEMPLATE: {
            'prebuild_plugins': [
                'add_filesystem',
                'pull_base_image',
                'bump_release',
                'add_labels_in_dockerfile',
                'koji_parent',
                'reactor_config',
                'check_and_set_rebuild',
            ],

            'buildstep_plugins': [
                'orchestrate_build',
            ],

            'prepublish_plugins': [
            ],

            'postbuild_plugins': [
                'fetch_worker_metadata',
            ],

            'exit_plugins': [
                'delete_from_registry',
                'koji_import',
                'koji_tag_build',
                'store_metadata_in_osv3',
                'sendmail',
                'remove_built_image',
            ],
        },

        # Changing this? Add test methods
        WORKER_INNER_TEMPLATE: {
            'prebuild_plugins': [
                'add_filesystem',
                'pull_base_image',
                'add_labels_in_dockerfile',
                'change_from_in_dockerfile',
                'add_help',
                'add_dockerfile',
                'distgit_fetch_artefacts',
                'fetch_maven_artifacts',
                'koji',
                'add_yum_repo_by_url',
                'inject_yum_repo',
                'distribution_scope',
            ],

            'buildstep_plugins': [
            ],

            'prepublish_plugins': [
                'squash',
            ],

            'postbuild_plugins': [
                'all_rpm_packages',
                'tag_by_labels',
                'tag_from_config',
                'tag_and_push',
                'pulp_push',
                'pulp_sync',
                'compress',
                'koji_upload',
                'pulp_pull',
            ],

            'exit_plugins': [
                'delete_from_registry',
                'store_metadata_in_osv3',
                'remove_built_image',
            ],
        },
    }

    def test_is_default(self):
        """
        Test this is the default arrangement
        """

        # Note! If this test fails it probably means you need to
        # derive a new TestArrangementV[n] class from this class and
        # move the method to the new class.
        assert DEFAULT_ARRANGEMENT_VERSION == self.ARRANGEMENT_VERSION

    @pytest.mark.parametrize('scratch', [False, True])  # noqa:F811
    def test_koji_upload(self, osbs, scratch):
        """koji_upload runs in the worker with exact args; absent for scratch."""
        additional_params = {
            'base_image': 'fedora:latest',
            'koji_upload_dir': 'upload',
        }
        if scratch:
            additional_params['scratch'] = True

        params, build_json = self.get_worker_build_request(osbs, additional_params)
        plugins = get_plugins_from_build_json(build_json)

        if scratch:
            with pytest.raises(NoSuchPluginException):
                get_plugin(plugins, 'postbuild_plugins', 'koji_upload')
            return

        args = plugin_value_get(plugins, 'postbuild_plugins',
                                'koji_upload', 'args')
        match_args = {
            'blocksize': 10485760,
            'build_json_dir': 'inputs',
            'koji_keytab': False,
            'koji_principal': False,
            'koji_upload_dir': 'upload',
            'kojihub': 'http://koji.example.com/kojihub',
            'url': '/',
            'use_auth': False,
            'verify_ssl': False
        }
        assert match_args == args

    @pytest.mark.parametrize('scratch', [False, True])  # noqa:F811
    def test_koji_import(self, osbs, scratch):
        """koji_import runs in orchestrator exit phase; absent for scratch."""
        additional_params = {
            'base_image': 'fedora:latest',
            'koji_upload_dir': 'upload',
        }
        if scratch:
            additional_params['scratch'] = True

        params, build_json = self.get_orchestrator_build_request(osbs, additional_params)
        plugins = get_plugins_from_build_json(build_json)

        if scratch:
            with pytest.raises(NoSuchPluginException):
                get_plugin(plugins, 'exit_plugins', 'koji_import')
            return

        args = plugin_value_get(plugins, 'exit_plugins',
                                'koji_import', 'args')
        match_args = {
            'koji_keytab': False,
            'kojihub': 'http://koji.example.com/kojihub',
            'url': '/',
            'use_auth': False,
            'verify_ssl': False
        }
        assert match_args == args

    @pytest.mark.parametrize('scratch', [False, True])  # noqa:F811
    def test_fetch_worker_metadata(self, osbs, scratch):
        """fetch_worker_metadata runs (argless) except for scratch builds."""
        additional_params = {
            'base_image': 'fedora:latest',
        }
        if scratch:
            additional_params['scratch'] = True

        params, build_json = self.get_orchestrator_build_request(osbs, additional_params)
        plugins = get_plugins_from_build_json(build_json)

        if scratch:
            with pytest.raises(NoSuchPluginException):
                get_plugin(plugins, 'postbuild_plugins', 'fetch_worker_metadata')
            return

        args = plugin_value_get(plugins, 'postbuild_plugins',
                                'fetch_worker_metadata', 'args')
        match_args = {}
        assert match_args == args

    @pytest.mark.parametrize('triggers', [False, True])  # noqa:F811
    def test_check_and_set_rebuild(self, tmpdir, osbs, triggers):
        """check_and_set_rebuild is enabled only when the build has triggers."""
        imagechange = [
            {
                "type": "ImageChange",
                "imageChange": {
                    "from": {
                        "kind": "ImageStreamTag",
                        "name": "{{BASE_IMAGE_STREAM}}"
                    }
                }
            }
        ]

        if triggers:
            # Copy the templates to tmpdir and inject an imageChange trigger
            # into the orchestrator outer template.
            orch_outer_temp = ORCHESTRATOR_INNER_TEMPLATE.format(
                arrangement_version=self.ARRANGEMENT_VERSION
            )
            for basename in [ORCHESTRATOR_OUTER_TEMPLATE, orch_outer_temp]:
                shutil.copy(os.path.join(INPUTS_PATH, basename),
                            os.path.join(str(tmpdir), basename))

            with open(os.path.join(str(tmpdir), ORCHESTRATOR_OUTER_TEMPLATE), 'r+') as orch_json:
                build_json = json.load(orch_json)
                build_json['spec']['triggers'] = imagechange

                orch_json.seek(0)
                json.dump(build_json, orch_json)
                orch_json.truncate()

            flexmock(osbs.os_conf, get_build_json_store=lambda: str(tmpdir))
            (flexmock(BuildRequest)
                .should_receive('adjust_for_repo_info')
                .and_return(True))

        additional_params = {
            'base_image': 'fedora:latest',
        }
        params, build_json = self.get_orchestrator_build_request(osbs, additional_params)
        plugins = get_plugins_from_build_json(build_json)

        if not triggers:
            with pytest.raises(NoSuchPluginException):
                get_plugin(plugins, 'prebuild_plugins', 'check_and_set_rebuild')
            return

        args = plugin_value_get(plugins, 'prebuild_plugins',
                                'check_and_set_rebuild', 'args')
        match_args = {
            "label_key": "is_autorebuild",
            "label_value": "true",
            "url": "/",
            "verify_ssl": False,
            'use_auth': False,
        }
        assert match_args == args
class TestArrangementV4(TestArrangementV3):
"""
Orchestrator build differences from arrangement version 3:
- tag_from_config enabled
- pulp_tag enabled
- pulp_sync enabled
- pulp_sync takes an additional "publish":false argument
- pulp_publish enabled
- pulp_pull enabled
- group_manifests enabled
Worker build differences from arrangement version 3:
- tag_from_config takes "tag_suffixes" argument
- tag_by_labels disabled
- pulp_push takes an additional "publish":false argument
- pulp_sync disabled
- pulp_pull disabled
- delete_from_registry disabled
"""
ARRANGEMENT_VERSION = 4
DEFAULT_PLUGINS = {
# Changing this? Add test methods
ORCHESTRATOR_INNER_TEMPLATE: {
'prebuild_plugins': [
'reactor_config',
'add_filesystem',
'inject_parent_image',
'pull_base_image',
'bump_release',
'add_labels_in_dockerfile',
'koji_parent',
'check_and_set_rebuild',
],
'buildstep_plugins': [
'orchestrate_build',
],
'prepublish_plugins': [
],
'postbuild_plugins': [
'fetch_worker_metadata',
'tag_from_config',
'group_manifests',
'pulp_tag',
'pulp_sync',
],
'exit_plugins': [
'pulp_publish',
'pulp_pull',
'delete_from_registry',
'koji_import',
'koji_tag_build',
'store_metadata_in_osv3',
'sendmail',
'remove_built_image',
],
},
# Changing this? Add test methods
WORKER_INNER_TEMPLATE: {
'prebuild_plugins': [
'add_filesystem',
'inject_parent_image',
'pull_base_image',
'add_labels_in_dockerfile',
'change_from_in_dockerfile',
'add_help',
'add_dockerfile',
'distgit_fetch_artefacts',
'fetch_maven_artifacts',
'koji',
'add_yum_repo_by_url',
'inject_yum_repo',
'distribution_scope',
],
'buildstep_plugins': [
],
'prepublish_plugins': [
'squash',
],
'postbuild_plugins': [
'all_rpm_packages',
'tag_from_config',
'tag_and_push',
'pulp_push',
'compress',
'koji_upload',
],
'exit_plugins': [
'store_metadata_in_osv3',
'remove_built_image',
],
},
}
    # Arrangement 4 is not yet ready to be default. Once it is, this skip
    # decorator should be deleted and test_is_default from previous arrangement
    # test collection removed.
    @pytest.mark.skip('Arrangement 4 is not ready to be default!')
    def test_is_default(self):
        """
        Test this is the default arrangement
        """

        # Note! If this test fails it probably means you need to
        # derive a new TestArrangementV[n] class from this class and
        # move the method to the new class.
        assert DEFAULT_ARRANGEMENT_VERSION == self.ARRANGEMENT_VERSION
    @pytest.mark.parametrize(('params', 'build_type', 'has_plat_tag',  # noqa:F811
                              'has_primary_tag'), (
        ({}, 'orchestrator', False, True),
        ({'scratch': True}, 'orchestrator', False, False),
        ({'platform': 'x86_64'}, 'worker', True, False),
        ({'platform': 'x86_64', 'scratch': True}, 'worker', True, False),
    ))
    def test_tag_from_config(self, osbs, params, build_type, has_plat_tag, has_primary_tag):
        """tag_from_config gets platform-suffixed unique tags (worker) and
        primary tags only in non-scratch orchestrator builds."""
        additional_params = {
            'base_image': 'fedora:latest',
        }
        additional_params.update(params)
        _, build_json = self.get_build_request(build_type, osbs, additional_params)
        plugins = get_plugins_from_build_json(build_json)

        args = plugin_value_get(plugins, 'postbuild_plugins', 'tag_from_config', 'args')

        assert set(args.keys()) == set(['tag_suffixes'])
        assert set(args['tag_suffixes'].keys()) == set(['unique', 'primary'])

        unique_tags = args['tag_suffixes']['unique']
        assert len(unique_tags) == 1
        unique_tag_suffix = ''
        if has_plat_tag:
            unique_tag_suffix = '-' + additional_params.get('platform')
        assert unique_tags[0].endswith(unique_tag_suffix)

        primary_tags = args['tag_suffixes']['primary']
        if has_primary_tag:
            assert set(primary_tags) == set(['latest', '{version}', '{version}-{release}'])
    def test_pulp_push(self, openshift):  # noqa:F811
        """Worker pulp_push is configured with publish=False in v4."""
        platform_descriptors = {'x86_64': {'enable_v1': True}}
        osbs_api = osbs_with_pulp(openshift, platform_descriptors=platform_descriptors)
        additional_params = {
            'base_image': 'fedora:latest',
        }
        _, build_json = self.get_worker_build_request(osbs_api, additional_params)
        plugins = get_plugins_from_build_json(build_json)

        args = plugin_value_get(plugins, 'postbuild_plugins', 'pulp_push', 'args')

        build_conf = osbs_api.build_conf
        # Use first docker registry and strip off /v2
        pulp_registry_name = build_conf.get_pulp_registry()
        pulp_secret_path = '/'.join([SECRETS_PATH, build_conf.get_pulp_secret()])

        expected_args = {
            'pulp_registry_name': pulp_registry_name,
            'pulp_secret_path': pulp_secret_path,
            'load_exported_image': True,
            'dockpulp_loglevel': 'INFO',
            'publish': False
        }
        assert args == expected_args
    def test_pulp_tag(self, osbs_with_pulp):  # noqa:F811
        """Orchestrator pulp_tag runs with no arguments."""
        additional_params = {
            'base_image': 'fedora:latest',
        }
        _, build_json = self.get_orchestrator_build_request(osbs_with_pulp, additional_params)
        plugins = get_plugins_from_build_json(build_json)

        args = plugin_value_get(plugins, 'postbuild_plugins', 'pulp_tag', 'args')
        assert args == {}
def test_pulp_sync(self, osbs_with_pulp):  # noqa:F811
    """Check the pulp_sync plugin args derived from the pulp build config."""
    additional_params = {
        'base_image': 'fedora:latest',
    }
    _, build_json = self.get_orchestrator_build_request(osbs_with_pulp, additional_params)
    plugins = get_plugins_from_build_json(build_json)

    args = plugin_value_get(plugins, 'postbuild_plugins', 'pulp_sync', 'args')

    build_conf = osbs_with_pulp.build_conf
    docker_registry = self.get_pulp_sync_registry(build_conf)
    pulp_registry_name = build_conf.get_pulp_registry()
    pulp_secret_path = '/'.join([SECRETS_PATH, build_conf.get_pulp_secret()])
    expected_args = {
        'docker_registry': docker_registry,
        'pulp_registry_name': pulp_registry_name,
        'pulp_secret_path': pulp_secret_path,
        'dockpulp_loglevel': 'INFO',
        'publish': False
    }
    assert args == expected_args
def test_pulp_publish(self, osbs_with_pulp):  # noqa:F811
    """Check the pulp_publish exit plugin is present with empty args."""
    additional_params = {
        'base_image': 'fedora:latest',
    }
    _, build_json = self.get_orchestrator_build_request(osbs_with_pulp, additional_params)
    plugins = get_plugins_from_build_json(build_json)

    args = plugin_value_get(plugins, 'exit_plugins', 'pulp_publish', 'args')
    expected_args = {}
    assert args == expected_args
def test_pulp_pull(self, osbs_with_pulp):  # noqa:F811
    """Check the pulp_pull exit plugin allows insecure registry access."""
    additional_params = {
        'base_image': 'fedora:latest',
    }
    _, build_json = self.get_orchestrator_build_request(osbs_with_pulp, additional_params)
    plugins = get_plugins_from_build_json(build_json)

    args = plugin_value_get(plugins, 'exit_plugins', 'pulp_pull', 'args')
    expected_args = {'insecure': True}
    assert args == expected_args
@pytest.mark.parametrize('scratch', [False, True])  # noqa:F811
@pytest.mark.parametrize('base_image', ['koji/image-build', 'foo'])
def test_delete_from_registry(self, osbs_with_pulp, base_image, scratch):
    """Check delete_from_registry targets the pulp sync registry, insecurely.

    Parametrized over scratch and base-image variants; the expected plugin
    args are the same in every combination.
    """
    phase = 'exit_plugins'
    plugin = 'delete_from_registry'
    additional_params = {
        'base_image': base_image,
    }
    if scratch:
        additional_params['scratch'] = True

    _, build_json = self.get_orchestrator_build_request(osbs_with_pulp, additional_params)
    plugins = get_plugins_from_build_json(build_json)

    args = plugin_value_get(plugins, phase, plugin, 'args')
    docker_registry = self.get_pulp_sync_registry(osbs_with_pulp.build_conf)
    assert args == {'registries': {docker_registry: {'insecure': True}}}
def test_group_manifests(self, openshift):  # noqa:F811
    """Check group_manifests maps platform names to goarch values."""
    platform_descriptors = {'x86_64': {'architecture': 'amd64'}}
    osbs_api = osbs_with_pulp(openshift, platform_descriptors=platform_descriptors)
    additional_params = {
        'base_image': 'fedora:latest',
    }
    _, build_json = self.get_orchestrator_build_request(osbs_api, additional_params)
    plugins = get_plugins_from_build_json(build_json)

    args = plugin_value_get(plugins, 'postbuild_plugins', 'group_manifests', 'args')
    expected_args = {
        'goarch': {'x86_64': 'amd64'},
        'group': False,
        'pulp_registry_name': osbs_api.build_conf.get_pulp_registry()
    }
    assert args == expected_args
@pytest.mark.parametrize('build_type', (  # noqa:F811
    'orchestrator',
    'worker',
))
def test_inject_parent_image(self, osbs, build_type):
    """Check inject_parent_image receives the koji parent build and hub URL."""
    additional_params = {
        'base_image': 'foo',
        'koji_parent_build': 'fedora-26-9',
    }
    _, build_json = self.get_build_request(build_type, osbs, additional_params)
    plugins = get_plugins_from_build_json(build_json)

    args = plugin_value_get(plugins, 'prebuild_plugins', 'inject_parent_image', 'args')
    expected_args = {
        'koji_parent_build': 'fedora-26-9',
        'koji_hub': osbs.build_conf.get_kojihub()
    }
    assert args == expected_args
| bsd-3-clause |
mykytamorachov/outpost | flask/lib/python2.7/site-packages/blinker/_utilities.py | 144 | 4457 | from weakref import ref
from blinker._saferef import BoundMethodWeakref
try:
    callable
except NameError:
    # Python 3.0/3.1 removed the callable() builtin; provide a shim that
    # checks for the __call__ protocol directly.
    def callable(object):
        return hasattr(object, '__call__')
try:
    from collections import defaultdict
except ImportError:
    # NOTE: this previously used a bare ``except:``, which would also
    # swallow SystemExit/KeyboardInterrupt raised during import.  Only a
    # missing collections.defaultdict should trigger this pure-Python
    # fallback (for ancient interpreters).
    class defaultdict(dict):
        """Minimal reimplementation of collections.defaultdict."""

        def __init__(self, default_factory=None, *a, **kw):
            # Mirror the stdlib contract: the factory must be callable or None.
            if (default_factory is not None and
                not hasattr(default_factory, '__call__')):
                raise TypeError('first argument must be callable')
            dict.__init__(self, *a, **kw)
            self.default_factory = default_factory

        def __getitem__(self, key):
            try:
                return dict.__getitem__(self, key)
            except KeyError:
                # Missing keys are materialized via __missing__.
                return self.__missing__(key)

        def __missing__(self, key):
            if self.default_factory is None:
                raise KeyError(key)
            # Insert and return the freshly created default value.
            self[key] = value = self.default_factory()
            return value

        def __reduce__(self):
            # Pickle support: reconstruct with the same factory and items.
            if self.default_factory is None:
                args = tuple()
            else:
                args = self.default_factory,
            return type(self), args, None, None, self.items()

        def copy(self):
            return self.__copy__()

        def __copy__(self):
            return type(self)(self.default_factory, self)

        def __deepcopy__(self, memo):
            import copy
            return type(self)(self.default_factory,
                              copy.deepcopy(self.items()))

        def __repr__(self):
            return 'defaultdict(%s, %s)' % (self.default_factory,
                                            dict.__repr__(self))
try:
    from contextlib import contextmanager
except ImportError:
    # Python < 2.5 has no contextlib; substitute a stub that fails loudly
    # only when a context-manager-based API is actually invoked.
    def contextmanager(fn):
        def oops(*args, **kw):
            raise RuntimeError("Python 2.5 or above is required to use "
                               "context managers.")
        oops.__name__ = fn.__name__
        return oops
class _symbol(object):
    # Internal value type backing ``symbol``; instances are interned by name.

    def __init__(self, name):
        """Construct a new named symbol."""
        self.__name__ = self.name = name

    def __reduce__(self):
        # Pickle through the interning ``symbol`` constructor so that
        # identity is preserved across pickle round-trips.
        return symbol, (self.name,)

    def __repr__(self):
        return self.name

# Present the internal class under the public name in reprs/tracebacks.
_symbol.__name__ = 'symbol'
class symbol(object):
    """A constant symbol.

    >>> symbol('foo') is symbol('foo')
    True
    >>> symbol('foo')
    foo

    A slight refinement of the MAGICCOOKIE=object() pattern.  The primary
    advantage of symbol() is its repr().  They are also singletons:
    repeated calls of symbol('name') all return the same instance.
    """

    # Intern table mapping names to their unique _symbol instances.
    symbols = {}

    def __new__(cls, name):
        existing = cls.symbols.get(name)
        if existing is not None:
            return existing
        # setdefault ensures a single instance wins even if two callers
        # race past the lookup above.
        return cls.symbols.setdefault(name, _symbol(name))
try:
    # Python 2: both byte strings and unicode strings count as text.
    text = (str, unicode)
except NameError:
    # Python 3: only str.
    text = str
def hashable_identity(obj):
    """Return a hashable identity token for *obj*.

    Bound methods are keyed by the (function, instance) id pair so that
    distinct bound-method objects wrapping the same pair compare equal;
    strings are keyed by value; everything else by object id.
    """
    # Python 3 bound methods expose __func__/__self__.
    if hasattr(obj, '__func__'):
        return (id(obj.__func__), id(obj.__self__))
    # Python 2 bound methods expose im_func/im_self instead.
    if hasattr(obj, 'im_func'):
        return (id(obj.im_func), id(obj.im_self))
    if isinstance(obj, text):
        return obj
    return id(obj)
# Weak reference types the signal machinery recognizes as "already weak".
WeakTypes = (ref, BoundMethodWeakref)


class annotatable_weakref(ref):
    """A weakref.ref that supports custom instance attributes."""
def reference(object, callback=None, **annotations):
    """Return an annotated weak ref to *object*.

    Keyword arguments are attached to the returned reference as
    attributes.  Callables get bound-method-aware treatment.
    """
    factory = callable_reference if callable(object) else annotatable_weakref
    weak = factory(object, callback)
    for key, value in annotations.items():
        setattr(weak, key, value)
    return weak
def callable_reference(object, callback=None):
    """Return an annotated weak ref, supporting bound instance methods.

    Bound methods need BoundMethodWeakref because a plain weakref to the
    method object dies immediately (bound methods are created per access).
    """
    # Check both the Python 2 (im_self) and Python 3 (__self__) spellings.
    for self_attr in ('im_self', '__self__'):
        if getattr(object, self_attr, None) is not None:
            return BoundMethodWeakref(target=object, on_delete=callback)
    return annotatable_weakref(object, callback)
class lazy_property(object):
    """Descriptor that computes a value on first access and caches it.

    The wrapped function runs once per instance; its result is stored on
    the instance under the same attribute name, shadowing the descriptor
    for all subsequent lookups.
    """

    def __init__(self, deferred):
        self._deferred = deferred
        self.__doc__ = deferred.__doc__

    def __get__(self, obj, cls):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        result = self._deferred(obj)
        # Cache on the instance; later lookups bypass this descriptor.
        setattr(obj, self._deferred.__name__, result)
        return result
| gpl-2.0 |
vinodkc/spark | examples/src/main/python/sql/streaming/structured_network_wordcount.py | 27 | 2500 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
r"""
Counts words in UTF8 encoded, '\n' delimited text received from the network.
Usage: structured_network_wordcount.py <hostname> <port>
<hostname> and <port> describe the TCP server that Structured Streaming
would connect to receive data.
To run this on your local machine, you need to first run a Netcat server
`$ nc -lk 9999`
and then run the example
`$ bin/spark-submit examples/src/main/python/sql/streaming/structured_network_wordcount.py
localhost 9999`
"""
import sys
from pyspark.sql import SparkSession
from pyspark.sql.functions import explode
from pyspark.sql.functions import split
if __name__ == "__main__":
    # Require exactly <hostname> and <port> on the command line.
    if len(sys.argv) != 3:
        print("Usage: structured_network_wordcount.py <hostname> <port>", file=sys.stderr)
        sys.exit(-1)

    host = sys.argv[1]
    port = int(sys.argv[2])

    spark = SparkSession\
        .builder\
        .appName("StructuredNetworkWordCount")\
        .getOrCreate()

    # Create DataFrame representing the stream of input lines from connection to host:port
    lines = spark\
        .readStream\
        .format('socket')\
        .option('host', host)\
        .option('port', port)\
        .load()

    # Split the lines into words
    words = lines.select(
        # explode turns each item in an array into a separate row
        explode(
            split(lines.value, ' ')
        ).alias('word')
    )

    # Generate running word count
    wordCounts = words.groupBy('word').count()

    # Start running the query that prints the running counts to the console.
    # 'complete' mode re-emits the full aggregate table on every trigger.
    query = wordCounts\
        .writeStream\
        .outputMode('complete')\
        .format('console')\
        .start()

    # Block until the streaming query is stopped or fails.
    query.awaitTermination()
| apache-2.0 |
szeged/servo | tests/wpt/web-platform-tests/tools/third_party/hyper/hyper/packages/hpack/hpack_compat.py | 40 | 3580 | # -*- coding: utf-8 -*-
"""
hpack/hpack_compat
~~~~~~~~~~~~~~~~~~
Provides an abstraction layer over two HPACK implementations.
This module has a pure-Python greenfield HPACK implementation that can be used
on all Python platforms. However, this implementation is both slower and more
memory-hungry than could be achieved with a C-language version. Additionally,
nghttp2's HPACK implementation currently achieves better compression ratios
than hyper's in almost all benchmarks.
For those who care about efficiency and speed in HPACK, this module allows you
to use nghttp2's HPACK implementation instead of ours. This module detects
whether the nghttp2 bindings are installed, and if they are it wraps them in
a hpack-compatible API and uses them instead of its own. If not, it falls back
to the built-in Python bindings.
"""
import logging
from .hpack import _to_bytes
log = logging.getLogger(__name__)
# Attempt to import nghttp2; fall back to the pure-Python implementation
# when the optional C bindings are not installed.
try:
    import nghttp2
    USE_NGHTTP2 = True
    log.debug("Using nghttp2's HPACK implementation.")
except ImportError:
    USE_NGHTTP2 = False
    log.debug("Using our pure-Python HPACK implementation.")
if USE_NGHTTP2:
    class Encoder(object):
        """
        An HPACK encoder object. This object takes HTTP headers and emits
        encoded HTTP/2 header blocks.
        """

        def __init__(self):
            # Underlying nghttp2 header deflater.
            self._e = nghttp2.HDDeflater()

        @property
        def header_table_size(self):
            """
            Returns the header table size. For the moment this isn't
            useful, so we don't use it.
            """
            raise NotImplementedError()

        @header_table_size.setter
        def header_table_size(self, value):
            log.debug("Setting header table size to %d", value)
            self._e.change_table_size(value)

        def encode(self, headers, huffman=True):
            """
            Encode the headers. The huffman parameter has no effect, it is
            simply present for compatibility.
            """
            log.debug("HPACK encoding %s", headers)

            # Turn the headers into a list of tuples if possible. This is the
            # natural way to interact with them in HPACK.
            if isinstance(headers, dict):
                headers = headers.items()

            # Next, walk across the headers and turn them all into bytestrings.
            headers = [(_to_bytes(n), _to_bytes(v)) for n, v in headers]

            # Now, let nghttp2 do its thing.
            header_block = self._e.deflate(headers)

            return header_block

    class Decoder(object):
        """
        An HPACK decoder object.
        """

        def __init__(self):
            # Underlying nghttp2 header inflater.
            self._d = nghttp2.HDInflater()

        @property
        def header_table_size(self):
            """
            Returns the header table size. For the moment this isn't
            useful, so we don't use it.
            """
            raise NotImplementedError()

        @header_table_size.setter
        def header_table_size(self, value):
            log.debug("Setting header table size to %d", value)
            self._d.change_table_size(value)

        def decode(self, data):
            """
            Takes an HPACK-encoded header block and decodes it into a header
            set.
            """
            log.debug("Decoding %s", data)

            headers = self._d.inflate(data)
            # nghttp2 returns bytes; present decoded text to callers.
            return [(n.decode('utf-8'), v.decode('utf-8')) for n, v in headers]
else:
    # Grab the built-in encoder and decoder.
    from .hpack import Encoder, Decoder
| mpl-2.0 |
akiss77/servo | tests/wpt/css-tests/tools/manifest/tests/test_manifest.py | 59 | 6506 | import platform
import os
import mock
import hypothesis as h
import hypothesis.strategies as hs
import pytest
from .. import manifest, item, sourcefile, utils
def SourceFileWithTest(path, hash, cls, *args):
    """Build a mock SourceFile whose manifest_items yields one test of *cls*."""
    s = mock.Mock(rel_path=path, hash=hash)
    test = cls(s, utils.rel_path_to_url(path), *args)
    s.manifest_items = mock.Mock(return_value=(cls.item_type, [test]))
    return s
@hs.composite
def rel_dir_file_path(draw):
    """Hypothesis strategy generating a relative file path of bounded length."""
    length = draw(hs.integers(min_value=1, max_value=20))
    if length == 1:
        return "a"
    else:
        remaining = length - 2
        # Use the platform's separator repertoire so paths are valid locally.
        if os.path.sep == "/":
            alphabet = "a/"
        elif os.path.sep == "\\":
            alphabet = "a/\\"
        else:
            assert False, "uhhhh, this platform is weird"
        mid = draw(hs.text(alphabet=alphabet, min_size=remaining, max_size=remaining))
        # Anchor both ends with a normal character so separators stay interior.
        return os.path.normcase("a" + mid + "a")
@hs.composite
def sourcefile_strategy(draw):
    """Hypothesis strategy producing a mocked SourceFile of a random item type."""
    item_classes = [item.TestharnessTest, item.RefTest, item.RefTestNode,
                    item.ManualTest, item.Stub, item.WebdriverSpecTest,
                    item.ConformanceCheckerTest, item.SupportFile]
    cls = draw(hs.sampled_from(item_classes))

    path = draw(rel_dir_file_path())
    hash = draw(hs.text(alphabet="0123456789abcdef", min_size=40, max_size=40))
    s = mock.Mock(rel_path=path, hash=hash)

    if cls in (item.RefTest, item.RefTestNode):
        # Reftests need a distinct reference path plus an equality operator.
        ref_path = draw(rel_dir_file_path())
        h.assume(path != ref_path)
        ref_eq = draw(hs.sampled_from(["==", "!="]))
        test = cls(s, utils.rel_path_to_url(path), [(utils.rel_path_to_url(ref_path), ref_eq)])
    elif cls is item.SupportFile:
        test = cls(s)
    else:
        test = cls(s, utils.rel_path_to_url(path))

    s.manifest_items = mock.Mock(return_value=(cls.item_type, [test]))
    return s
@h.given(hs.lists(sourcefile_strategy(),
                  min_size=1, average_size=10, max_size=1000,
                  unique_by=lambda x: x.rel_path))
@h.example([SourceFileWithTest("a", "0"*40, item.ConformanceCheckerTest)])
def test_manifest_to_json(s):
    """Serializing then deserializing a manifest must round-trip losslessly."""
    m = manifest.Manifest()

    assert m.update(s) is True

    json_str = m.to_json()
    loaded = manifest.Manifest.from_json("/", json_str)

    assert list(loaded) == list(m)

    assert loaded.to_json() == json_str
@h.given(hs.lists(sourcefile_strategy(),
                  min_size=1, average_size=10,
                  unique_by=lambda x: x.rel_path))
@h.example([SourceFileWithTest("a", "0"*40, item.TestharnessTest)])
@h.example([SourceFileWithTest("a", "0"*40, item.RefTest, [("/aa", "==")])])
def test_manifest_idempotent(s):
    """A second update with identical sources must be a no-op."""
    m = manifest.Manifest()

    assert m.update(s) is True

    m1 = list(m)

    # Unchanged input: update reports no change and contents are stable.
    assert m.update(s) is False

    assert list(m) == m1
def test_manifest_to_json_forwardslash():
    """Forward-slash paths serialize unchanged in the JSON manifest."""
    m = manifest.Manifest()

    s = SourceFileWithTest("a/b", "0"*40, item.TestharnessTest)

    assert m.update([s]) is True

    assert m.to_json() == {
        'paths': {
            'a/b': ('0000000000000000000000000000000000000000', 'testharness')
        },
        'version': 4,
        'url_base': '/',
        'items': {
            'reftest': {},
            'reftest_node': {},
            'testharness': {
                'a/b': [['/a/b', {}]]
            }
        }
    }
def test_manifest_to_json_backslash():
    """Backslash paths serialize with forward slashes on Windows, error elsewhere."""
    m = manifest.Manifest()

    s = SourceFileWithTest("a\\b", "0"*40, item.TestharnessTest)

    if os.path.sep == "\\":
        # On Windows the separator is normalized to '/' in the JSON output.
        assert m.update([s]) is True

        assert m.to_json() == {
            'paths': {
                'a/b': ('0000000000000000000000000000000000000000', 'testharness')
            },
            'version': 4,
            'url_base': '/',
            'items': {
                'reftest': {},
                'reftest_node': {},
                'testharness': {
                    'a/b': [['/a/b', {}]]
                }
            }
        }
    else:
        with pytest.raises(ValueError):
            # one of these must raise ValueError
            # the first must return True if it doesn't raise
            assert m.update([s]) is True
            m.to_json()
def test_manifest_from_json_backslash():
    """Backslash paths in serialized JSON are rejected on load."""
    json_obj = {
        'paths': {
            'a\\b': ('0000000000000000000000000000000000000000', 'testharness')
        },
        'version': 4,
        'url_base': '/',
        'items': {
            'reftest': {},
            'reftest_node': {},
            'testharness': {
                'a\\b': [['/a/b', {}]]
            }
        }
    }

    with pytest.raises(ValueError):
        manifest.Manifest.from_json("/", json_obj)
def test_reftest_computation_chain():
    """A reftest referenced by another reftest is demoted to a reftest_node."""
    m = manifest.Manifest()

    s1 = SourceFileWithTest("test1", "0"*40, item.RefTest, [("/test2", "==")])
    s2 = SourceFileWithTest("test2", "0"*40, item.RefTest, [("/test3", "==")])

    m.update([s1, s2])

    test1 = s1.manifest_items()[1][0]
    test2 = s2.manifest_items()[1][0]
    test2_node = test2.to_RefTestNode()

    assert list(m) == [("reftest", test1.path, {test1}),
                       ("reftest_node", test2.path, {test2_node})]
def test_reftest_computation_chain_update_add():
    """Adding a referencing reftest later demotes the referenced test to a node."""
    m = manifest.Manifest()

    s2 = SourceFileWithTest("test2", "0"*40, item.RefTest, [("/test3", "==")])
    test2 = s2.manifest_items()[1][0]

    assert m.update([s2]) is True

    assert list(m) == [("reftest", test2.path, {test2})]

    s1 = SourceFileWithTest("test1", "0"*40, item.RefTest, [("/test2", "==")])
    test1 = s1.manifest_items()[1][0]

    # s2's hash is unchanged, but it has gone from a test to a node
    assert m.update([s1, s2]) is True

    test2_node = test2.to_RefTestNode()

    assert list(m) == [("reftest", test1.path, {test1}),
                       ("reftest_node", test2.path, {test2_node})]
def test_reftest_computation_chain_update_remove():
    """Removing the referencing reftest promotes the node back to a test."""
    m = manifest.Manifest()

    s1 = SourceFileWithTest("test1", "0"*40, item.RefTest, [("/test2", "==")])
    s2 = SourceFileWithTest("test2", "0"*40, item.RefTest, [("/test3", "==")])

    assert m.update([s1, s2]) is True

    test1 = s1.manifest_items()[1][0]
    test2 = s2.manifest_items()[1][0]
    test2_node = test2.to_RefTestNode()

    assert list(m) == [("reftest", test1.path, {test1}),
                       ("reftest_node", test2.path, {test2_node})]

    # s2's hash is unchanged, but it has gone from a node to a test
    assert m.update([s2]) is True

    assert list(m) == [("reftest", test2.path, {test2})]
| mpl-2.0 |
crazy-canux/django | tests/prefetch_related/models.py | 255 | 7972 | import uuid
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# Basic tests
# Author/Book graph used by the basic prefetch_related tests.
@python_2_unicode_compatible
class Author(models.Model):
    name = models.CharField(max_length=50, unique=True)
    first_book = models.ForeignKey('Book', models.CASCADE, related_name='first_time_authors')
    # Self-referential M2M through an explicit intermediary model.
    favorite_authors = models.ManyToManyField(
        'self', through='FavoriteAuthors', symmetrical=False, related_name='favors_me')

    def __str__(self):
        return self.name

    class Meta:
        ordering = ['id']


# Multi-table inheritance child of Author (exercises parent-link joins).
class AuthorWithAge(Author):
    author = models.OneToOneField(Author, models.CASCADE, parent_link=True)
    age = models.IntegerField()


# Through model for Author.favorite_authors; FKs target the 'name' field.
class FavoriteAuthors(models.Model):
    author = models.ForeignKey(Author, models.CASCADE, to_field='name', related_name='i_like')
    likes_author = models.ForeignKey(Author, models.CASCADE, to_field='name', related_name='likes_me')

    class Meta:
        ordering = ['id']


@python_2_unicode_compatible
class AuthorAddress(models.Model):
    author = models.ForeignKey(Author, models.CASCADE, to_field='name', related_name='addresses')
    address = models.TextField()

    class Meta:
        ordering = ['id']

    def __str__(self):
        return self.address


@python_2_unicode_compatible
class Book(models.Model):
    title = models.CharField(max_length=255)
    authors = models.ManyToManyField(Author, related_name='books')

    def __str__(self):
        return self.title

    class Meta:
        ordering = ['id']


# Multi-table inheritance child of Book.
class BookWithYear(Book):
    book = models.OneToOneField(Book, models.CASCADE, parent_link=True)
    published_year = models.IntegerField()
    aged_authors = models.ManyToManyField(
        AuthorWithAge, related_name='books_with_year')


class Bio(models.Model):
    author = models.OneToOneField(Author, models.CASCADE)
    books = models.ManyToManyField(Book, blank=True)


@python_2_unicode_compatible
class Reader(models.Model):
    name = models.CharField(max_length=50)
    books_read = models.ManyToManyField(Book, related_name='read_by')

    def __str__(self):
        return self.name

    class Meta:
        ordering = ['id']


class BookReview(models.Model):
    book = models.ForeignKey(BookWithYear, models.CASCADE)
    notes = models.TextField(null=True, blank=True)
# Models for default manager tests
class Qualification(models.Model):
    name = models.CharField(max_length=10)

    class Meta:
        ordering = ['id']


# Manager whose default queryset already prefetches; used to test that
# prefetch_related composes with a prefetching default manager.
class TeacherManager(models.Manager):
    def get_queryset(self):
        return super(TeacherManager, self).get_queryset().prefetch_related('qualifications')


@python_2_unicode_compatible
class Teacher(models.Model):
    name = models.CharField(max_length=50)
    qualifications = models.ManyToManyField(Qualification)

    objects = TeacherManager()

    def __str__(self):
        # Note: touches the prefetched qualifications relation.
        return "%s (%s)" % (self.name, ", ".join(q.name for q in self.qualifications.all()))

    class Meta:
        ordering = ['id']


class Department(models.Model):
    name = models.CharField(max_length=50)
    teachers = models.ManyToManyField(Teacher)

    class Meta:
        ordering = ['id']
# GenericRelation/GenericForeignKey tests
# Models exercising GenericForeignKey/GenericRelation prefetching.
@python_2_unicode_compatible
class TaggedItem(models.Model):
    tag = models.SlugField()
    # Primary generic relation (content_type/object_id pair).
    content_type = models.ForeignKey(
        ContentType,
        models.CASCADE,
        related_name="taggeditem_set2",
    )
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')
    # Second generic relation with non-default field names.
    created_by_ct = models.ForeignKey(
        ContentType,
        models.SET_NULL,
        null=True,
        related_name='taggeditem_set3',
    )
    created_by_fkey = models.PositiveIntegerField(null=True)
    created_by = GenericForeignKey('created_by_ct', 'created_by_fkey',)
    # Third generic relation using a CharField object id.
    favorite_ct = models.ForeignKey(
        ContentType,
        models.SET_NULL,
        null=True,
        related_name='taggeditem_set4',
    )
    favorite_fkey = models.CharField(max_length=64, null=True)
    favorite = GenericForeignKey('favorite_ct', 'favorite_fkey')

    def __str__(self):
        return self.tag

    class Meta:
        ordering = ['id']


class Bookmark(models.Model):
    url = models.URLField()
    tags = GenericRelation(TaggedItem, related_query_name='bookmarks')
    favorite_tags = GenericRelation(TaggedItem,
                                    content_type_field='favorite_ct',
                                    object_id_field='favorite_fkey',
                                    related_query_name='favorite_bookmarks')

    class Meta:
        ordering = ['id']


class Comment(models.Model):
    comment = models.TextField()

    # Content-object field
    content_type = models.ForeignKey(ContentType, models.CASCADE)
    object_pk = models.TextField()
    content_object = GenericForeignKey(ct_field="content_type", fk_field="object_pk")

    class Meta:
        ordering = ['id']
# Models for lookup ordering tests
# Models for lookup-ordering tests.
class House(models.Model):
    name = models.CharField(max_length=50)
    address = models.CharField(max_length=255)
    owner = models.ForeignKey('Person', models.SET_NULL, null=True)
    main_room = models.OneToOneField('Room', models.SET_NULL, related_name='main_room_of', null=True)

    class Meta:
        ordering = ['id']


class Room(models.Model):
    name = models.CharField(max_length=50)
    house = models.ForeignKey(House, models.CASCADE, related_name='rooms')

    class Meta:
        ordering = ['id']


class Person(models.Model):
    name = models.CharField(max_length=50)
    houses = models.ManyToManyField(House, related_name='occupants')

    @property
    def primary_house(self):
        # Assume business logic forces every person to have at least one house.
        return sorted(self.houses.all(), key=lambda house: -house.rooms.count())[0]

    @property
    def all_houses(self):
        return list(self.houses.all())

    class Meta:
        ordering = ['id']
# Models for nullable FK tests
# Model for nullable self-referential FK tests.
@python_2_unicode_compatible
class Employee(models.Model):
    name = models.CharField(max_length=50)
    boss = models.ForeignKey('self', models.SET_NULL, null=True, related_name='serfs')

    def __str__(self):
        return self.name

    class Meta:
        ordering = ['id']
# Ticket #19607
# Regression models for ticket #19607.
@python_2_unicode_compatible
class LessonEntry(models.Model):
    name1 = models.CharField(max_length=200)
    name2 = models.CharField(max_length=200)

    def __str__(self):
        return "%s %s" % (self.name1, self.name2)


@python_2_unicode_compatible
class WordEntry(models.Model):
    lesson_entry = models.ForeignKey(LessonEntry, models.CASCADE)
    name = models.CharField(max_length=200)

    def __str__(self):
        return "%s (%s)" % (self.name, self.id)
# Ticket #21410: Regression when related_name="+"
# Ticket #21410: regression when related_name="+" (no reverse accessor).
@python_2_unicode_compatible
class Author2(models.Model):
    name = models.CharField(max_length=50, unique=True)
    first_book = models.ForeignKey('Book', models.CASCADE, related_name='first_time_authors+')
    favorite_books = models.ManyToManyField('Book', related_name='+')

    def __str__(self):
        return self.name

    class Meta:
        ordering = ['id']
# Models for many-to-many with UUID pk test:
# Models for many-to-many with UUID primary keys.
class Pet(models.Model):
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    name = models.CharField(max_length=20)
    people = models.ManyToManyField(Person, related_name='pets')


class Flea(models.Model):
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    current_room = models.ForeignKey(Room, models.SET_NULL, related_name='fleas', null=True)
    pets_visited = models.ManyToManyField(Pet, related_name='fleas_hosted')
    people_visited = models.ManyToManyField(Person, related_name='fleas_hosted')
| bsd-3-clause |
nlfiedler/mal | python/step6_file.py | 57 | 3096 | import sys, traceback
import mal_readline
import mal_types as types
import reader, printer
from env import Env
import core
# read
def READ(str):
    """Parse a source string into a mal AST."""
    return reader.read_str(str)
# eval
def eval_ast(ast, env):
    """Evaluate a non-apply form: resolve symbols, recurse into collections."""
    if types._symbol_Q(ast):
        # Symbol lookup; raises if unbound.
        return env.get(ast)
    elif types._list_Q(ast):
        return types._list(*map(lambda a: EVAL(a, env), ast))
    elif types._vector_Q(ast):
        return types._vector(*map(lambda a: EVAL(a, env), ast))
    elif types._hash_map_Q(ast):
        # Evaluate keys and values pairwise into a flat arg list.
        keyvals = []
        for k in ast.keys():
            keyvals.append(EVAL(k, env))
            keyvals.append(EVAL(ast[k], env))
        return types._hash_map(*keyvals)
    else:
        return ast  # primitive value, return unchanged
def EVAL(ast, env):
    """Evaluate a mal AST in *env*.

    Implemented as a loop so that tail positions (let*, do, if, and
    user-function application) are handled with tail-call optimization
    instead of growing the Python stack.
    """
    while True:
        #print("EVAL %s" % printer._pr_str(ast))
        if not types._list_Q(ast):
            return eval_ast(ast, env)

        # apply list
        if len(ast) == 0: return ast
        a0 = ast[0]

        if "def!" == a0:
            # (def! sym value) -> bind in the current env.
            a1, a2 = ast[1], ast[2]
            res = EVAL(a2, env)
            return env.set(a1, res)
        elif "let*" == a0:
            # (let* (bindings...) body) -> evaluate body in a child env.
            a1, a2 = ast[1], ast[2]
            let_env = Env(env)
            for i in range(0, len(a1), 2):
                let_env.set(a1[i], EVAL(a1[i+1], let_env))
            ast = a2
            env = let_env
            # Continue loop (TCO)
        elif "do" == a0:
            # Evaluate all but the last form; the last form is in tail position.
            eval_ast(ast[1:-1], env)
            ast = ast[-1]
            # Continue loop (TCO)
        elif "if" == a0:
            a1, a2 = ast[1], ast[2]
            cond = EVAL(a1, env)
            # Only nil and false are falsy in mal.
            if cond is None or cond is False:
                if len(ast) > 3: ast = ast[3]
                else: ast = None
            else:
                ast = a2
            # Continue loop (TCO)
        elif "fn*" == a0:
            # (fn* (params) body) -> closure capturing the current env.
            a1, a2 = ast[1], ast[2]
            return types._function(EVAL, Env, a2, env, a1)
        else:
            # Function application.
            el = eval_ast(ast, env)
            f = el[0]
            if hasattr(f, '__ast__'):
                # mal-defined function: loop with its body/env (TCO).
                ast = f.__ast__
                env = f.__gen_env__(el[1:])
            else:
                # Python-defined builtin: call directly.
                return f(*el[1:])
# print
def PRINT(exp):
    """Render a mal value back to its printable string form."""
    return printer._pr_str(exp)
# repl
repl_env = Env()
def REP(str):
    """One read-eval-print cycle against the global repl environment."""
    return PRINT(EVAL(READ(str), repl_env))

# core.py: defined using python
for k, v in core.ns.items(): repl_env.set(types._symbol(k), v)
# 'eval' and *ARGV* give mal programs access to the interpreter and CLI args
# (argv[0] is this script, argv[1] the mal file; the rest are mal's args).
repl_env.set(types._symbol('eval'), lambda ast: EVAL(ast, repl_env))
repl_env.set(types._symbol('*ARGV*'), types._list(*sys.argv[2:]))

# core.mal: defined using the language itself
REP("(def! not (fn* (a) (if a false true)))")
REP("(def! load-file (fn* (f) (eval (read-string (str \"(do \" (slurp f) \")\")))))")

# Non-interactive mode: run the given mal file and exit.
if len(sys.argv) >= 2:
    REP('(load-file "' + sys.argv[1] + '")')
    sys.exit(0)
# repl loop: read lines until EOF, skipping blanks and printing any
# evaluation errors without terminating the session.
while True:
    try:
        line = mal_readline.readline("user> ")
        if line == None: break
        if line == "": continue
        print(REP(line))
    except reader.Blank: continue
    except Exception as e:
        print("".join(traceback.format_exception(*sys.exc_info())))
| mpl-2.0 |
agconti/njode | env/lib/python2.7/site-packages/pygments/lexers/rdf.py | 72 | 3790 | # -*- coding: utf-8 -*-
"""
pygments.lexers.rdf
~~~~~~~~~~~~~~~~~~~
Lexers for semantic web and RDF query languages and markup.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, default
from pygments.token import Keyword, Punctuation, String, Number, Operator, \
Whitespace, Name, Literal, Comment, Text
__all__ = ['SparqlLexer']
class SparqlLexer(RegexLexer):
    """
    Lexer for `SPARQL <http://www.w3.org/TR/rdf-sparql-query/>`_ query language.

    .. versionadded:: 2.0
    """
    name = 'SPARQL'
    aliases = ['sparql']
    filenames = ['*.rq', '*.sparql']
    mimetypes = ['application/sparql-query']

    # SPARQL keywords are case-insensitive.
    flags = re.IGNORECASE

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            # Query forms and solution modifiers.
            (r'(select|construct|describe|ask|where|filter|group\s+by|minus|'
             r'distinct|reduced|from named|from|order\s+by|limit|'
             r'offset|bindings|load|clear|drop|create|add|move|copy|'
             r'insert\s+data|delete\s+data|delete\s+where|delete|insert|'
             r'using named|using|graph|default|named|all|optional|service|'
             r'silent|bind|union|not in|in|as|a)', Keyword),
            # PREFIX/BASE declarations.
            (r'(prefix|base)(\s+)([a-z][\w-]*)(\s*)(\:)',
             bygroups(Keyword, Whitespace, Name.Namespace, Whitespace,
                      Punctuation)),
            (r'\?[a-z_]\w*', Name.Variable),
            # IRI references.
            (r'<[^>]+>', Name.Label),
            # Prefixed names: prefix:localname.
            (r'([a-z][\w-]*)(\:)([a-z][\w-]*)',
             bygroups(Name.Namespace, Punctuation, Name.Tag)),
            # Built-in functions and aggregates.
            (r'(str|lang|langmatches|datatype|bound|iri|uri|bnode|rand|abs|'
             r'ceil|floor|round|concat|strlen|ucase|lcase|encode_for_uri|'
             r'contains|strstarts|strends|strbefore|strafter|year|month|day|'
             r'hours|minutes|seconds|timezone|tz|now|md5|sha1|sha256|sha384|'
             r'sha512|coalesce|if|strlang|strdt|sameterm|isiri|isuri|isblank|'
             r'isliteral|isnumeric|regex|substr|replace|exists|not exists|'
             r'count|sum|min|max|avg|sample|group_concat|separator)\b',
             Name.Function),
            (r'(true|false)', Literal),
            (r'[+\-]?\d*\.\d+', Number.Float),
            (r'[+\-]?\d*(:?\.\d+)?E[+\-]?\d+', Number.Float),
            (r'[+\-]?\d+', Number.Integer),
            (r'(\|\||&&|=|\*|\-|\+|/)', Operator),
            (r'[(){}.;,:^]', Punctuation),
            (r'#[^\n]+', Comment),
            # Four string forms; each transitions to its own state so that
            # escapes and the closing delimiter are matched correctly.
            (r'"""', String, 'triple-double-quoted-string'),
            (r'"', String, 'single-double-quoted-string'),
            (r"'''", String, 'triple-single-quoted-string'),
            (r"'", String, 'single-single-quoted-string'),
        ],
        'triple-double-quoted-string': [
            (r'"""', String, 'end-of-string'),
            (r'[^\\]+', String),
            (r'\\', String, 'string-escape'),
        ],
        'single-double-quoted-string': [
            (r'"', String, 'end-of-string'),
            (r'[^"\\\n]+', String),
            (r'\\', String, 'string-escape'),
        ],
        'triple-single-quoted-string': [
            (r"'''", String, 'end-of-string'),
            (r'[^\\]+', String),
            (r'\\', String, 'string-escape'),
        ],
        'single-single-quoted-string': [
            (r"'", String, 'end-of-string'),
            (r"[^'\\\n]+", String),
            (r'\\', String, 'string-escape'),
        ],
        'string-escape': [
            (r'.', String, '#pop'),
        ],
        # After a closing quote: optional language tag or datatype suffix.
        'end-of-string': [
            (r'(@)([a-z]+(:?-[a-z0-9]+)*)',
             bygroups(Operator, Name.Function), '#pop:2'),
            (r'\^\^', Operator, '#pop:2'),
            default('#pop:2'),
        ],
    }
| bsd-3-clause |
okwow123/djangol2 | example/env/lib/python2.7/site-packages/allauth/socialaccount/providers/soundcloud/provider.py | 10 | 1029 | from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class SoundCloudAccount(ProviderAccount):
    """Presents a SoundCloud social account via its stored extra_data."""

    def get_profile_url(self):
        return self.account.extra_data.get('permalink_url')

    def get_avatar_url(self):
        return self.account.extra_data.get('avatar_url')

    def to_str(self):
        # Prefer the full name, then the username, then the generic label.
        fallback = super(SoundCloudAccount, self).to_str()
        data = self.account.extra_data
        return data.get('full_name') or data.get('username') or fallback
class SoundCloudProvider(OAuth2Provider):
    """allauth OAuth2 provider definition for SoundCloud."""
    id = 'soundcloud'
    name = 'SoundCloud'
    account_class = SoundCloudAccount

    def extract_uid(self, data):
        # SoundCloud returns a numeric id; allauth expects a string uid.
        return str(data['id'])

    def extract_common_fields(self, data):
        return dict(name=data.get('full_name'),
                    username=data.get('username'),
                    email=data.get('email'))


# Registered by allauth's provider discovery.
provider_classes = [SoundCloudProvider]
| mit |
espadrine/opera | chromium/src/third_party/scons-2.0.1/engine/SCons/Tool/dvi.py | 61 | 2388 | """SCons.Tool.dvi
Common DVI Builder definition for various other Tool modules that use it.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/dvi.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.Builder
import SCons.Tool
DVIBuilder = None
def generate(env):
    """Attach the shared skeleton DVI builder to *env* (idempotent)."""
    try:
        env['BUILDERS']['DVI']
    except KeyError:
        global DVIBuilder
        if DVIBuilder is None:
            # The output suffix is hard-wired to '.dvi' because TeX itself
            # fixes the output file name; no $DVISUFFIX construction
            # variable is consulted.
            DVIBuilder = SCons.Builder.Builder(
                action={},
                source_scanner=SCons.Tool.LaTeXScanner,
                suffix='.dvi',
                emitter={},
                source_ext_match=None)
        env['BUILDERS']['DVI'] = DVIBuilder
def exists(env):
    """Report tool availability.

    Only a skeleton Builder is installed by this Tool, so a direct
    reference to it always succeeds — no external program is required.
    """
    return 1
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| bsd-3-clause |
filipposantovito/suds-jurko | suds/client.py | 9 | 30972 | # This program is free software; you can redistribute it and/or modify it under
# the terms of the (LGPL) GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Library Lesser General Public License
# for more details at ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Service proxy implementation providing access to web services.
"""
import suds
from suds import *
import suds.bindings.binding
from suds.builder import Builder
import suds.cache
import suds.metrics as metrics
from suds.options import Options
from suds.plugin import PluginContainer
from suds.properties import Unskin
from suds.reader import DefinitionsReader
from suds.resolver import PathResolver
from suds.sax.document import Document
import suds.sax.parser
from suds.servicedefinition import ServiceDefinition
import suds.transport
import suds.transport.https
from suds.umx.basic import Basic as UmxBasic
from suds.wsdl import Definitions
import sudsobject
from cookielib import CookieJar
from copy import deepcopy
import httplib
from logging import getLogger
log = getLogger(__name__)
class Client(UnicodeMixin):
    """
    A lightweight web service client.
    @ivar wsdl: The WSDL object.
    @type wsdl:L{Definitions}
    @ivar service: The service proxy used to invoke operations.
    @type service: L{Service}
    @ivar factory: The factory used to create objects.
    @type factory: L{Factory}
    @ivar sd: The service definition
    @type sd: L{ServiceDefinition}
    """
    @classmethod
    def items(cls, sobject):
        """
        Extract I{items} from a suds object.
        Much like the items() method works on I{dict}.
        @param sobject: A suds object
        @type sobject: L{Object}
        @return: A list of items contained in I{sobject}.
        @rtype: [(key, value),...]
        """
        return sudsobject.items(sobject)
    @classmethod
    def dict(cls, sobject):
        """
        Convert a sudsobject into a dictionary.
        @param sobject: A suds object
        @type sobject: L{Object}
        @return: A dictionary of items contained in I{sobject}.
        @rtype: dict
        """
        return sudsobject.asdict(sobject)
    @classmethod
    def metadata(cls, sobject):
        """
        Extract the metadata from a suds object.
        @param sobject: A suds object
        @type sobject: L{Object}
        @return: The object's metadata
        @rtype: L{sudsobject.Metadata}
        """
        return sobject.__metadata__
    def __init__(self, url, **kwargs):
        """
        @param url: The URL for the WSDL.
        @type url: str
        @param kwargs: keyword arguments.
        @see: L{Options}
        """
        options = Options()
        options.transport = suds.transport.https.HttpAuthenticated()
        self.options = options
        # Cache WSDL/XSD documents for one day unless the caller passed an
        # explicit "cache" keyword argument (which may be None to disable).
        if "cache" not in kwargs:
            kwargs["cache"] = suds.cache.ObjectCache(days=1)
        self.set_options(**kwargs)
        reader = DefinitionsReader(options, Definitions)
        self.wsdl = reader.open(url)
        # Notify registered plugins that the WSDL has been loaded.
        plugins = PluginContainer(options.plugins)
        plugins.init.initialized(wsdl=self.wsdl)
        self.factory = Factory(self.wsdl)
        self.service = ServiceSelector(self, self.wsdl.services)
        # One ServiceDefinition per WSDL service, used for introspection
        # and for the textual dump produced by __unicode__().
        self.sd = []
        for s in self.wsdl.services:
            sd = ServiceDefinition(self.wsdl, s)
            self.sd.append(sd)
    def set_options(self, **kwargs):
        """
        Set options.
        @param kwargs: keyword arguments.
        @see: L{Options}
        """
        p = Unskin(self.options)
        p.update(kwargs)
    def add_prefix(self, prefix, uri):
        """
        Add I{static} mapping of an XML namespace prefix to a namespace.
        Useful for cases when a WSDL and referenced XSD schemas make heavy use
        of namespaces and those namespaces are subject to change.
        @param prefix: An XML namespace prefix.
        @type prefix: str
        @param uri: An XML namespace URI.
        @type uri: str
        @raise Exception: prefix already mapped.
        """
        root = self.wsdl.root
        mapped = root.resolvePrefix(prefix, None)
        if mapped is None:
            root.addPrefix(prefix, uri)
            return
        # Re-mapping a prefix to the same URI is a harmless no-op; only a
        # conflicting mapping is an error.
        if mapped[1] != uri:
            raise Exception('"%s" already mapped as "%s"' % (prefix, mapped))
    def clone(self):
        """
        Get a shallow clone of this object.
        The clone only shares the WSDL. All other attributes are unique to the
        cloned object including options.
        @return: A shallow clone.
        @rtype: L{Client}
        """
        class Uninitialized(Client):
            # Bypasses Client.__init__ so the clone can be populated by
            # hand without re-reading/re-parsing the WSDL document.
            def __init__(self):
                pass
        clone = Uninitialized()
        clone.options = Options()
        cp = Unskin(clone.options)
        mp = Unskin(self.options)
        # Options are deep-copied so the clone's settings are independent.
        cp.update(deepcopy(mp))
        clone.wsdl = self.wsdl
        clone.factory = self.factory
        clone.service = ServiceSelector(clone, self.wsdl.services)
        clone.sd = self.sd
        return clone
    def __unicode__(self):
        # Python 2 string protocol; UnicodeMixin wires this up to __str__
        # as appropriate for the running interpreter.
        s = ["\n"]
        s.append("Suds ( https://fedorahosted.org/suds/ )")
        s.append(" version: %s" % (suds.__version__,))
        if suds.__build__:
            s.append(" build: %s" % (suds.__build__,))
        for sd in self.sd:
            s.append("\n\n%s" % (unicode(sd),))
        return "".join(s)
class Factory:
    """
    A factory for instantiating types defined in the WSDL.
    @ivar resolver: A schema type resolver.
    @type resolver: L{PathResolver}
    @ivar builder: A schema object builder.
    @type builder: L{Builder}
    """
    def __init__(self, wsdl):
        """
        @param wsdl: A schema object.
        @type wsdl: L{wsdl.Definitions}
        """
        self.wsdl = wsdl
        self.resolver = PathResolver(wsdl)
        self.builder = Builder(self.resolver)
    def create(self, name):
        """
        Create a WSDL type by name.
        @param name: The name of a type defined in the WSDL.
        @type name: str
        @return: The requested object.
        @rtype: L{Object}
        @raise TypeNotFound: When I{name} does not resolve to a schema type.
        @raise BuildError: When constructing the object fails.
        """
        timer = metrics.Timer()
        timer.start()
        # NOTE: renamed from "type" to avoid shadowing the builtin.
        resolved = self.resolver.find(name)
        if resolved is None:
            raise TypeNotFound(name)
        if resolved.enum():
            # Enumerations become simple objects whose attributes are the
            # enumeration value names mapped to themselves.
            result = sudsobject.Factory.object(name)
            for e, a in resolved.children():
                setattr(result, e.name, e.name)
        else:
            try:
                result = self.builder.build(resolved)
            except Exception as e:
                # "except X as e" replaces the Python-2-only comma form.
                log.error("create '%s' failed", name, exc_info=True)
                raise BuildError(name, e)
        timer.stop()
        metrics.log.debug("%s created: %s", name, timer)
        return result
    def separator(self, ps):
        """
        Set the path separator.
        @param ps: The new path separator.
        @type ps: char
        """
        self.resolver = PathResolver(self.wsdl, ps)
class ServiceSelector:
    """
    The B{service} selector is used to select a web service.
    Most WSDLs only define a single service in which case access by subscript
    is passed through to a L{PortSelector}. This is also the behavior when a
    I{default} service has been specified. In cases where multiple services
    have been defined and no default has been specified, the service is found
    by name (or index) and a L{PortSelector} for the service is returned. In
    all cases, attribute access is forwarded to the L{PortSelector} for either
    the I{first} service or the I{default} service (when specified).
    @ivar __client: A suds client.
    @type __client: L{Client}
    @ivar __services: A list of I{WSDL} services.
    @type __services: list
    """
    def __init__(self, client, services):
        """
        @param client: A suds client.
        @type client: L{Client}
        @param services: A list of I{WSDL} services.
        @type services: list
        """
        self.__client = client
        self.__services = services
    def __getattr__(self, name):
        """
        Attribute access is forwarded to the L{PortSelector}.
        Uses the I{default} service if specified or the I{first} service
        otherwise.
        @param name: Method name.
        @type name: str
        @return: A L{PortSelector}.
        @rtype: L{PortSelector}.
        """
        default = self.__ds()
        if default is None:
            port = self.__find(0)
        else:
            port = default
        return getattr(port, name)
    def __getitem__(self, name):
        """
        Provides I{service} selection by name (string) or index (integer).
        In cases where only a single service is defined or a I{default} has
        been specified, the request is forwarded to the L{PortSelector}.
        @param name: The name (or index) of a service.
        @type name: int|str
        @return: A L{PortSelector} for the specified service.
        @rtype: L{PortSelector}.
        """
        if len(self.__services) == 1:
            port = self.__find(0)
            return port[name]
        default = self.__ds()
        if default is not None:
            port = default
            return port[name]
        return self.__find(name)
    def __find(self, name):
        """
        Find a I{service} by name (string) or index (integer).
        @param name: The name (or index) of a service.
        @type name: int|str
        @return: A L{PortSelector} for the found service.
        @rtype: L{PortSelector}.
        @raise ServiceNotFound: When no service matches I{name}.
        """
        service = None
        if not self.__services:
            # Call-form raise replaces the Python-2-only statement form.
            raise Exception("No services defined")
        if isinstance(name, int):
            try:
                service = self.__services[name]
                name = service.name
            except IndexError:
                raise ServiceNotFound("at [%d]" % (name,))
        else:
            for s in self.__services:
                if name == s.name:
                    service = s
                    break
        if service is None:
            raise ServiceNotFound(name)
        return PortSelector(self.__client, service.ports, name)
    def __ds(self):
        """
        Get the I{default} service if defined in the I{options}.
        @return: A L{PortSelector} for the I{default} service.
        @rtype: L{PortSelector}.
        """
        ds = self.__client.options.service
        if ds is not None:
            return self.__find(ds)
class PortSelector:
    """
    The B{port} selector is used to select a I{web service} B{port}.
    In cases where multiple ports have been defined and no default has been
    specified, the port is found by name (or index) and a L{MethodSelector} for
    the port is returned. In all cases, attribute access is forwarded to the
    L{MethodSelector} for either the I{first} port or the I{default} port (when
    specified).
    @ivar __client: A suds client.
    @type __client: L{Client}
    @ivar __ports: A list of I{service} ports.
    @type __ports: list
    @ivar __qn: The I{qualified} name of the port (used for logging).
    @type __qn: str
    """
    def __init__(self, client, ports, qn):
        """
        @param client: A suds client.
        @type client: L{Client}
        @param ports: A list of I{service} ports.
        @type ports: list
        @param qn: The name of the service.
        @type qn: str
        """
        self.__client = client
        self.__ports = ports
        self.__qn = qn
    def __getattr__(self, name):
        """
        Attribute access is forwarded to the L{MethodSelector}.
        Uses the I{default} port when specified or the I{first} port otherwise.
        @param name: The name of a method.
        @type name: str
        @return: A L{MethodSelector}.
        @rtype: L{MethodSelector}.
        """
        default = self.__dp()
        if default is None:
            m = self.__find(0)
        else:
            m = default
        return getattr(m, name)
    def __getitem__(self, name):
        """
        Provides I{port} selection by name (string) or index (integer).
        In cases where only a single port is defined or a I{default} has been
        specified, the request is forwarded to the L{MethodSelector}.
        @param name: The name (or index) of a port.
        @type name: int|str
        @return: A L{MethodSelector} for the specified port.
        @rtype: L{MethodSelector}.
        """
        default = self.__dp()
        if default is None:
            return self.__find(name)
        return default
    def __find(self, name):
        """
        Find a I{port} by name (string) or index (integer).
        @param name: The name (or index) of a port.
        @type name: int|str
        @return: A L{MethodSelector} for the found port.
        @rtype: L{MethodSelector}.
        @raise PortNotFound: When no port matches I{name}.
        """
        port = None
        if not self.__ports:
            # Call-form raise replaces the Python-2-only statement form.
            raise Exception("No ports defined: %s" % (self.__qn,))
        if isinstance(name, int):
            qn = "%s[%d]" % (self.__qn, name)
            try:
                port = self.__ports[name]
            except IndexError:
                raise PortNotFound(qn)
        else:
            qn = ".".join((self.__qn, name))
            for p in self.__ports:
                if name == p.name:
                    port = p
                    break
        if port is None:
            raise PortNotFound(qn)
        qn = ".".join((self.__qn, port.name))
        return MethodSelector(self.__client, port.methods, qn)
    def __dp(self):
        """
        Get the I{default} port if defined in the I{options}.
        @return: A L{MethodSelector} for the I{default} port.
        @rtype: L{MethodSelector}.
        """
        dp = self.__client.options.port
        if dp is not None:
            return self.__find(dp)
class MethodSelector:
    """
    The B{method} selector is used to select a B{method} by name.
    @ivar __client: A suds client.
    @type __client: L{Client}
    @ivar __methods: A dictionary of methods.
    @type __methods: dict
    @ivar __qn: The I{qualified} name of the method (used for logging).
    @type __qn: str
    """
    def __init__(self, client, methods, qn):
        """
        @param client: A suds client.
        @type client: L{Client}
        @param methods: A dictionary of methods.
        @type methods: dict
        @param qn: The I{qualified} name of the port.
        @type qn: str
        """
        self.__client = client
        self.__methods = methods
        self.__qn = qn
    def __getattr__(self, name):
        """
        Get a method by name and return it in an I{execution wrapper}.
        @param name: The name of a method.
        @type name: str
        @return: An I{execution wrapper} for the specified method name.
        @rtype: L{Method}
        """
        # Attribute access simply delegates to subscript access.
        return self[name]
    def __getitem__(self, name):
        """
        Get a method by name and return it in an I{execution wrapper}.
        @param name: The name of a method.
        @type name: str
        @return: An I{execution wrapper} for the specified method name.
        @rtype: L{Method}
        @raise MethodNotFound: When no method matches I{name}.
        """
        m = self.__methods.get(name)
        if m is None:
            qn = ".".join((self.__qn, name))
            # Call-form raise replaces the Python-2-only statement form.
            raise MethodNotFound(qn)
        return Method(self.__client, m)
class Method:
    """
    The I{method} (namespace) object.
    @ivar client: A client object.
    @type client: L{Client}
    @ivar method: A I{WSDL} method.
    @type method: I{raw} Method.
    """
    def __init__(self, client, method):
        """
        @param client: A client object.
        @type client: L{Client}
        @param method: A I{raw} method.
        @type method: I{raw} Method.
        """
        self.client = client
        self.method = method
    def __call__(self, *args, **kwargs):
        """Invoke the method.

        Depending on the ``faults`` option, a reported SOAP fault is either
        re-raised as a L{WebFault} or returned as a (status, fault) tuple.
        """
        clientclass = self.clientclass(kwargs)
        client = clientclass(self.client, self.method)
        try:
            return client.invoke(args, kwargs)
        except WebFault as e:
            # "except X as e" replaces the Python-2-only comma form.
            if self.faults():
                raise
            return httplib.INTERNAL_SERVER_ERROR, e
    def faults(self):
        """Get faults option."""
        return self.client.options.faults
    def clientclass(self, kwargs):
        """Get SOAP client class (simulated or real)."""
        if _SimClient.simulation(kwargs):
            return _SimClient
        return _SoapClient
class RequestContext:
    """
    A request context.
    Returned by a suds Client when a web service operation is invoked with
    the ``nosend`` option enabled. The caller transmits the request envelope
    themselves and hands the reply back via L{process_reply}.
    @ivar envelope: The SOAP request envelope.
    @type envelope: I{bytes}
    """
    def __init__(self, process_reply, envelope):
        """
        @param process_reply: A callback for processing a user defined reply.
        @type process_reply: I{callable}
        @param envelope: The SOAP request envelope.
        @type envelope: I{bytes}
        """
        self.envelope = envelope
        self.__process_reply = process_reply
    def process_reply(self, reply, status=None, description=None):
        """
        Re-entry for processing a successful reply.
        Depending on how the ``retxml`` option is set, may return the SOAP
        reply XML or process it and return the Python object representing the
        returned value.
        @param reply: The SOAP reply envelope.
        @type reply: I{bytes}
        @param status: The HTTP status code.
        @type status: int
        @param description: Additional status description.
        @type description: I{bytes}
        @return: The invoked web service operation return value.
        @rtype: I{builtin}|I{subclass of} L{Object}|I{bytes}|I{None}
        """
        handler = self.__process_reply
        return handler(reply, status, description)
class _SoapClient:
    """
    An internal lightweight SOAP based web service operation client.
    Each instance is constructed for specific web service operation and knows
    how to:
      - Construct a SOAP request for it.
      - Transport a SOAP request for it using a configured transport.
      - Receive a SOAP reply using a configured transport.
      - Process the received SOAP reply.
    Depending on the given suds options, may do all the tasks listed above or
    may stop the process at an earlier point and return some intermediate
    result, e.g. the constructed SOAP request or the raw received SOAP reply.
    See the invoke() method for more detailed information.
    @ivar service: The target method.
    @type service: L{Service}
    @ivar method: A target method.
    @type method: L{Method}
    @ivar options: A dictonary of options.
    @type options: dict
    @ivar cookiejar: A cookie jar.
    @type cookiejar: libcookie.CookieJar
    """
    def __init__(self, client, method):
        """
        @param client: A suds client.
        @type client: L{Client}
        @param method: A target method.
        @type method: L{Method}
        """
        self.client = client
        self.method = method
        self.options = client.options
        self.cookiejar = CookieJar()
    def invoke(self, args, kwargs):
        """
        Invoke a specified web service method.
        Depending on how the ``nosend`` & ``retxml`` options are set, may do
        one of the following:
          * Return a constructed web service operation SOAP request without
            sending it to the web service.
          * Invoke the web service operation and return its SOAP reply XML.
          * Invoke the web service operation, process its results and return
            the Python object representing the returned value.
        When returning a SOAP request, the request is wrapped inside a
        RequestContext object allowing the user to acquire a corresponding SOAP
        reply himself and then pass it back to suds for further processing.
        Constructed request data is automatically processed using registered
        plugins and serialized into a byte-string. Exact request XML formatting
        may be affected by the ``prettyxml`` suds option.
        @param args: A list of args for the method invoked.
        @type args: list|tuple
        @param kwargs: Named (keyword) args for the method invoked.
        @type kwargs: dict
        @return: SOAP request, SOAP reply or a web service return value.
        @rtype: L{RequestContext}|I{builtin}|I{subclass of} L{Object}|I{bytes}|
            I{None}
        """
        timer = metrics.Timer()
        timer.start()
        binding = self.method.binding.input
        soapenv = binding.get_message(self.method, args, kwargs)
        timer.stop()
        method_name = self.method.name
        metrics.log.debug("message for '%s' created: %s", method_name, timer)
        timer.start()
        result = self.send(soapenv)
        timer.stop()
        metrics.log.debug("method '%s' invoked: %s", method_name, timer)
        return result
    def send(self, soapenv):
        """
        Send SOAP message.
        Depending on how the ``nosend`` & ``retxml`` options are set, may do
        one of the following:
          * Return a constructed web service operation request without sending
            it to the web service.
          * Invoke the web service operation and return its SOAP reply XML.
          * Invoke the web service operation, process its results and return
            the Python object representing the returned value.
        @param soapenv: A SOAP envelope to send.
        @type soapenv: L{Document}
        @return: SOAP request, SOAP reply or a web service return value.
        @rtype: L{RequestContext}|I{builtin}|I{subclass of} L{Object}|I{bytes}|
            I{None}
        """
        location = self.__location()
        log.debug("sending to (%s)\nmessage:\n%s", location, soapenv)
        plugins = PluginContainer(self.options.plugins)
        plugins.message.marshalled(envelope=soapenv.root())
        # Serialize the envelope (pretty or compact) before handing it to
        # the "sending" plugin hook and the transport.
        if self.options.prettyxml:
            soapenv = soapenv.str()
        else:
            soapenv = soapenv.plain()
        soapenv = soapenv.encode("utf-8")
        ctx = plugins.message.sending(envelope=soapenv)
        soapenv = ctx.envelope
        if self.options.nosend:
            # Caller will transport the request himself.
            return RequestContext(self.process_reply, soapenv)
        request = suds.transport.Request(location, soapenv)
        request.headers = self.__headers()
        try:
            timer = metrics.Timer()
            timer.start()
            reply = self.options.transport.send(request)
            timer.stop()
            metrics.log.debug("waited %s on server reply", timer)
        except suds.transport.TransportError as e:
            # "except X as e" replaces the Python-2-only comma form.
            content = e.fp and e.fp.read() or ""
            return self.process_reply(content, e.httpcode, tostr(e))
        return self.process_reply(reply.message, None, None)
    def process_reply(self, reply, status, description):
        """
        Process a web service operation SOAP reply.
        Depending on how the ``retxml`` option is set, may return the SOAP
        reply XML or process it and return the Python object representing the
        returned value.
        @param reply: The SOAP reply envelope.
        @type reply: I{bytes}
        @param status: The HTTP status code (None indicates httplib.OK).
        @type status: int|I{None}
        @param description: Additional status description.
        @type description: str
        @return: The invoked web service operation return value.
        @rtype: I{builtin}|I{subclass of} L{Object}|I{bytes}|I{None}
        """
        if status is None:
            status = httplib.OK
        debug_message = "Reply HTTP status - %d" % (status,)
        if status in (httplib.ACCEPTED, httplib.NO_CONTENT):
            log.debug(debug_message)
            return
        #TODO: Consider whether and how to allow plugins to handle error,
        # httplib.ACCEPTED & httplib.NO_CONTENT replies as well as successful
        # ones.
        if status == httplib.OK:
            log.debug("%s\n%s", debug_message, reply)
        else:
            log.debug("%s - %s\n%s", debug_message, description, reply)
        plugins = PluginContainer(self.options.plugins)
        ctx = plugins.message.received(reply=reply)
        reply = ctx.reply
        # SOAP standard states that SOAP errors must be accompanied by HTTP
        # status code 500 - internal server error:
        #
        # From SOAP 1.1 specification:
        #   In case of a SOAP error while processing the request, the SOAP HTTP
        #   server MUST issue an HTTP 500 "Internal Server Error" response and
        #   include a SOAP message in the response containing a SOAP Fault
        #   element (see section 4.4) indicating the SOAP processing error.
        #
        # From WS-I Basic profile:
        #   An INSTANCE MUST use a "500 Internal Server Error" HTTP status code
        #   if the response message is a SOAP Fault.
        replyroot = None
        if status in (httplib.OK, httplib.INTERNAL_SERVER_ERROR):
            replyroot = _parse(reply)
            plugins.message.parsed(reply=replyroot)
        fault = self.__get_fault(replyroot)
        if fault:
            if status != httplib.INTERNAL_SERVER_ERROR:
                log.warn("Web service reported a SOAP processing fault "
                    "using an unexpected HTTP status code %d. Reporting "
                    "as an internal server error.", status)
            if self.options.faults:
                raise WebFault(fault, replyroot)
            return httplib.INTERNAL_SERVER_ERROR, fault
        if status != httplib.OK:
            if self.options.faults:
                #TODO: Use a more specific exception class here.
                raise Exception((status, description))
            return status, description
        if self.options.retxml:
            return reply
        result = replyroot and self.method.binding.output.get_reply(
            self.method, replyroot)
        ctx = plugins.message.unmarshalled(reply=result)
        result = ctx.reply
        if self.options.faults:
            return result
        return httplib.OK, result
    def __get_fault(self, replyroot):
        """
        Extract fault information from a SOAP reply.
        Returns an I{unmarshalled} fault L{Object} or None in case the given
        XML document does not contain a SOAP <Fault> element.
        @param replyroot: A SOAP reply message root XML element or None.
        @type replyroot: L{Element}|I{None}
        @return: A fault object.
        @rtype: L{Object}
        """
        envns = suds.bindings.binding.envns
        soapenv = replyroot and replyroot.getChild("Envelope", envns)
        soapbody = soapenv and soapenv.getChild("Body", envns)
        fault = soapbody and soapbody.getChild("Fault", envns)
        return fault is not None and UmxBasic().process(fault)
    def __headers(self):
        """
        Get HTTP headers for a HTTP/HTTPS SOAP request.
        @return: A dictionary of header/values.
        @rtype: dict
        """
        action = self.method.soap.action
        if isinstance(action, unicode):
            action = action.encode("utf-8")
        result = {
            "Content-Type": "text/xml; charset=utf-8",
            "SOAPAction": action}
        # User-supplied headers override the defaults above.
        result.update(**self.options.headers)
        log.debug("headers = %s", result)
        return result
    def __location(self):
        """Returns the SOAP request's target location URL."""
        return Unskin(self.options).get("location", self.method.location)
class _SimClient(_SoapClient):
    """
    Loopback _SoapClient used for SOAP request/reply simulation.
    Used when a web service operation is invoked with injected SOAP request or
    reply data.
    """
    # Keyword-argument key used to pass the injected simulation data.
    __injkey = "__inject"
    @classmethod
    def simulation(cls, kwargs):
        """Get whether injected data has been specified in I{kwargs}."""
        # "in" replaces dict.has_key(), which was removed in Python 3.
        return _SimClient.__injkey in kwargs
    def invoke(self, args, kwargs):
        """
        Invoke a specified web service method.
        Uses an injected SOAP request/response instead of a regularly
        constructed/received one.
        Depending on how the ``nosend`` & ``retxml`` options are set, may do
        one of the following:
          * Return a constructed web service operation request without sending
            it to the web service.
          * Invoke the web service operation and return its SOAP reply XML.
          * Invoke the web service operation, process its results and return
            the Python object representing the returned value.
        @param args: Positional arguments for the method invoked.
        @type args: list|tuple
        @param kwargs: Keyword arguments for the method invoked.
        @type kwargs: dict
        @return: SOAP request, SOAP reply or a web service return value.
        @rtype: L{RequestContext}|I{builtin}|I{subclass of} L{Object}|I{bytes}|
            I{None}
        @raise Exception: When neither "msg" nor "reply" data was injected.
        """
        simulation = kwargs.pop(self.__injkey)
        msg = simulation.get("msg")
        if msg is not None:
            assert msg.__class__ is suds.byte_str_class
            return self.send(_parse(msg))
        # No injected request - construct one normally (for logging only)
        # and process the injected reply instead.
        msg = self.method.binding.input.get_message(self.method, args, kwargs)
        log.debug("inject (simulated) send message:\n%s", msg)
        reply = simulation.get("reply")
        if reply is not None:
            assert reply.__class__ is suds.byte_str_class
            status = simulation.get("status")
            description = simulation.get("description")
            if description is None:
                description = "injected reply"
            return self.process_reply(reply, status, description)
        raise Exception("reply or msg injection parameter expected")
def _parse(string):
"""
Parses given XML document content.
Returns the resulting root XML element node or None if the given XML
content is empty.
@param string: XML document content to parse.
@type string: I{bytes}
@return: Resulting root XML element node or None.
@rtype: L{Element}|I{None}
"""
if string:
return suds.sax.parser.Parser().parse(string=string)
| lgpl-3.0 |
davogler/POSTv3 | accounts/views.py | 1 | 4504 | from django.shortcuts import render, HttpResponseRedirect, Http404
from django.contrib.auth import logout, login, authenticate
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
from django.core.exceptions import ObjectDoesNotExist
from django import forms
from django.views.generic import ListView, DetailView, TemplateView
from django.conf import settings
from django.contrib.auth import get_user_model
User = get_user_model()
from accounts.forms import LoginForm, SignupForm
from orders.models import Order
from cart.models import Cart, CartItem
# Create your views here.
def logout_view(request):
    """Log the current user out and redirect to the login page."""
    # Removed the stray Python-2 debug print and the redundant '%s' wrapping
    # of reverse()'s (already string) result.
    logout(request)
    return HttpResponseRedirect(reverse("auth_login"))
def login_view(request):
    """Render the login form and authenticate the user on a valid POST.

    Honors a ``next`` query parameter for the post-login redirect, falling
    back to ``settings.LOGIN_REDIRECT_URL``.
    """
    form = LoginForm(request.POST or None)
    btn = "Login"
    if form.is_valid():
        username = form.cleaned_data['username']
        password = form.cleaned_data['password']
        user = authenticate(username=username, password=password)
        if user is not None:
            login(request, user)
            next = request.GET.get('next', '')
            messages.success(request, "Successfully Logged In. Welcome Back!")
            if next:
                return HttpResponseRedirect(next)
            else:
                return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL)
        # authenticate() returns None for bad/stale credentials; fall
        # through and re-render the form instead of crashing in login().
        messages.error(request, "Invalid username or password.")
    context = {
        "form": form,
        "submit_btn": btn,
    }
    return render(request, "accounts/login-signup.html", context)
def signup(request):
    """Create a new user account from the signup form.

    The email address doubles as the username. On success, the user is
    authenticated and logged in. When the signup happens mid-purchase (a
    ``next`` parameter and a cart are present), gift recipients created
    while anonymous are re-attached to the new user.
    """
    form = SignupForm(request.POST or None)
    if form.is_valid():
        username = form.cleaned_data['email']
        password = form.cleaned_data['password']
        first_name = form.cleaned_data['first_name']
        last_name = form.cleaned_data['last_name']
        email = form.cleaned_data['email']
        next = request.POST.get('next', '')
        # check for duplicate username/email
        try:
            user = User.objects.get(username=username)
            messages.error(request, "Whoops, this user exists already. Did you forget your password?")
            # whoops user exists
            return HttpResponseRedirect(reverse("signup"))
        except ObjectDoesNotExist:
            # Narrowed from a bare "except:" - only a missing user should
            # trigger account creation; any other error must propagate.
            user = User.objects.create_user(
                username=username,
                password=password,
                first_name=first_name,
                last_name=last_name,
                email=email
            )
            user = authenticate(username=username, password=password)
            login(request, user)
            next = request.GET.get('next', '')
            messages.success(request, "Thank you, you've successfully signed up for an account! You are now logged in")
            if next:
                # Signup happened mid-buy: tag the cart's recipients with
                # the newly created user.
                the_cart_id = request.session['cart_id']
                cart = Cart.objects.get(id=the_cart_id)
                subbies = CartItem.subbie_type.filter(cart=cart)
                order = Order.objects.get(cart=cart)
                if subbies:
                    for sub in subbies:
                        recip = sub.recipient
                        if recip is not None:
                            recip.user = user
                            recip.save()
                main_recipient = order.main_recipient
                if main_recipient is not None:
                    main_recipient.user = user
                    main_recipient.save()
                return HttpResponseRedirect(next)
            else:
                return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL)
    context = {
        "form": form,
    }
    template = "accounts/signup.html"
    return render(request, template, context)
class DashTemplateView(TemplateView):
    """Render the account dashboard page for the logged-in user."""
    template_name = 'accounts/dashboard.html'
    # NOTE(review): disabled context hook kept for reference - presumably it
    # attached the user's Client record; confirm before re-enabling.
    # def get_context_data(self, **kwargs):
    #     context = super(DashTemplateView, self).get_context_data(**kwargs)
    #     context['client'] = Client.objects.get(user=self.request.user)
    #     return context
| mit |
openstack/nova | nova/tests/unit/fake_processutils.py | 3 | 3472 | # Copyright (c) 2011 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This modules stubs out functions in oslo_concurrency.processutils."""
import re
from eventlet import greenthread
from oslo_concurrency import processutils
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
_fake_execute_repliers = []
_fake_execute_log = []
def fake_execute_get_log():
    """Return the accumulated log of faked command-line strings."""
    return _fake_execute_log
def fake_execute_clear_log():
    """Reset the log of faked command-line strings."""
    global _fake_execute_log
    _fake_execute_log = []
def fake_execute_set_repliers(repliers):
    """Allows the client to configure replies to commands.

    :param repliers: list of (regex, handler) pairs; the first regex that
        matches a faked command string selects its handler (see
        fake_execute() for how the handler is invoked).
    """
    global _fake_execute_repliers
    _fake_execute_repliers = repliers
def fake_execute_default_reply_handler(*_args, **_kwargs):
    """Fallback reply handler used when no configured replier matches.

    Accepts and ignores any arguments, mimicking a command that ran
    silently: both stdout and stderr are empty strings.
    """
    return ('', '')
def fake_execute(*cmd_parts, **kwargs):
    """Stub replacement for processutils.execute().

    Records the command, then answers it either via a preconfigured
    replier (string or callable) or via the default empty-output handler.
    """
    global _fake_execute_repliers

    process_input = kwargs.get('process_input', None)
    check_exit_code = kwargs.get('check_exit_code', 0)
    delay_on_retry = kwargs.get('delay_on_retry', True)
    attempts = kwargs.get('attempts', 1)
    run_as_root = kwargs.get('run_as_root', False)

    # Record the command exactly as the real execute() would have seen it.
    cmd_str = ' '.join(str(part) for part in cmd_parts)
    LOG.debug("Faking execution of cmd (subprocess): %s", cmd_str)
    _fake_execute_log.append(cmd_str)

    # Select the first configured replier whose regex matches; fall back
    # to the default handler that produces empty output.
    selected = fake_execute_default_reply_handler
    for replier in _fake_execute_repliers:
        if re.match(replier[0], cmd_str):
            selected = replier[1]
            LOG.debug('Faked command matched %s', replier[0])
            break

    if isinstance(selected, str):
        # A plain-string replier is used verbatim as stdout.
        reply = selected, ''
    else:
        try:
            # Otherwise the replier is callable; invoke it with the same
            # keyword arguments the real execute() would receive.
            reply = selected(cmd_parts,
                             process_input=process_input,
                             delay_on_retry=delay_on_retry,
                             attempts=attempts,
                             run_as_root=run_as_root,
                             check_exit_code=check_exit_code)
        except processutils.ProcessExecutionError as e:
            LOG.debug('Faked command raised an exception %s', e)
            raise

    LOG.debug("Reply to faked command is stdout='%(stdout)s' "
              "stderr='%(stderr)s'", {'stdout': reply[0], 'stderr': reply[1]})

    # Replicate the sleep call in the real function
    greenthread.sleep(0)
    return reply
def stub_out_processutils_execute(test):
    """Install fake_execute() in place of processutils.execute for *test*.

    Also resets the configured repliers and the recorded command log so
    each test starts from a clean slate.

    :param test: test case providing a ``stub_out(name, obj)`` helper
    """
    fake_execute_set_repliers([])
    fake_execute_clear_log()
    test.stub_out('oslo_concurrency.processutils.execute', fake_execute)
| apache-2.0 |
t-wissmann/qutebrowser | qutebrowser/keyinput/modeparsers.py | 1 | 12445 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2020 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""KeyChainParser for "hint" and "normal" modes.
Module attributes:
STARTCHARS: Possible chars for starting a commandline input.
"""
import typing
import traceback
import enum
from PyQt5.QtCore import pyqtSlot, Qt, QObject
from PyQt5.QtGui import QKeySequence, QKeyEvent
from qutebrowser.browser import hints
from qutebrowser.commands import cmdexc
from qutebrowser.config import config
from qutebrowser.keyinput import basekeyparser, keyutils, macros
from qutebrowser.utils import usertypes, log, message, objreg, utils
if typing.TYPE_CHECKING:
from qutebrowser.commands import runners
# Characters which start a commandline input (see module docstring).
STARTCHARS = ":/?"
# Nature of the last keypress in hint mode: nothing yet, part of the
# filter text, or part of a hint keychain.
LastPress = enum.Enum('LastPress', ['none', 'filtertext', 'keystring'])
class CommandKeyParser(basekeyparser.BaseKeyParser):

    """Key parser which dispatches matched key chains as commands.

    Attributes:
        _commandrunner: CommandRunner instance.
    """

    def __init__(self, win_id: int,
                 commandrunner: 'runners.CommandRunner',
                 parent: QObject = None) -> None:
        super().__init__(win_id, parent)
        self._commandrunner = commandrunner

    def execute(self, cmdstr: str, count: int = None) -> None:
        """Run *cmdstr* via the command runner, reporting errors to the UI."""
        try:
            self._commandrunner.run(cmdstr, count)
        except cmdexc.Error as err:
            message.error(str(err), stack=traceback.format_exc())
class NormalKeyParser(CommandKeyParser):

    """KeyParser for normal mode with added STARTCHARS detection and more.

    Attributes:
        _partial_timer: Timer to clear partial keypresses.
        _inhibited: Whether keypresses are currently being ignored.
        _inhibited_timer: Single-shot timer which ends the inhibition.
    """

    def __init__(self, win_id: int,
                 commandrunner: 'runners.CommandRunner',
                 parent: QObject = None) -> None:
        super().__init__(win_id, commandrunner, parent)
        self._read_config('normal')
        self._partial_timer = usertypes.Timer(self, 'partial-match')
        self._partial_timer.setSingleShot(True)
        self._partial_timer.timeout.connect(self._clear_partial_match)
        self._inhibited = False
        self._inhibited_timer = usertypes.Timer(self, 'normal-inhibited')
        self._inhibited_timer.setSingleShot(True)
        # Connect once here: connecting inside set_inhibited_timeout() (as
        # the code previously did) adds an additional connection on every
        # call, so the slot would fire multiple times per timeout.
        self._inhibited_timer.timeout.connect(self._clear_inhibited)

    def __repr__(self) -> str:
        return utils.get_repr(self)

    def handle(self, e: QKeyEvent, *,
               dry_run: bool = False) -> QKeySequence.SequenceMatch:
        """Override to abort if the key is a startchar."""
        txt = e.text().strip()
        if self._inhibited:
            self._debug_log("Ignoring key '{}', because the normal mode is "
                            "currently inhibited.".format(txt))
            return QKeySequence.NoMatch
        match = super().handle(e, dry_run=dry_run)
        # On a partial match, arm the timer which clears the pending
        # keystring if the chain isn't completed in time.
        if match == QKeySequence.PartialMatch and not dry_run:
            timeout = config.val.input.partial_timeout
            if timeout != 0:
                self._partial_timer.setInterval(timeout)
                self._partial_timer.start()
        return match

    def set_inhibited_timeout(self, timeout: int) -> None:
        """Ignore keypresses for the given duration (in milliseconds)."""
        if timeout != 0:
            self._debug_log("Inhibiting the normal mode for {}ms.".format(
                timeout))
            self._inhibited = True
            self._inhibited_timer.setInterval(timeout)
            self._inhibited_timer.start()

    @pyqtSlot()
    def _clear_partial_match(self) -> None:
        """Clear a partial keystring after a timeout."""
        self._debug_log("Clearing partial keystring {}".format(
            self._sequence))
        self._sequence = keyutils.KeySequence()
        self.keystring_updated.emit(str(self._sequence))

    @pyqtSlot()
    def _clear_inhibited(self) -> None:
        """Reset inhibition state after a timeout."""
        self._debug_log("Releasing inhibition state of normal mode.")
        self._inhibited = False
class PassthroughKeyParser(CommandKeyParser):
    """KeyChainParser which passes through normal keys.

    Used for insert/passthrough modes.

    Attributes:
        _mode: The mode this keyparser is for.
    """
    # Class flags interpreted by BaseKeyParser (see base class for exact
    # semantics): don't log every keypress, pass unhandled keys through,
    # and don't accept count prefixes.
    do_log = False
    passthrough = True
    supports_count = False
    def __init__(self, win_id: int,
                 mode: usertypes.KeyMode,
                 commandrunner: 'runners.CommandRunner',
                 parent: QObject = None) -> None:
        """Constructor.

        Args:
            win_id: The window ID this keyparser belongs to.
            mode: The mode this keyparser is for.
            commandrunner: CommandRunner used to execute matched bindings.
            parent: Qt parent.
        """
        super().__init__(win_id, commandrunner, parent)
        self._read_config(mode.name)
        self._mode = mode
    def __repr__(self) -> str:
        return utils.get_repr(self, mode=self._mode)
class PromptKeyParser(CommandKeyParser):
    """KeyParser for yes/no prompts."""
    # A count prefix makes no sense for a yes/no prompt.
    supports_count = False
    def __init__(self, win_id: int,
                 commandrunner: 'runners.CommandRunner',
                 parent: QObject = None) -> None:
        super().__init__(win_id, commandrunner, parent)
        # Load the key bindings from the 'yesno' config section.
        self._read_config('yesno')
    def __repr__(self) -> str:
        return utils.get_repr(self)
class HintKeyParser(CommandKeyParser):
    """KeyChainParser for hints.

    Attributes:
        _filtertext: The text to filter with.
        _hintmanager: The HintManager to use.
        _last_press: The nature of the last keypress, a LastPress member.
    """
    # A count prefix is not useful while typing hint labels.
    supports_count = False
    def __init__(self, win_id: int,
                 commandrunner: 'runners.CommandRunner',
                 hintmanager: hints.HintManager,
                 parent: QObject = None) -> None:
        super().__init__(win_id, commandrunner, parent)
        self._hintmanager = hintmanager
        self._filtertext = ''
        self._last_press = LastPress.none
        self._read_config('hint')
        # Keep the displayed hint labels in sync with the partially typed
        # keychain.
        self.keystring_updated.connect(self._hintmanager.handle_partial_key)
    def _handle_filter_key(self, e: QKeyEvent) -> QKeySequence.SequenceMatch:
        """Handle keys for string filtering.

        Called when the key didn't match any hint keychain; either extends
        the filter text or (for backspace) undoes the last filter/keychain
        input.

        Return:
            A QKeySequence match value; ExactMatch if the key was consumed.
        """
        log.keyboard.debug("Got filter key 0x{:x} text {}".format(
            e.key(), e.text()))
        if e.key() == Qt.Key_Backspace:
            log.keyboard.debug("Got backspace, mode {}, filtertext '{}', "
                               "sequence '{}'".format(self._last_press,
                                                      self._filtertext,
                                                      self._sequence))
            # Backspace removes from whichever input was extended last:
            # the filter text, or the pending hint keychain.
            if self._last_press != LastPress.keystring and self._filtertext:
                self._filtertext = self._filtertext[:-1]
                self._hintmanager.filter_hints(self._filtertext)
                return QKeySequence.ExactMatch
            elif self._last_press == LastPress.keystring and self._sequence:
                self._sequence = self._sequence[:-1]
                self.keystring_updated.emit(str(self._sequence))
                if not self._sequence and self._filtertext:
                    # Switch back to hint filtering mode (this can happen only
                    # in numeric mode after the number has been deleted).
                    self._hintmanager.filter_hints(self._filtertext)
                    self._last_press = LastPress.filtertext
                return QKeySequence.ExactMatch
            else:
                return QKeySequence.NoMatch
        elif self._hintmanager.current_mode() != 'number':
            # Free-text filtering is only available in number hint mode.
            return QKeySequence.NoMatch
        elif not e.text():
            # Non-printable / modifier-only key: nothing to filter with.
            return QKeySequence.NoMatch
        else:
            self._filtertext += e.text()
            self._hintmanager.filter_hints(self._filtertext)
            self._last_press = LastPress.filtertext
            return QKeySequence.ExactMatch
    def handle(self, e: QKeyEvent, *,
               dry_run: bool = False) -> QKeySequence.SequenceMatch:
        """Handle a new keypress and call the respective handlers."""
        if dry_run:
            return super().handle(e, dry_run=True)
        if keyutils.is_special_hint_mode(Qt.Key(e.key()), e.modifiers()):
            log.keyboard.debug("Got special key, clearing keychain")
            self.clear_keystring()
        assert not dry_run
        match = super().handle(e)
        if match == QKeySequence.PartialMatch:
            self._last_press = LastPress.keystring
        elif match == QKeySequence.ExactMatch:
            self._last_press = LastPress.none
        elif match == QKeySequence.NoMatch:
            # We couldn't find a keychain so we check if it's a special key.
            return self._handle_filter_key(e)
        else:
            raise ValueError("Got invalid match type {}!".format(match))
        return match
    def update_bindings(self, strings: typing.Sequence[str],
                        preserve_filter: bool = False) -> None:
        """Update bindings when the hint strings changed.

        Args:
            strings: A list of hint strings.
            preserve_filter: Whether to keep the current value of
                             `self._filtertext`.
        """
        self._read_config()
        # Bind each hint label to the command which follows that hint.
        self.bindings.update({keyutils.KeySequence.parse(s):
                              'follow-hint -s ' + s for s in strings})
        if not preserve_filter:
            self._filtertext = ''
class CaretKeyParser(CommandKeyParser):
    """KeyParser for caret mode."""
    # Marks this parser as pass-through (see BaseKeyParser for semantics).
    passthrough = True
    def __init__(self, win_id: int,
                 commandrunner: 'runners.CommandRunner',
                 parent: QObject = None) -> None:
        super().__init__(win_id, commandrunner, parent)
        # Load the key bindings from the 'caret' config section.
        self._read_config('caret')
class RegisterKeyParser(CommandKeyParser):
    """KeyParser for modes that record a register key.

    Attributes:
        _mode: One of KeyMode.set_mark, KeyMode.jump_mark, KeyMode.record_macro
               and KeyMode.run_macro.
    """
    # A count makes no sense for a single register/mark key.
    supports_count = False
    def __init__(self, win_id: int,
                 mode: usertypes.KeyMode,
                 commandrunner: 'runners.CommandRunner',
                 parent: QObject = None) -> None:
        super().__init__(win_id, commandrunner, parent)
        self._mode = mode
        self._read_config('register')
    def handle(self, e: QKeyEvent, *,
               dry_run: bool = False) -> QKeySequence.SequenceMatch:
        """Override to always match the next key and use the register."""
        # Give normal bindings (and dry runs) a chance first.
        match = super().handle(e, dry_run=dry_run)
        if match or dry_run:
            return match
        if keyutils.is_special(Qt.Key(e.key()), e.modifiers()):
            # this is not a proper register key, let it pass and keep going
            return QKeySequence.NoMatch
        key = e.text()
        tabbed_browser = objreg.get('tabbed-browser', scope='window',
                                    window=self._win_id)
        try:
            # Dispatch on the mode this parser instance was created for.
            if self._mode == usertypes.KeyMode.set_mark:
                tabbed_browser.set_mark(key)
            elif self._mode == usertypes.KeyMode.jump_mark:
                tabbed_browser.jump_mark(key)
            elif self._mode == usertypes.KeyMode.record_macro:
                macros.macro_recorder.record_macro(key)
            elif self._mode == usertypes.KeyMode.run_macro:
                macros.macro_recorder.run_macro(self._win_id, key)
            else:
                raise ValueError(
                    "{} is not a valid register mode".format(self._mode))
        except cmdexc.Error as err:
            # Surface the failure in the UI instead of crashing the parser.
            message.error(str(err), stack=traceback.format_exc())
        # A register mode only consumes a single key, then is left again.
        self.request_leave.emit(self._mode, "valid register key", True)
        return QKeySequence.ExactMatch
| gpl-3.0 |
infoxchange/lettuce | tests/integration/lib/Django-1.3/django/contrib/gis/management/commands/ogrinspect.py | 324 | 6252 | import os, sys
from optparse import make_option
from django.contrib.gis import gdal
from django.contrib.gis.management.base import ArgsCommand, CommandError
def layer_option(option, opt, value, parser):
    """
    `make_option` callback used by `ogrinspect` for its `layer_key`
    option.  A layer may be referenced either by integer index or by its
    string name, so integer-looking values are converted and anything
    else is stored verbatim.
    """
    try:
        parsed = int(value)
    except ValueError:
        # Not an integer index -- treat the value as a layer name.
        parsed = value
    setattr(parser.values, option.dest, parsed)
def list_option(option, opt, value, parser):
    """
    Callback for `make_option` for `ogrinspect` keywords that require
    a string list.  If the string is 'True'/'true' then the option
    value will be a boolean instead.

    :param value: raw option value; either the literal 'true' (any case)
        or a comma separated list of field names.
    """
    if value.lower() == 'true':
        dest = True
    else:
        # str.split already returns a list; no comprehension needed.
        dest = value.split(',')
    setattr(parser.values, option.dest, dest)
class Command(ArgsCommand):
    # NOTE: this module uses Python 2 syntax (``except gdal.OGRException, msg``
    # below) and predates Python 3 support.
    help = ('Inspects the given OGR-compatible data source (e.g., a shapefile) and outputs\n'
            'a GeoDjango model with the given model name. For example:\n'
            ' ./manage.py ogrinspect zipcode.shp Zipcode')
    args = '[data_source] [model_name]'
    # Command-line options; the `--blank`, `--decimal` and `--null` options
    # accept either a comma separated field list or 'true' (see list_option),
    # and `--layer` accepts an integer index or a layer name (see layer_option).
    option_list = ArgsCommand.option_list + (
        make_option('--blank', dest='blank', type='string', action='callback',
                    callback=list_option, default=False,
                    help='Use a comma separated list of OGR field names to add '
                    'the `blank=True` option to the field definition. Set with'
                    '`true` to apply to all applicable fields.'),
        make_option('--decimal', dest='decimal', type='string', action='callback',
                    callback=list_option, default=False,
                    help='Use a comma separated list of OGR float fields to '
                    'generate `DecimalField` instead of the default '
                    '`FloatField`. Set to `true` to apply to all OGR float fields.'),
        make_option('--geom-name', dest='geom_name', type='string', default='geom',
                    help='Specifies the model name for the Geometry Field '
                    '(defaults to `geom`)'),
        make_option('--layer', dest='layer_key', type='string', action='callback',
                    callback=layer_option, default=0,
                    help='The key for specifying which layer in the OGR data '
                    'source to use. Defaults to 0 (the first layer). May be '
                    'an integer or a string identifier for the layer.'),
        make_option('--multi-geom', action='store_true', dest='multi_geom', default=False,
                    help='Treat the geometry in the data source as a geometry collection.'),
        make_option('--name-field', dest='name_field',
                    help='Specifies a field name to return for the `__unicode__` function.'),
        make_option('--no-imports', action='store_false', dest='imports', default=True,
                    help='Do not include `from django.contrib.gis.db import models` '
                    'statement.'),
        make_option('--null', dest='null', type='string', action='callback',
                    callback=list_option, default=False,
                    help='Use a comma separated list of OGR field names to add '
                    'the `null=True` option to the field definition. Set with'
                    '`true` to apply to all applicable fields.'),
        make_option('--srid', dest='srid',
                    help='The SRID to use for the Geometry Field. If it can be '
                    'determined, the SRID of the data source is used.'),
        make_option('--mapping', action='store_true', dest='mapping',
                    help='Generate mapping dictionary for use with `LayerMapping`.')
    )
    requires_model_validation = False
    def handle_args(self, *args, **options):
        """Inspect the data source and return generated model source code."""
        try:
            data_source, model_name = args
        except ValueError:
            raise CommandError('Invalid arguments, must provide: %s' % self.args)
        if not gdal.HAS_GDAL:
            raise CommandError('GDAL is required to inspect geospatial data sources.')
        # TODO: Support non file-based OGR datasources.
        if not os.path.isfile(data_source):
            raise CommandError('The given data source cannot be found: "%s"' % data_source)
        # Removing options with `None` values.
        options = dict([(k, v) for k, v in options.items() if not v is None])
        # Getting the OGR DataSource from the string parameter.
        try:
            ds = gdal.DataSource(data_source)
        except gdal.OGRException, msg:
            raise CommandError(msg)
        # Whether the user wants to generate the LayerMapping dictionary as well.
        show_mapping = options.pop('mapping', False)
        # Popping the verbosity global option, as it's not accepted by `_ogrinspect`.
        verbosity = options.pop('verbosity', False)
        # Returning the output of ogrinspect with the given arguments
        # and options.
        from django.contrib.gis.utils.ogrinspect import _ogrinspect, mapping
        output = [s for s in _ogrinspect(ds, model_name, **options)]
        if show_mapping:
            # Constructing the keyword arguments for `mapping`, and
            # calling it on the data source.
            kwargs = {'geom_name' : options['geom_name'],
                      'layer_key' : options['layer_key'],
                      'multi_geom' : options['multi_geom'],
                      }
            mapping_dict = mapping(ds, **kwargs)
            # This extra legwork is so that the dictionary definition comes
            # out in the same order as the fields in the model definition.
            rev_mapping = dict([(v, k) for k, v in mapping_dict.items()])
            output.extend(['', '# Auto-generated `LayerMapping` dictionary for %s model' % model_name,
                           '%s_mapping = {' % model_name.lower()])
            output.extend(["    '%s' : '%s'," % (rev_mapping[ogr_fld], ogr_fld) for ogr_fld in ds[options['layer_key']].fields])
            output.extend(["    '%s' : '%s'," % (options['geom_name'], mapping_dict[options['geom_name']]), '}'])
        return '\n'.join(output)
| gpl-3.0 |
ivanhorvath/openshift-tools | ansible/roles/lib_oa_openshift/src/class/oc_configmap.py | 18 | 6202 | # pylint: skip-file
# flake8: noqa
# pylint: disable=too-many-arguments
class OCConfigMap(OpenShiftCLI):
    ''' Openshift ConfigMap Class

        ConfigMaps are a way to store data inside of objects
    '''
    def __init__(self,
                 name,
                 from_file,
                 from_literal,
                 state,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False):
        ''' Constructor for OCConfigMap

            :param name: configmap name
            :param from_file: dict of key -> file path pairs (or None)
            :param from_literal: dict of key -> literal value pairs (or None)
            :param state: desired state (list|absent|present)
            :param namespace: project/namespace to operate in
            :param kubeconfig: path to the kubeconfig to use
            :param verbose: enable debug output
        '''
        super(OCConfigMap, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
        self.name = name
        self.state = state
        # Lazily populated caches; see the `configmap` / `inc_configmap`
        # properties below.
        self._configmap = None
        self._inc_configmap = None
        self.from_file = from_file if from_file is not None else {}
        self.from_literal = from_literal if from_literal is not None else {}
    @property
    def configmap(self):
        # Current configmap as it exists on the cluster (fetched on first use).
        if self._configmap is None:
            self._configmap = self.get()
        return self._configmap
    @configmap.setter
    def configmap(self, inc_map):
        self._configmap = inc_map
    @property
    def inc_configmap(self):
        # Incoming (proposed) configmap, rendered via a dry-run create.
        if self._inc_configmap is None:
            results = self.create(dryrun=True, output=True)
            self._inc_configmap = results['results']
        return self._inc_configmap
    @inc_configmap.setter
    def inc_configmap(self, inc_map):
        self._inc_configmap = inc_map
    def from_file_to_params(self):
        '''return from_file entries as a list of --from-file cli arguments'''
        return ["--from-file={}={}".format(key, value) for key, value in self.from_file.items()]
    def from_literal_to_params(self):
        '''return from_literal entries as a list of --from-literal cli arguments'''
        return ["--from-literal={}={}".format(key, value) for key, value in self.from_literal.items()]
    def get(self):
        '''return a configmap by name '''
        results = self._get('configmap', self.name)
        if results['returncode'] == 0 and results['results'][0]:
            self.configmap = results['results'][0]
        # A missing configmap is not an error for our purposes; normalize
        # the "not found" case to returncode 0 so callers can test results.
        if results['returncode'] != 0 and '"{}" not found'.format(self.name) in results['stderr']:
            results['returncode'] = 0
        return results
    def delete(self):
        '''delete a configmap by name'''
        return self._delete('configmap', self.name)
    def create(self, dryrun=False, output=False):
        '''Create a configmap

           :dryrun: Print what would have been done. default: False
           :output: Whether to parse output. default: False
        '''
        cmd = ['create', 'configmap', self.name]
        if self.from_literal is not None:
            cmd.extend(self.from_literal_to_params())
        if self.from_file is not None:
            cmd.extend(self.from_file_to_params())
        if dryrun:
            # --dry-run -ojson renders the object without touching the cluster;
            # used to build the proposed configmap for comparison.
            cmd.extend(['--dry-run', '-ojson'])
        results = self.openshift_cmd(cmd, output=output)
        return results
    def update(self):
        '''replace the existing configmap with the proposed one'''
        return self._replace_content('configmap', self.name, self.inc_configmap)
    def needs_update(self):
        '''return True if the cluster configmap differs from the proposed one'''
        return not Utils.check_def_equal(self.inc_configmap, self.configmap, debug=self.verbose)
    @staticmethod
    # pylint: disable=too-many-return-statements,too-many-branches
    # TODO: This function should be refactored into its individual parts.
    def run_ansible(params, check_mode):
        '''run the oc_configmap module

           :param params: ansible module parameters
           :param check_mode: whether ansible check mode is active
           :return: result dict for ansible (changed/failed/results/state)
        '''
        oc_cm = OCConfigMap(params['name'],
                            params['from_file'],
                            params['from_literal'],
                            params['state'],
                            params['namespace'],
                            kubeconfig=params['kubeconfig'],
                            verbose=params['debug'])
        state = params['state']
        api_rval = oc_cm.get()
        if 'failed' in api_rval:
            return {'failed': True, 'msg': api_rval}
        #####
        # Get
        #####
        if state == 'list':
            return {'changed': False, 'results': api_rval, 'state': state}
        if not params['name']:
            return {'failed': True,
                    'msg': 'Please specify a name when state is absent|present.'}
        ########
        # Delete
        ########
        if state == 'absent':
            if not Utils.exists(api_rval['results'], params['name']):
                return {'changed': False, 'state': 'absent'}
            if check_mode:
                return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
            api_rval = oc_cm.delete()
            if api_rval['returncode'] != 0:
                return {'failed': True, 'msg': api_rval}
            return {'changed': True, 'results': api_rval, 'state': state}
        ########
        # Create
        ########
        if state == 'present':
            if not Utils.exists(api_rval['results'], params['name']):
                if check_mode:
                    return {'changed': True, 'msg': 'Would have performed a create.'}
                api_rval = oc_cm.create()
                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}
                # Re-fetch so the result reflects the created object.
                api_rval = oc_cm.get()
                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}
                return {'changed': True, 'results': api_rval, 'state': state}
            ########
            # Update
            ########
            if oc_cm.needs_update():
                api_rval = oc_cm.update()
                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}
                # Re-fetch so the result reflects the updated object.
                api_rval = oc_cm.get()
                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}
                return {'changed': True, 'results': api_rval, 'state': state}
            return {'changed': False, 'results': api_rval, 'state': state}
        return {'failed': True, 'msg': 'Unknown state passed. {}'.format(state)}
| apache-2.0 |
CyrilPeponnet/Archipel | ArchipelAgent/archipel-core/archipelcore/initinstallutils.py | 5 | 2738 | #!/usr/bin/python -W ignore::DeprecationWarning
# -*- coding: utf-8 -*-
#
# initinstallutils.py
#
# Copyright (C) 2013 Nicolas Ochem <nicolas.ochem@free.fr>
# Copyright (C) 2013 Antoine Mercadal <antoine.mercadal@archipelproject.org>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
this file contains methods common to all init-install files
"""
import sqlite3
import sys
def success(msg):
    """
    Print a standardized success message (green ANSI coloring)
    @type msg: String
    @param msg: the message to print
    """
    print "\033[32mSUCCESS: %s\033[0m" % msg
def warn(msg):
    """
    Print a standardized warning message (yellow ANSI coloring)
    @type msg: String
    @param msg: the message to print
    """
    print "\033[33mWARNING: %s\033[0m" % msg
def error(msg, exit=True):
    """
    Print a standardized error message (red ANSI coloring)
    @type msg: String
    @param msg: the message to print
    @type exit: Boolean
    @param exit: if True, exit the process with status 1 after printing
    """
    print "\033[31mERROR: %s\033[0m" % msg
    if exit:
        sys.exit(1)
def msg(msg, exit=True):
    """
    Print a standardized neutral message (magenta ANSI coloring)
    @type msg: String
    @param msg: the message to print
    @type exit: Boolean
    @param exit: accepted for symmetry with error() but currently ignored;
        this function never exits
    """
    # NOTE(review): unlike error(), the `exit` argument is unused here.
    print "\033[35mMESSAGE: %s\033[0m" % msg
def ask(message, answers=None, default=None):
    """
    Prompt the user with *message* and return the entered answer.
    @type message: String
    @param message: the question to display
    @type answers: List
    @param answers: allowed answers; the user is re-asked until one is given
    @type default: String
    @param default: answer used when the user just presses Enter; shown
        highlighted in green in the prompt
    """
    question = " * " + message
    if answers and default:
        question += " ["
        for a in answers:
            # Highlight the default answer in green.  (The previous
            # `a in (default)` was a substring test -- the parentheses
            # without a comma don't make a tuple -- so e.g. "e" matched
            # a default of "yes".)
            if a == default:
                a = "\033[32m" + a + "\033[0m"
            question += a + "/"
        question = question[:-1]
        question += "]"
    if not answers and default:
        question += " [\033[32m" + default + "\033[0m]"
    question += " : "
    resp = raw_input(question)
    if default:
        if resp == "":
            resp = default
    if answers and default:
        # Re-ask until a valid answer is given.
        if resp not in answers:
            resp = ask("\033[33mYou must select of the following answer\033[0m", answers, default)
    return resp
def ask_bool(message, default="y"):
    """Ask a yes/no question; return True for "y", False otherwise."""
    return ask(message, ["y", "n"], default) == "y"
| agpl-3.0 |
kevclarx/ansible | lib/ansible/modules/notification/campfire.py | 70 | 5339 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: campfire
version_added: "1.2"
short_description: Send a message to Campfire
description:
- Send a message to Campfire.
- Messages with newlines will result in a "Paste" message being sent.
options:
subscription:
description:
- The subscription name to use.
required: true
token:
description:
- API token.
required: true
room:
description:
- Room number to which the message should be sent.
required: true
msg:
description:
- The message body.
required: true
notify:
description:
- Send a notification sound before the message.
required: false
choices: ["56k", "bell", "bezos", "bueller", "clowntown",
"cottoneyejoe", "crickets", "dadgummit", "dangerzone",
"danielsan", "deeper", "drama", "greatjob", "greyjoy",
"guarantee", "heygirl", "horn", "horror",
"inconceivable", "live", "loggins", "makeitso", "noooo",
"nyan", "ohmy", "ohyeah", "pushit", "rimshot",
"rollout", "rumble", "sax", "secret", "sexyback",
"story", "tada", "tmyk", "trololo", "trombone", "unix",
"vuvuzela", "what", "whoomp", "yeah", "yodel"]
# informational: requirements for nodes
requirements: [ ]
author: "Adam Garside (@fabulops)"
'''
EXAMPLES = '''
- campfire:
subscription: foo
token: 12345
room: 123
msg: Task completed.
- campfire:
subscription: foo
token: 12345
room: 123
notify: loggins
msg: Task completed ... with feeling.
'''
import cgi
def main():
    """Module entry point: post a message (and an optional notification
    sound) to a Campfire room via its REST API, then exit the module."""
    module = AnsibleModule(
        argument_spec=dict(
            subscription=dict(required=True),
            token=dict(required=True, no_log=True),
            room=dict(required=True),
            msg=dict(required=True),
            notify=dict(required=False,
                        choices=["56k", "bell", "bezos", "bueller",
                                 "clowntown", "cottoneyejoe",
                                 "crickets", "dadgummit", "dangerzone",
                                 "danielsan", "deeper", "drama",
                                 "greatjob", "greyjoy", "guarantee",
                                 "heygirl", "horn", "horror",
                                 "inconceivable", "live", "loggins",
                                 "makeitso", "noooo", "nyan", "ohmy",
                                 "ohyeah", "pushit", "rimshot",
                                 "rollout", "rumble", "sax", "secret",
                                 "sexyback", "story", "tada", "tmyk",
                                 "trololo", "trombone", "unix",
                                 "vuvuzela", "what", "whoomp", "yeah",
                                 "yodel"]),
        ),
        supports_check_mode=False
    )
    subscription = module.params["subscription"]
    token = module.params["token"]
    room = module.params["room"]
    msg = module.params["msg"]
    notify = module.params["notify"]
    URI = "https://%s.campfirenow.com" % subscription
    # XML payload templates: sound notification vs. plain text message.
    NSTR = "<message><type>SoundMessage</type><body>%s</body></message>"
    MSTR = "<message><body>%s</body></message>"
    AGENT = "Ansible/1.2"
    # Hack to add basic auth username and password the way fetch_url expects
    module.params['url_username'] = token
    module.params['url_password'] = 'X'
    target_url = '%s/room/%s/speak.xml' % (URI, room)
    headers = {'Content-Type': 'application/xml',
               'User-agent': AGENT}
    # Send some audible notification if requested
    # NOTE(review): cgi.escape was removed in Python 3.8 (html.escape is the
    # replacement) -- confirm the target Python version before changing.
    if notify:
        response, info = fetch_url(module, target_url, data=NSTR % cgi.escape(notify), headers=headers)
        if info['status'] not in [200, 201]:
            module.fail_json(msg="unable to send msg: '%s', campfire api"
                                 " returned error code: '%s'" %
                                 (notify, info['status']))
    # Send the message
    response, info = fetch_url(module, target_url, data=MSTR %cgi.escape(msg), headers=headers)
    if info['status'] not in [200, 201]:
        module.fail_json(msg="unable to send msg: '%s', campfire api"
                             " returned error code: '%s'" %
                             (msg, info['status']))
    module.exit_json(changed=True, room=room, msg=msg, notify=notify)
| gpl-3.0 |
dmsimard/ansible | test/support/integration/plugins/module_utils/crypto.py | 37 | 101762 | # -*- coding: utf-8 -*-
#
# (c) 2016, Yanis Guenane <yanis+ansible@guenane.org>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# ----------------------------------------------------------------------
# A clearly marked portion of this file is licensed under the BSD license
# Copyright (c) 2015, 2016 Paul Kehrer (@reaperhulk)
# Copyright (c) 2017 Fraser Tweedale (@frasertweedale)
# For more details, search for the function _obj2txt().
# ---------------------------------------------------------------------
# A clearly marked portion of this file is extracted from a project that
# is licensed under the Apache License 2.0
# Copyright (c) the OpenSSL contributors
# For more details, search for the function _OID_MAP.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import sys
from distutils.version import LooseVersion
try:
import OpenSSL
from OpenSSL import crypto
except ImportError:
# An error will be raised in the calling class to let the end
# user know that OpenSSL couldn't be found.
pass
try:
import cryptography
from cryptography import x509
from cryptography.hazmat.backends import default_backend as cryptography_backend
from cryptography.hazmat.primitives.serialization import load_pem_private_key
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
import ipaddress
# Older versions of cryptography (< 2.1) do not have __hash__ functions for
# general name objects (DNSName, IPAddress, ...), while providing overloaded
# equality and string representation operations. This makes it impossible to
# use them in hash-based data structures such as set or dict. Since we are
# actually doing that in openssl_certificate, and potentially in other code,
# we need to monkey-patch __hash__ for these classes to make sure our code
# works fine.
if LooseVersion(cryptography.__version__) < LooseVersion('2.1'):
# A very simply hash function which relies on the representation
# of an object to be implemented. This is the case since at least
# cryptography 1.0, see
# https://github.com/pyca/cryptography/commit/7a9abce4bff36c05d26d8d2680303a6f64a0e84f
def simple_hash(self):
return hash(repr(self))
# The hash functions for the following types were added for cryptography 2.1:
# https://github.com/pyca/cryptography/commit/fbfc36da2a4769045f2373b004ddf0aff906cf38
x509.DNSName.__hash__ = simple_hash
x509.DirectoryName.__hash__ = simple_hash
x509.GeneralName.__hash__ = simple_hash
x509.IPAddress.__hash__ = simple_hash
x509.OtherName.__hash__ = simple_hash
x509.RegisteredID.__hash__ = simple_hash
if LooseVersion(cryptography.__version__) < LooseVersion('1.2'):
# The hash functions for the following types were added for cryptography 1.2:
# https://github.com/pyca/cryptography/commit/b642deed88a8696e5f01ce6855ccf89985fc35d0
# https://github.com/pyca/cryptography/commit/d1b5681f6db2bde7a14625538bd7907b08dfb486
x509.RFC822Name.__hash__ = simple_hash
x509.UniformResourceIdentifier.__hash__ = simple_hash
# Test whether we have support for X25519, X448, Ed25519 and/or Ed448
try:
import cryptography.hazmat.primitives.asymmetric.x25519
CRYPTOGRAPHY_HAS_X25519 = True
try:
cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.private_bytes
CRYPTOGRAPHY_HAS_X25519_FULL = True
except AttributeError:
CRYPTOGRAPHY_HAS_X25519_FULL = False
except ImportError:
CRYPTOGRAPHY_HAS_X25519 = False
CRYPTOGRAPHY_HAS_X25519_FULL = False
try:
import cryptography.hazmat.primitives.asymmetric.x448
CRYPTOGRAPHY_HAS_X448 = True
except ImportError:
CRYPTOGRAPHY_HAS_X448 = False
try:
import cryptography.hazmat.primitives.asymmetric.ed25519
CRYPTOGRAPHY_HAS_ED25519 = True
except ImportError:
CRYPTOGRAPHY_HAS_ED25519 = False
try:
import cryptography.hazmat.primitives.asymmetric.ed448
CRYPTOGRAPHY_HAS_ED448 = True
except ImportError:
CRYPTOGRAPHY_HAS_ED448 = False
HAS_CRYPTOGRAPHY = True
except ImportError:
# Error handled in the calling module.
CRYPTOGRAPHY_HAS_X25519 = False
CRYPTOGRAPHY_HAS_X25519_FULL = False
CRYPTOGRAPHY_HAS_X448 = False
CRYPTOGRAPHY_HAS_ED25519 = False
CRYPTOGRAPHY_HAS_ED448 = False
HAS_CRYPTOGRAPHY = False
import abc
import base64
import binascii
import datetime
import errno
import hashlib
import os
import re
import tempfile
from ansible.module_utils import six
from ansible.module_utils._text import to_native, to_bytes, to_text
class OpenSSLObjectError(Exception):
    """Base error for all OpenSSL/cryptography helper failures in this module."""
    pass
class OpenSSLBadPassphraseError(OpenSSLObjectError):
    """Raised when a private key's passphrase is wrong, missing, or unexpected."""
    pass
def get_fingerprint_of_bytes(source):
    """Compute hex fingerprints of *source* for every available hash algorithm.

    Returns a dict mapping algorithm name to a colon-separated lowercase hex
    digest, or None when hashlib exposes no algorithm listing at all.
    """
    # Python 2 exposes hashlib.algorithms; Python 3 only algorithms_guaranteed.
    if hasattr(hashlib, 'algorithms'):
        algorithms = hashlib.algorithms
    elif hasattr(hashlib, 'algorithms_guaranteed'):
        algorithms = hashlib.algorithms_guaranteed
    else:
        return None
    result = {}
    for name in algorithms:
        hash_factory = getattr(hashlib, name)
        try:
            digester = hash_factory(source)
        except ValueError:
            # Some algorithms are unavailable in FIPS mode
            # (https://github.com/ansible/ansible/issues/67213).
            continue
        try:
            hexdigest = digester.hexdigest()
        except TypeError:
            # Variable-length digests (e.g. shake_*) require a length argument.
            hexdigest = digester.hexdigest(32)
        result[name] = ':'.join(hexdigest[pos:pos + 2] for pos in range(0, len(hexdigest), 2))
    return result
def get_fingerprint(path, passphrase=None, content=None, backend='pyopenssl'):
    """Generate the fingerprint of the public key.

    Loads the private key from ``path`` (or ``content``), serializes its
    public part to DER, and returns get_fingerprint_of_bytes() of that
    serialization.  Returns None when no serialization method is available
    (very old PyOpenSSL without the needed private bindings).
    """
    privatekey = load_privatekey(path, passphrase=passphrase, content=content, check_passphrase=False, backend=backend)
    if backend == 'pyopenssl':
        try:
            publickey = crypto.dump_publickey(crypto.FILETYPE_ASN1, privatekey)
        except AttributeError:
            # If PyOpenSSL < 16.0 crypto.dump_publickey() will fail.
            try:
                # Fall back to pyOpenSSL's private cffi bindings to serialize
                # the public key to DER ourselves.
                bio = crypto._new_mem_buf()
                rc = crypto._lib.i2d_PUBKEY_bio(bio, privatekey._pkey)
                if rc != 1:
                    crypto._raise_current_error()
                publickey = crypto._bio_to_string(bio)
            except AttributeError:
                # By doing this we prevent the code from raising an error
                # yet we return no value in the fingerprint hash.
                return None
    elif backend == 'cryptography':
        publickey = privatekey.public_key().public_bytes(
            serialization.Encoding.DER,
            serialization.PublicFormat.SubjectPublicKeyInfo
        )
    return get_fingerprint_of_bytes(publickey)
def load_file_if_exists(path, module=None, ignore_errors=False):
    """Read and return the binary contents of ``path``, or None if it does not exist.

    Other I/O errors are swallowed when ``ignore_errors`` is true, reported via
    ``module.fail_json()`` when a module is given, and re-raised otherwise.
    """
    try:
        with open(path, 'rb') as f:
            return f.read()
    except EnvironmentError as exc:
        # A missing file is never an error for this helper.
        if exc.errno == errno.ENOENT:
            return None
        if ignore_errors:
            return None
        if module is None:
            raise
        # Pass the message as msg= for consistency with the other fail_json()
        # calls in this file (and compatibility with AnsibleModule versions
        # whose fail_json() only accepts keyword arguments).
        module.fail_json(msg='Error while loading {0} - {1}'.format(path, str(exc)))
    except Exception as exc:
        if ignore_errors:
            return None
        if module is None:
            raise
        module.fail_json(msg='Error while loading {0} - {1}'.format(path, str(exc)))
def load_privatekey(path, passphrase=None, check_passphrase=True, content=None, backend='pyopenssl'):
    """Load the specified OpenSSL private key.

    The content can also be specified via content; in that case,
    this function will not load the key from disk.

    :param path: path of the PEM file to read when ``content`` is None
    :param passphrase: optional passphrase protecting the key
    :param check_passphrase: pyopenssl backend only - additionally verify that
        the presence/absence of a passphrase matches the key's protection
    :param content: raw PEM bytes to use instead of reading ``path``
    :param backend: 'pyopenssl' or 'cryptography'
    :raises OpenSSLBadPassphraseError: when the passphrase does not match the key
    :raises OpenSSLObjectError: for I/O or deserialization errors
    """
    try:
        if content is None:
            with open(path, 'rb') as b_priv_key_fh:
                priv_key_detail = b_priv_key_fh.read()
        else:
            priv_key_detail = content
        if backend == 'pyopenssl':
            # First try: try to load with real passphrase (resp. empty string)
            # Will work if this is the correct passphrase, or the key is not
            # password-protected.
            try:
                result = crypto.load_privatekey(crypto.FILETYPE_PEM,
                                                priv_key_detail,
                                                to_bytes(passphrase or ''))
            except crypto.Error as e:
                # pyOpenSSL error args hold (library, function, reason) tuples;
                # the reason string tells us whether decryption failed.
                if len(e.args) > 0 and len(e.args[0]) > 0:
                    if e.args[0][0][2] in ('bad decrypt', 'bad password read'):
                        # This happens in case we have the wrong passphrase.
                        if passphrase is not None:
                            raise OpenSSLBadPassphraseError('Wrong passphrase provided for private key!')
                        else:
                            raise OpenSSLBadPassphraseError('No passphrase provided, but private key is password-protected!')
                raise OpenSSLObjectError('Error while deserializing key: {0}'.format(e))
            if check_passphrase:
                # Next we want to make sure that the key is actually protected by
                # a passphrase (in case we did try the empty string before, make
                # sure that the key is not protected by the empty string)
                try:
                    # Deliberately use a passphrase that differs from the given
                    # one; loading must FAIL if the key is really protected.
                    crypto.load_privatekey(crypto.FILETYPE_PEM,
                                           priv_key_detail,
                                           to_bytes('y' if passphrase == 'x' else 'x'))
                    if passphrase is not None:
                        # Since we can load the key without an exception, the
                        # key isn't password-protected
                        raise OpenSSLBadPassphraseError('Passphrase provided, but private key is not password-protected!')
                except crypto.Error as e:
                    if passphrase is None and len(e.args) > 0 and len(e.args[0]) > 0:
                        if e.args[0][0][2] in ('bad decrypt', 'bad password read'):
                            # The key is obviously protected by the empty string.
                            # Don't do this at home (if it's possible at all)...
                            raise OpenSSLBadPassphraseError('No passphrase provided, but private key is password-protected!')
        elif backend == 'cryptography':
            try:
                result = load_pem_private_key(priv_key_detail,
                                              None if passphrase is None else to_bytes(passphrase),
                                              cryptography_backend())
            except TypeError as dummy:
                raise OpenSSLBadPassphraseError('Wrong or empty passphrase provided for private key')
            except ValueError as dummy:
                raise OpenSSLBadPassphraseError('Wrong passphrase provided for private key')
        return result
    except (IOError, OSError) as exc:
        raise OpenSSLObjectError(exc)
def load_certificate(path, content=None, backend='pyopenssl'):
    """Load a PEM-encoded X.509 certificate.

    The certificate is read from *path* unless *content* is given, and parsed
    with the selected *backend* ('pyopenssl' or 'cryptography').
    """
    try:
        cert_content = content
        if cert_content is None:
            with open(path, 'rb') as cert_fh:
                cert_content = cert_fh.read()
        if backend == 'pyopenssl':
            return crypto.load_certificate(crypto.FILETYPE_PEM, cert_content)
        if backend == 'cryptography':
            return x509.load_pem_x509_certificate(cert_content, cryptography_backend())
    except (IOError, OSError) as exc:
        raise OpenSSLObjectError(exc)
def load_certificate_request(path, content=None, backend='pyopenssl'):
    """Load a PEM-encoded certificate signing request (CSR).

    The CSR is read from *path* unless *content* is given, and parsed with the
    selected *backend* ('pyopenssl' or 'cryptography').
    """
    if content is not None:
        csr_content = content
    else:
        try:
            with open(path, 'rb') as csr_fh:
                csr_content = csr_fh.read()
        except (IOError, OSError) as exc:
            raise OpenSSLObjectError(exc)
    if backend == 'pyopenssl':
        return crypto.load_certificate_request(crypto.FILETYPE_PEM, csr_content)
    if backend == 'cryptography':
        return x509.load_pem_x509_csr(csr_content, cryptography_backend())
def parse_name_field(input_dict):
    """Flatten a dict of key -> value or key -> list-of-values into (key, value) tuples."""
    pairs = []
    for field_name, field_value in input_dict.items():
        if isinstance(field_value, list):
            pairs.extend((field_name, item) for item in field_value)
        else:
            pairs.append((field_name, field_value))
    return pairs
def convert_relative_to_datetime(relative_time_string):
    """Convert a relative timespec (sshd_config(5) style, e.g. '+1w3d') to a datetime.

    Returns the current UTC time shifted by the given offset, or None when the
    string does not match the expected format.
    """
    match = re.match(
        r"^(?P<prefix>[+-])((?P<weeks>\d+)[wW])?((?P<days>\d+)[dD])?((?P<hours>\d+)[hH])?((?P<minutes>\d+)[mM])?((?P<seconds>\d+)[sS]?)?$",
        relative_time_string)
    if match is None or len(relative_time_string) == 1:
        # Either no match at all, or just a bare "+"/"-" sign.
        return None
    delta = datetime.timedelta(0)
    for unit in ('weeks', 'days', 'hours', 'minutes', 'seconds'):
        amount = match.group(unit)
        if amount is not None:
            delta += datetime.timedelta(**{unit: int(amount)})
    now = datetime.datetime.utcnow()
    return now + delta if match.group("prefix") == "+" else now - delta
def get_relative_time_option(input_string, input_name, backend='cryptography'):
    """Return an absolute timespec if a relative timespec or an ASN1 formatted
    string is provided.

    The return value will be a datetime object for the cryptography backend,
    and a ASN1 formatted string for the pyopenssl backend.

    :raises OpenSSLObjectError: when the timespec is None or unparsable
    """
    result = to_native(input_string)
    if result is None:
        # BUGFIX: the format arguments must be a parenthesized tuple.  '%'
        # binds tighter than ',', so the original
        # `'..."%s" for %s...' % input_string, input_name` passed only one
        # argument to a two-placeholder format string and raised TypeError
        # instead of the intended OpenSSLObjectError.
        raise OpenSSLObjectError(
            'The timespec "%s" for %s is not valid' %
            (input_string, input_name))
    # Relative time
    if result.startswith("+") or result.startswith("-"):
        result_datetime = convert_relative_to_datetime(result)
        if backend == 'pyopenssl':
            return result_datetime.strftime("%Y%m%d%H%M%SZ")
        elif backend == 'cryptography':
            return result_datetime
    # Absolute time
    if backend == 'pyopenssl':
        return input_string
    elif backend == 'cryptography':
        # Accept ASN.1 GeneralizedTime-style strings, with or without
        # seconds, in Zulu or explicit-offset form.
        for date_fmt in ['%Y%m%d%H%M%SZ', '%Y%m%d%H%MZ', '%Y%m%d%H%M%S%z', '%Y%m%d%H%M%z']:
            try:
                return datetime.datetime.strptime(result, date_fmt)
            except ValueError:
                pass
        raise OpenSSLObjectError(
            'The time spec "%s" for %s is invalid' %
            (input_string, input_name)
        )
def select_message_digest(digest_string):
    """Map a digest name to a fresh cryptography hash algorithm instance.

    Supported names: 'sha256', 'sha384', 'sha512', 'sha1', 'md5'.
    Returns None for any other name.
    """
    # Guard-return chain; hashes.* is only touched for recognized names, so
    # unknown names still return None even if cryptography is unavailable.
    if digest_string == 'sha256':
        return hashes.SHA256()
    if digest_string == 'sha384':
        return hashes.SHA384()
    if digest_string == 'sha512':
        return hashes.SHA512()
    if digest_string == 'sha1':
        return hashes.SHA1()
    if digest_string == 'md5':
        return hashes.MD5()
    return None
def write_file(module, content, default_mode=None, path=None):
    '''
    Writes content into destination file as securely as possible.
    Uses file arguments from module.

    The content is first written to a private (0600) temporary file and then
    atomically moved onto the destination; on any failure the temporary file
    is removed and the failure is reported via module.fail_json().
    '''
    # Find out parameters for file
    file_args = module.load_file_common_arguments(module.params, path=path)
    if file_args['mode'] is None:
        file_args['mode'] = default_mode
    # Create tempfile name
    tmp_fd, tmp_name = tempfile.mkstemp(prefix=b'.ansible_tmp')
    try:
        # Only the name is needed; the file is re-opened below with 0600.
        os.close(tmp_fd)
    except Exception as dummy:
        pass
    module.add_cleanup_file(tmp_name)  # if we fail, let Ansible try to remove the file
    try:
        try:
            # Create tempfile
            file = os.open(tmp_name, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
            os.write(file, content)
            os.close(file)
        except Exception as e:
            try:
                os.remove(tmp_name)
            except Exception as dummy:
                pass
            module.fail_json(msg='Error while writing result into temporary file: {0}'.format(e))
        # Update destination to wanted permissions
        if os.path.exists(file_args['path']):
            module.set_fs_attributes_if_different(file_args, False)
        # Move tempfile to final destination
        module.atomic_move(tmp_name, file_args['path'])
        # Try to update permissions again
        module.set_fs_attributes_if_different(file_args, False)
    except Exception as e:
        try:
            os.remove(tmp_name)
        except Exception as dummy:
            pass
        module.fail_json(msg='Error while writing result: {0}'.format(e))
@six.add_metaclass(abc.ABCMeta)
class OpenSSLObject(object):
    """Abstract base for file-backed OpenSSL resources (keys, certificates, CSRs).

    Subclasses implement generate() and dump(); this class provides common
    state/permission checking and removal of the backing file.
    """

    def __init__(self, path, state, force, check_mode):
        self.path = path
        self.state = state
        self.force = force
        self.name = os.path.basename(path)
        self.changed = False
        self.check_mode = check_mode

    def check(self, module, perms_required=True):
        """Ensure the resource is in its desired state."""
        exists = os.path.exists(self.path)
        # Short-circuit: permissions are only examined for an existing file.
        if not perms_required or not exists:
            return exists
        file_args = module.load_file_common_arguments(module.params)
        return not module.set_fs_attributes_if_different(file_args, False)

    @abc.abstractmethod
    def dump(self):
        """Serialize the object into a dictionary."""
        pass

    @abc.abstractmethod
    def generate(self):
        """Generate the resource."""
        pass

    def remove(self, module):
        """Remove the resource from the filesystem."""
        try:
            os.remove(self.path)
        except OSError as exc:
            # A file that is already absent is the desired end state.
            if exc.errno != errno.ENOENT:
                raise OpenSSLObjectError(exc)
        else:
            self.changed = True
# #####################################################################################
# #####################################################################################
# This has been extracted from the OpenSSL project's objects.txt:
# https://github.com/openssl/openssl/blob/9537fe5757bb07761fa275d779bbd40bcf5530e4/crypto/objects/objects.txt
# Extracted with https://gist.github.com/felixfontein/376748017ad65ead093d56a45a5bf376
#
# In case the following data structure has any copyrightable content, note that it is licensed as follows:
# Copyright (c) the OpenSSL contributors
# Licensed under the Apache License 2.0
# https://github.com/openssl/openssl/blob/master/LICENSE
_OID_MAP = {
'0': ('itu-t', 'ITU-T', 'ccitt'),
'0.3.4401.5': ('ntt-ds', ),
'0.3.4401.5.3.1.9': ('camellia', ),
'0.3.4401.5.3.1.9.1': ('camellia-128-ecb', 'CAMELLIA-128-ECB'),
'0.3.4401.5.3.1.9.3': ('camellia-128-ofb', 'CAMELLIA-128-OFB'),
'0.3.4401.5.3.1.9.4': ('camellia-128-cfb', 'CAMELLIA-128-CFB'),
'0.3.4401.5.3.1.9.6': ('camellia-128-gcm', 'CAMELLIA-128-GCM'),
'0.3.4401.5.3.1.9.7': ('camellia-128-ccm', 'CAMELLIA-128-CCM'),
'0.3.4401.5.3.1.9.9': ('camellia-128-ctr', 'CAMELLIA-128-CTR'),
'0.3.4401.5.3.1.9.10': ('camellia-128-cmac', 'CAMELLIA-128-CMAC'),
'0.3.4401.5.3.1.9.21': ('camellia-192-ecb', 'CAMELLIA-192-ECB'),
'0.3.4401.5.3.1.9.23': ('camellia-192-ofb', 'CAMELLIA-192-OFB'),
'0.3.4401.5.3.1.9.24': ('camellia-192-cfb', 'CAMELLIA-192-CFB'),
'0.3.4401.5.3.1.9.26': ('camellia-192-gcm', 'CAMELLIA-192-GCM'),
'0.3.4401.5.3.1.9.27': ('camellia-192-ccm', 'CAMELLIA-192-CCM'),
'0.3.4401.5.3.1.9.29': ('camellia-192-ctr', 'CAMELLIA-192-CTR'),
'0.3.4401.5.3.1.9.30': ('camellia-192-cmac', 'CAMELLIA-192-CMAC'),
'0.3.4401.5.3.1.9.41': ('camellia-256-ecb', 'CAMELLIA-256-ECB'),
'0.3.4401.5.3.1.9.43': ('camellia-256-ofb', 'CAMELLIA-256-OFB'),
'0.3.4401.5.3.1.9.44': ('camellia-256-cfb', 'CAMELLIA-256-CFB'),
'0.3.4401.5.3.1.9.46': ('camellia-256-gcm', 'CAMELLIA-256-GCM'),
'0.3.4401.5.3.1.9.47': ('camellia-256-ccm', 'CAMELLIA-256-CCM'),
'0.3.4401.5.3.1.9.49': ('camellia-256-ctr', 'CAMELLIA-256-CTR'),
'0.3.4401.5.3.1.9.50': ('camellia-256-cmac', 'CAMELLIA-256-CMAC'),
'0.9': ('data', ),
'0.9.2342': ('pss', ),
'0.9.2342.19200300': ('ucl', ),
'0.9.2342.19200300.100': ('pilot', ),
'0.9.2342.19200300.100.1': ('pilotAttributeType', ),
'0.9.2342.19200300.100.1.1': ('userId', 'UID'),
'0.9.2342.19200300.100.1.2': ('textEncodedORAddress', ),
'0.9.2342.19200300.100.1.3': ('rfc822Mailbox', 'mail'),
'0.9.2342.19200300.100.1.4': ('info', ),
'0.9.2342.19200300.100.1.5': ('favouriteDrink', ),
'0.9.2342.19200300.100.1.6': ('roomNumber', ),
'0.9.2342.19200300.100.1.7': ('photo', ),
'0.9.2342.19200300.100.1.8': ('userClass', ),
'0.9.2342.19200300.100.1.9': ('host', ),
'0.9.2342.19200300.100.1.10': ('manager', ),
'0.9.2342.19200300.100.1.11': ('documentIdentifier', ),
'0.9.2342.19200300.100.1.12': ('documentTitle', ),
'0.9.2342.19200300.100.1.13': ('documentVersion', ),
'0.9.2342.19200300.100.1.14': ('documentAuthor', ),
'0.9.2342.19200300.100.1.15': ('documentLocation', ),
'0.9.2342.19200300.100.1.20': ('homeTelephoneNumber', ),
'0.9.2342.19200300.100.1.21': ('secretary', ),
'0.9.2342.19200300.100.1.22': ('otherMailbox', ),
'0.9.2342.19200300.100.1.23': ('lastModifiedTime', ),
'0.9.2342.19200300.100.1.24': ('lastModifiedBy', ),
'0.9.2342.19200300.100.1.25': ('domainComponent', 'DC'),
'0.9.2342.19200300.100.1.26': ('aRecord', ),
'0.9.2342.19200300.100.1.27': ('pilotAttributeType27', ),
'0.9.2342.19200300.100.1.28': ('mXRecord', ),
'0.9.2342.19200300.100.1.29': ('nSRecord', ),
'0.9.2342.19200300.100.1.30': ('sOARecord', ),
'0.9.2342.19200300.100.1.31': ('cNAMERecord', ),
'0.9.2342.19200300.100.1.37': ('associatedDomain', ),
'0.9.2342.19200300.100.1.38': ('associatedName', ),
'0.9.2342.19200300.100.1.39': ('homePostalAddress', ),
'0.9.2342.19200300.100.1.40': ('personalTitle', ),
'0.9.2342.19200300.100.1.41': ('mobileTelephoneNumber', ),
'0.9.2342.19200300.100.1.42': ('pagerTelephoneNumber', ),
'0.9.2342.19200300.100.1.43': ('friendlyCountryName', ),
'0.9.2342.19200300.100.1.44': ('uniqueIdentifier', 'uid'),
'0.9.2342.19200300.100.1.45': ('organizationalStatus', ),
'0.9.2342.19200300.100.1.46': ('janetMailbox', ),
'0.9.2342.19200300.100.1.47': ('mailPreferenceOption', ),
'0.9.2342.19200300.100.1.48': ('buildingName', ),
'0.9.2342.19200300.100.1.49': ('dSAQuality', ),
'0.9.2342.19200300.100.1.50': ('singleLevelQuality', ),
'0.9.2342.19200300.100.1.51': ('subtreeMinimumQuality', ),
'0.9.2342.19200300.100.1.52': ('subtreeMaximumQuality', ),
'0.9.2342.19200300.100.1.53': ('personalSignature', ),
'0.9.2342.19200300.100.1.54': ('dITRedirect', ),
'0.9.2342.19200300.100.1.55': ('audio', ),
'0.9.2342.19200300.100.1.56': ('documentPublisher', ),
'0.9.2342.19200300.100.3': ('pilotAttributeSyntax', ),
'0.9.2342.19200300.100.3.4': ('iA5StringSyntax', ),
'0.9.2342.19200300.100.3.5': ('caseIgnoreIA5StringSyntax', ),
'0.9.2342.19200300.100.4': ('pilotObjectClass', ),
'0.9.2342.19200300.100.4.3': ('pilotObject', ),
'0.9.2342.19200300.100.4.4': ('pilotPerson', ),
'0.9.2342.19200300.100.4.5': ('account', ),
'0.9.2342.19200300.100.4.6': ('document', ),
'0.9.2342.19200300.100.4.7': ('room', ),
'0.9.2342.19200300.100.4.9': ('documentSeries', ),
'0.9.2342.19200300.100.4.13': ('Domain', 'domain'),
'0.9.2342.19200300.100.4.14': ('rFC822localPart', ),
'0.9.2342.19200300.100.4.15': ('dNSDomain', ),
'0.9.2342.19200300.100.4.17': ('domainRelatedObject', ),
'0.9.2342.19200300.100.4.18': ('friendlyCountry', ),
'0.9.2342.19200300.100.4.19': ('simpleSecurityObject', ),
'0.9.2342.19200300.100.4.20': ('pilotOrganization', ),
'0.9.2342.19200300.100.4.21': ('pilotDSA', ),
'0.9.2342.19200300.100.4.22': ('qualityLabelledData', ),
'0.9.2342.19200300.100.10': ('pilotGroups', ),
'1': ('iso', 'ISO'),
'1.0.9797.3.4': ('gmac', 'GMAC'),
'1.0.10118.3.0.55': ('whirlpool', ),
'1.2': ('ISO Member Body', 'member-body'),
'1.2.156': ('ISO CN Member Body', 'ISO-CN'),
'1.2.156.10197': ('oscca', ),
'1.2.156.10197.1': ('sm-scheme', ),
'1.2.156.10197.1.104.1': ('sm4-ecb', 'SM4-ECB'),
'1.2.156.10197.1.104.2': ('sm4-cbc', 'SM4-CBC'),
'1.2.156.10197.1.104.3': ('sm4-ofb', 'SM4-OFB'),
'1.2.156.10197.1.104.4': ('sm4-cfb', 'SM4-CFB'),
'1.2.156.10197.1.104.5': ('sm4-cfb1', 'SM4-CFB1'),
'1.2.156.10197.1.104.6': ('sm4-cfb8', 'SM4-CFB8'),
'1.2.156.10197.1.104.7': ('sm4-ctr', 'SM4-CTR'),
'1.2.156.10197.1.301': ('sm2', 'SM2'),
'1.2.156.10197.1.401': ('sm3', 'SM3'),
'1.2.156.10197.1.501': ('SM2-with-SM3', 'SM2-SM3'),
'1.2.156.10197.1.504': ('sm3WithRSAEncryption', 'RSA-SM3'),
'1.2.392.200011.61.1.1.1.2': ('camellia-128-cbc', 'CAMELLIA-128-CBC'),
'1.2.392.200011.61.1.1.1.3': ('camellia-192-cbc', 'CAMELLIA-192-CBC'),
'1.2.392.200011.61.1.1.1.4': ('camellia-256-cbc', 'CAMELLIA-256-CBC'),
'1.2.392.200011.61.1.1.3.2': ('id-camellia128-wrap', ),
'1.2.392.200011.61.1.1.3.3': ('id-camellia192-wrap', ),
'1.2.392.200011.61.1.1.3.4': ('id-camellia256-wrap', ),
'1.2.410.200004': ('kisa', 'KISA'),
'1.2.410.200004.1.3': ('seed-ecb', 'SEED-ECB'),
'1.2.410.200004.1.4': ('seed-cbc', 'SEED-CBC'),
'1.2.410.200004.1.5': ('seed-cfb', 'SEED-CFB'),
'1.2.410.200004.1.6': ('seed-ofb', 'SEED-OFB'),
'1.2.410.200046.1.1': ('aria', ),
'1.2.410.200046.1.1.1': ('aria-128-ecb', 'ARIA-128-ECB'),
'1.2.410.200046.1.1.2': ('aria-128-cbc', 'ARIA-128-CBC'),
'1.2.410.200046.1.1.3': ('aria-128-cfb', 'ARIA-128-CFB'),
'1.2.410.200046.1.1.4': ('aria-128-ofb', 'ARIA-128-OFB'),
'1.2.410.200046.1.1.5': ('aria-128-ctr', 'ARIA-128-CTR'),
'1.2.410.200046.1.1.6': ('aria-192-ecb', 'ARIA-192-ECB'),
'1.2.410.200046.1.1.7': ('aria-192-cbc', 'ARIA-192-CBC'),
'1.2.410.200046.1.1.8': ('aria-192-cfb', 'ARIA-192-CFB'),
'1.2.410.200046.1.1.9': ('aria-192-ofb', 'ARIA-192-OFB'),
'1.2.410.200046.1.1.10': ('aria-192-ctr', 'ARIA-192-CTR'),
'1.2.410.200046.1.1.11': ('aria-256-ecb', 'ARIA-256-ECB'),
'1.2.410.200046.1.1.12': ('aria-256-cbc', 'ARIA-256-CBC'),
'1.2.410.200046.1.1.13': ('aria-256-cfb', 'ARIA-256-CFB'),
'1.2.410.200046.1.1.14': ('aria-256-ofb', 'ARIA-256-OFB'),
'1.2.410.200046.1.1.15': ('aria-256-ctr', 'ARIA-256-CTR'),
'1.2.410.200046.1.1.34': ('aria-128-gcm', 'ARIA-128-GCM'),
'1.2.410.200046.1.1.35': ('aria-192-gcm', 'ARIA-192-GCM'),
'1.2.410.200046.1.1.36': ('aria-256-gcm', 'ARIA-256-GCM'),
'1.2.410.200046.1.1.37': ('aria-128-ccm', 'ARIA-128-CCM'),
'1.2.410.200046.1.1.38': ('aria-192-ccm', 'ARIA-192-CCM'),
'1.2.410.200046.1.1.39': ('aria-256-ccm', 'ARIA-256-CCM'),
'1.2.643.2.2': ('cryptopro', ),
'1.2.643.2.2.3': ('GOST R 34.11-94 with GOST R 34.10-2001', 'id-GostR3411-94-with-GostR3410-2001'),
'1.2.643.2.2.4': ('GOST R 34.11-94 with GOST R 34.10-94', 'id-GostR3411-94-with-GostR3410-94'),
'1.2.643.2.2.9': ('GOST R 34.11-94', 'md_gost94'),
'1.2.643.2.2.10': ('HMAC GOST 34.11-94', 'id-HMACGostR3411-94'),
'1.2.643.2.2.14.0': ('id-Gost28147-89-None-KeyMeshing', ),
'1.2.643.2.2.14.1': ('id-Gost28147-89-CryptoPro-KeyMeshing', ),
'1.2.643.2.2.19': ('GOST R 34.10-2001', 'gost2001'),
'1.2.643.2.2.20': ('GOST R 34.10-94', 'gost94'),
'1.2.643.2.2.20.1': ('id-GostR3410-94-a', ),
'1.2.643.2.2.20.2': ('id-GostR3410-94-aBis', ),
'1.2.643.2.2.20.3': ('id-GostR3410-94-b', ),
'1.2.643.2.2.20.4': ('id-GostR3410-94-bBis', ),
'1.2.643.2.2.21': ('GOST 28147-89', 'gost89'),
'1.2.643.2.2.22': ('GOST 28147-89 MAC', 'gost-mac'),
'1.2.643.2.2.23': ('GOST R 34.11-94 PRF', 'prf-gostr3411-94'),
'1.2.643.2.2.30.0': ('id-GostR3411-94-TestParamSet', ),
'1.2.643.2.2.30.1': ('id-GostR3411-94-CryptoProParamSet', ),
'1.2.643.2.2.31.0': ('id-Gost28147-89-TestParamSet', ),
'1.2.643.2.2.31.1': ('id-Gost28147-89-CryptoPro-A-ParamSet', ),
'1.2.643.2.2.31.2': ('id-Gost28147-89-CryptoPro-B-ParamSet', ),
'1.2.643.2.2.31.3': ('id-Gost28147-89-CryptoPro-C-ParamSet', ),
'1.2.643.2.2.31.4': ('id-Gost28147-89-CryptoPro-D-ParamSet', ),
'1.2.643.2.2.31.5': ('id-Gost28147-89-CryptoPro-Oscar-1-1-ParamSet', ),
'1.2.643.2.2.31.6': ('id-Gost28147-89-CryptoPro-Oscar-1-0-ParamSet', ),
'1.2.643.2.2.31.7': ('id-Gost28147-89-CryptoPro-RIC-1-ParamSet', ),
'1.2.643.2.2.32.0': ('id-GostR3410-94-TestParamSet', ),
'1.2.643.2.2.32.2': ('id-GostR3410-94-CryptoPro-A-ParamSet', ),
'1.2.643.2.2.32.3': ('id-GostR3410-94-CryptoPro-B-ParamSet', ),
'1.2.643.2.2.32.4': ('id-GostR3410-94-CryptoPro-C-ParamSet', ),
'1.2.643.2.2.32.5': ('id-GostR3410-94-CryptoPro-D-ParamSet', ),
'1.2.643.2.2.33.1': ('id-GostR3410-94-CryptoPro-XchA-ParamSet', ),
'1.2.643.2.2.33.2': ('id-GostR3410-94-CryptoPro-XchB-ParamSet', ),
'1.2.643.2.2.33.3': ('id-GostR3410-94-CryptoPro-XchC-ParamSet', ),
'1.2.643.2.2.35.0': ('id-GostR3410-2001-TestParamSet', ),
'1.2.643.2.2.35.1': ('id-GostR3410-2001-CryptoPro-A-ParamSet', ),
'1.2.643.2.2.35.2': ('id-GostR3410-2001-CryptoPro-B-ParamSet', ),
'1.2.643.2.2.35.3': ('id-GostR3410-2001-CryptoPro-C-ParamSet', ),
'1.2.643.2.2.36.0': ('id-GostR3410-2001-CryptoPro-XchA-ParamSet', ),
'1.2.643.2.2.36.1': ('id-GostR3410-2001-CryptoPro-XchB-ParamSet', ),
'1.2.643.2.2.98': ('GOST R 34.10-2001 DH', 'id-GostR3410-2001DH'),
'1.2.643.2.2.99': ('GOST R 34.10-94 DH', 'id-GostR3410-94DH'),
'1.2.643.2.9': ('cryptocom', ),
'1.2.643.2.9.1.3.3': ('GOST R 34.11-94 with GOST R 34.10-94 Cryptocom', 'id-GostR3411-94-with-GostR3410-94-cc'),
'1.2.643.2.9.1.3.4': ('GOST R 34.11-94 with GOST R 34.10-2001 Cryptocom', 'id-GostR3411-94-with-GostR3410-2001-cc'),
'1.2.643.2.9.1.5.3': ('GOST 34.10-94 Cryptocom', 'gost94cc'),
'1.2.643.2.9.1.5.4': ('GOST 34.10-2001 Cryptocom', 'gost2001cc'),
'1.2.643.2.9.1.6.1': ('GOST 28147-89 Cryptocom ParamSet', 'id-Gost28147-89-cc'),
'1.2.643.2.9.1.8.1': ('GOST R 3410-2001 Parameter Set Cryptocom', 'id-GostR3410-2001-ParamSet-cc'),
'1.2.643.3.131.1.1': ('INN', 'INN'),
'1.2.643.7.1': ('id-tc26', ),
'1.2.643.7.1.1': ('id-tc26-algorithms', ),
'1.2.643.7.1.1.1': ('id-tc26-sign', ),
'1.2.643.7.1.1.1.1': ('GOST R 34.10-2012 with 256 bit modulus', 'gost2012_256'),
'1.2.643.7.1.1.1.2': ('GOST R 34.10-2012 with 512 bit modulus', 'gost2012_512'),
'1.2.643.7.1.1.2': ('id-tc26-digest', ),
'1.2.643.7.1.1.2.2': ('GOST R 34.11-2012 with 256 bit hash', 'md_gost12_256'),
'1.2.643.7.1.1.2.3': ('GOST R 34.11-2012 with 512 bit hash', 'md_gost12_512'),
'1.2.643.7.1.1.3': ('id-tc26-signwithdigest', ),
'1.2.643.7.1.1.3.2': ('GOST R 34.10-2012 with GOST R 34.11-2012 (256 bit)', 'id-tc26-signwithdigest-gost3410-2012-256'),
'1.2.643.7.1.1.3.3': ('GOST R 34.10-2012 with GOST R 34.11-2012 (512 bit)', 'id-tc26-signwithdigest-gost3410-2012-512'),
'1.2.643.7.1.1.4': ('id-tc26-mac', ),
'1.2.643.7.1.1.4.1': ('HMAC GOST 34.11-2012 256 bit', 'id-tc26-hmac-gost-3411-2012-256'),
'1.2.643.7.1.1.4.2': ('HMAC GOST 34.11-2012 512 bit', 'id-tc26-hmac-gost-3411-2012-512'),
'1.2.643.7.1.1.5': ('id-tc26-cipher', ),
'1.2.643.7.1.1.5.1': ('id-tc26-cipher-gostr3412-2015-magma', ),
'1.2.643.7.1.1.5.1.1': ('id-tc26-cipher-gostr3412-2015-magma-ctracpkm', ),
'1.2.643.7.1.1.5.1.2': ('id-tc26-cipher-gostr3412-2015-magma-ctracpkm-omac', ),
'1.2.643.7.1.1.5.2': ('id-tc26-cipher-gostr3412-2015-kuznyechik', ),
'1.2.643.7.1.1.5.2.1': ('id-tc26-cipher-gostr3412-2015-kuznyechik-ctracpkm', ),
'1.2.643.7.1.1.5.2.2': ('id-tc26-cipher-gostr3412-2015-kuznyechik-ctracpkm-omac', ),
'1.2.643.7.1.1.6': ('id-tc26-agreement', ),
'1.2.643.7.1.1.6.1': ('id-tc26-agreement-gost-3410-2012-256', ),
'1.2.643.7.1.1.6.2': ('id-tc26-agreement-gost-3410-2012-512', ),
'1.2.643.7.1.1.7': ('id-tc26-wrap', ),
'1.2.643.7.1.1.7.1': ('id-tc26-wrap-gostr3412-2015-magma', ),
'1.2.643.7.1.1.7.1.1': ('id-tc26-wrap-gostr3412-2015-magma-kexp15', 'id-tc26-wrap-gostr3412-2015-kuznyechik-kexp15'),
'1.2.643.7.1.1.7.2': ('id-tc26-wrap-gostr3412-2015-kuznyechik', ),
'1.2.643.7.1.2': ('id-tc26-constants', ),
'1.2.643.7.1.2.1': ('id-tc26-sign-constants', ),
'1.2.643.7.1.2.1.1': ('id-tc26-gost-3410-2012-256-constants', ),
'1.2.643.7.1.2.1.1.1': ('GOST R 34.10-2012 (256 bit) ParamSet A', 'id-tc26-gost-3410-2012-256-paramSetA'),
'1.2.643.7.1.2.1.1.2': ('GOST R 34.10-2012 (256 bit) ParamSet B', 'id-tc26-gost-3410-2012-256-paramSetB'),
'1.2.643.7.1.2.1.1.3': ('GOST R 34.10-2012 (256 bit) ParamSet C', 'id-tc26-gost-3410-2012-256-paramSetC'),
'1.2.643.7.1.2.1.1.4': ('GOST R 34.10-2012 (256 bit) ParamSet D', 'id-tc26-gost-3410-2012-256-paramSetD'),
'1.2.643.7.1.2.1.2': ('id-tc26-gost-3410-2012-512-constants', ),
'1.2.643.7.1.2.1.2.0': ('GOST R 34.10-2012 (512 bit) testing parameter set', 'id-tc26-gost-3410-2012-512-paramSetTest'),
'1.2.643.7.1.2.1.2.1': ('GOST R 34.10-2012 (512 bit) ParamSet A', 'id-tc26-gost-3410-2012-512-paramSetA'),
'1.2.643.7.1.2.1.2.2': ('GOST R 34.10-2012 (512 bit) ParamSet B', 'id-tc26-gost-3410-2012-512-paramSetB'),
'1.2.643.7.1.2.1.2.3': ('GOST R 34.10-2012 (512 bit) ParamSet C', 'id-tc26-gost-3410-2012-512-paramSetC'),
'1.2.643.7.1.2.2': ('id-tc26-digest-constants', ),
'1.2.643.7.1.2.5': ('id-tc26-cipher-constants', ),
'1.2.643.7.1.2.5.1': ('id-tc26-gost-28147-constants', ),
'1.2.643.7.1.2.5.1.1': ('GOST 28147-89 TC26 parameter set', 'id-tc26-gost-28147-param-Z'),
'1.2.643.100.1': ('OGRN', 'OGRN'),
'1.2.643.100.3': ('SNILS', 'SNILS'),
'1.2.643.100.111': ('Signing Tool of Subject', 'subjectSignTool'),
'1.2.643.100.112': ('Signing Tool of Issuer', 'issuerSignTool'),
'1.2.804': ('ISO-UA', ),
'1.2.804.2.1.1.1': ('ua-pki', ),
'1.2.804.2.1.1.1.1.1.1': ('DSTU Gost 28147-2009', 'dstu28147'),
'1.2.804.2.1.1.1.1.1.1.2': ('DSTU Gost 28147-2009 OFB mode', 'dstu28147-ofb'),
'1.2.804.2.1.1.1.1.1.1.3': ('DSTU Gost 28147-2009 CFB mode', 'dstu28147-cfb'),
'1.2.804.2.1.1.1.1.1.1.5': ('DSTU Gost 28147-2009 key wrap', 'dstu28147-wrap'),
'1.2.804.2.1.1.1.1.1.2': ('HMAC DSTU Gost 34311-95', 'hmacWithDstu34311'),
'1.2.804.2.1.1.1.1.2.1': ('DSTU Gost 34311-95', 'dstu34311'),
'1.2.804.2.1.1.1.1.3.1.1': ('DSTU 4145-2002 little endian', 'dstu4145le'),
'1.2.804.2.1.1.1.1.3.1.1.1.1': ('DSTU 4145-2002 big endian', 'dstu4145be'),
'1.2.804.2.1.1.1.1.3.1.1.2.0': ('DSTU curve 0', 'uacurve0'),
'1.2.804.2.1.1.1.1.3.1.1.2.1': ('DSTU curve 1', 'uacurve1'),
'1.2.804.2.1.1.1.1.3.1.1.2.2': ('DSTU curve 2', 'uacurve2'),
'1.2.804.2.1.1.1.1.3.1.1.2.3': ('DSTU curve 3', 'uacurve3'),
'1.2.804.2.1.1.1.1.3.1.1.2.4': ('DSTU curve 4', 'uacurve4'),
'1.2.804.2.1.1.1.1.3.1.1.2.5': ('DSTU curve 5', 'uacurve5'),
'1.2.804.2.1.1.1.1.3.1.1.2.6': ('DSTU curve 6', 'uacurve6'),
'1.2.804.2.1.1.1.1.3.1.1.2.7': ('DSTU curve 7', 'uacurve7'),
'1.2.804.2.1.1.1.1.3.1.1.2.8': ('DSTU curve 8', 'uacurve8'),
'1.2.804.2.1.1.1.1.3.1.1.2.9': ('DSTU curve 9', 'uacurve9'),
'1.2.840': ('ISO US Member Body', 'ISO-US'),
'1.2.840.10040': ('X9.57', 'X9-57'),
'1.2.840.10040.2': ('holdInstruction', ),
'1.2.840.10040.2.1': ('Hold Instruction None', 'holdInstructionNone'),
'1.2.840.10040.2.2': ('Hold Instruction Call Issuer', 'holdInstructionCallIssuer'),
'1.2.840.10040.2.3': ('Hold Instruction Reject', 'holdInstructionReject'),
'1.2.840.10040.4': ('X9.57 CM ?', 'X9cm'),
'1.2.840.10040.4.1': ('dsaEncryption', 'DSA'),
'1.2.840.10040.4.3': ('dsaWithSHA1', 'DSA-SHA1'),
'1.2.840.10045': ('ANSI X9.62', 'ansi-X9-62'),
'1.2.840.10045.1': ('id-fieldType', ),
'1.2.840.10045.1.1': ('prime-field', ),
'1.2.840.10045.1.2': ('characteristic-two-field', ),
'1.2.840.10045.1.2.3': ('id-characteristic-two-basis', ),
'1.2.840.10045.1.2.3.1': ('onBasis', ),
'1.2.840.10045.1.2.3.2': ('tpBasis', ),
'1.2.840.10045.1.2.3.3': ('ppBasis', ),
'1.2.840.10045.2': ('id-publicKeyType', ),
'1.2.840.10045.2.1': ('id-ecPublicKey', ),
'1.2.840.10045.3': ('ellipticCurve', ),
'1.2.840.10045.3.0': ('c-TwoCurve', ),
'1.2.840.10045.3.0.1': ('c2pnb163v1', ),
'1.2.840.10045.3.0.2': ('c2pnb163v2', ),
'1.2.840.10045.3.0.3': ('c2pnb163v3', ),
'1.2.840.10045.3.0.4': ('c2pnb176v1', ),
'1.2.840.10045.3.0.5': ('c2tnb191v1', ),
'1.2.840.10045.3.0.6': ('c2tnb191v2', ),
'1.2.840.10045.3.0.7': ('c2tnb191v3', ),
'1.2.840.10045.3.0.8': ('c2onb191v4', ),
'1.2.840.10045.3.0.9': ('c2onb191v5', ),
'1.2.840.10045.3.0.10': ('c2pnb208w1', ),
'1.2.840.10045.3.0.11': ('c2tnb239v1', ),
'1.2.840.10045.3.0.12': ('c2tnb239v2', ),
'1.2.840.10045.3.0.13': ('c2tnb239v3', ),
'1.2.840.10045.3.0.14': ('c2onb239v4', ),
'1.2.840.10045.3.0.15': ('c2onb239v5', ),
'1.2.840.10045.3.0.16': ('c2pnb272w1', ),
'1.2.840.10045.3.0.17': ('c2pnb304w1', ),
'1.2.840.10045.3.0.18': ('c2tnb359v1', ),
'1.2.840.10045.3.0.19': ('c2pnb368w1', ),
'1.2.840.10045.3.0.20': ('c2tnb431r1', ),
'1.2.840.10045.3.1': ('primeCurve', ),
'1.2.840.10045.3.1.1': ('prime192v1', ),
'1.2.840.10045.3.1.2': ('prime192v2', ),
'1.2.840.10045.3.1.3': ('prime192v3', ),
'1.2.840.10045.3.1.4': ('prime239v1', ),
'1.2.840.10045.3.1.5': ('prime239v2', ),
'1.2.840.10045.3.1.6': ('prime239v3', ),
'1.2.840.10045.3.1.7': ('prime256v1', ),
'1.2.840.10045.4': ('id-ecSigType', ),
'1.2.840.10045.4.1': ('ecdsa-with-SHA1', ),
'1.2.840.10045.4.2': ('ecdsa-with-Recommended', ),
'1.2.840.10045.4.3': ('ecdsa-with-Specified', ),
'1.2.840.10045.4.3.1': ('ecdsa-with-SHA224', ),
'1.2.840.10045.4.3.2': ('ecdsa-with-SHA256', ),
'1.2.840.10045.4.3.3': ('ecdsa-with-SHA384', ),
'1.2.840.10045.4.3.4': ('ecdsa-with-SHA512', ),
'1.2.840.10046.2.1': ('X9.42 DH', 'dhpublicnumber'),
'1.2.840.113533.7.66.10': ('cast5-cbc', 'CAST5-CBC'),
'1.2.840.113533.7.66.12': ('pbeWithMD5AndCast5CBC', ),
'1.2.840.113533.7.66.13': ('password based MAC', 'id-PasswordBasedMAC'),
'1.2.840.113533.7.66.30': ('Diffie-Hellman based MAC', 'id-DHBasedMac'),
'1.2.840.113549': ('RSA Data Security, Inc.', 'rsadsi'),
'1.2.840.113549.1': ('RSA Data Security, Inc. PKCS', 'pkcs'),
'1.2.840.113549.1.1': ('pkcs1', ),
'1.2.840.113549.1.1.1': ('rsaEncryption', ),
'1.2.840.113549.1.1.2': ('md2WithRSAEncryption', 'RSA-MD2'),
'1.2.840.113549.1.1.3': ('md4WithRSAEncryption', 'RSA-MD4'),
'1.2.840.113549.1.1.4': ('md5WithRSAEncryption', 'RSA-MD5'),
'1.2.840.113549.1.1.5': ('sha1WithRSAEncryption', 'RSA-SHA1'),
'1.2.840.113549.1.1.6': ('rsaOAEPEncryptionSET', ),
'1.2.840.113549.1.1.7': ('rsaesOaep', 'RSAES-OAEP'),
'1.2.840.113549.1.1.8': ('mgf1', 'MGF1'),
'1.2.840.113549.1.1.9': ('pSpecified', 'PSPECIFIED'),
'1.2.840.113549.1.1.10': ('rsassaPss', 'RSASSA-PSS'),
'1.2.840.113549.1.1.11': ('sha256WithRSAEncryption', 'RSA-SHA256'),
'1.2.840.113549.1.1.12': ('sha384WithRSAEncryption', 'RSA-SHA384'),
'1.2.840.113549.1.1.13': ('sha512WithRSAEncryption', 'RSA-SHA512'),
'1.2.840.113549.1.1.14': ('sha224WithRSAEncryption', 'RSA-SHA224'),
'1.2.840.113549.1.1.15': ('sha512-224WithRSAEncryption', 'RSA-SHA512/224'),
'1.2.840.113549.1.1.16': ('sha512-256WithRSAEncryption', 'RSA-SHA512/256'),
'1.2.840.113549.1.3': ('pkcs3', ),
'1.2.840.113549.1.3.1': ('dhKeyAgreement', ),
'1.2.840.113549.1.5': ('pkcs5', ),
'1.2.840.113549.1.5.1': ('pbeWithMD2AndDES-CBC', 'PBE-MD2-DES'),
'1.2.840.113549.1.5.3': ('pbeWithMD5AndDES-CBC', 'PBE-MD5-DES'),
'1.2.840.113549.1.5.4': ('pbeWithMD2AndRC2-CBC', 'PBE-MD2-RC2-64'),
'1.2.840.113549.1.5.6': ('pbeWithMD5AndRC2-CBC', 'PBE-MD5-RC2-64'),
'1.2.840.113549.1.5.10': ('pbeWithSHA1AndDES-CBC', 'PBE-SHA1-DES'),
'1.2.840.113549.1.5.11': ('pbeWithSHA1AndRC2-CBC', 'PBE-SHA1-RC2-64'),
'1.2.840.113549.1.5.12': ('PBKDF2', ),
'1.2.840.113549.1.5.13': ('PBES2', ),
'1.2.840.113549.1.5.14': ('PBMAC1', ),
'1.2.840.113549.1.7': ('pkcs7', ),
'1.2.840.113549.1.7.1': ('pkcs7-data', ),
'1.2.840.113549.1.7.2': ('pkcs7-signedData', ),
'1.2.840.113549.1.7.3': ('pkcs7-envelopedData', ),
'1.2.840.113549.1.7.4': ('pkcs7-signedAndEnvelopedData', ),
'1.2.840.113549.1.7.5': ('pkcs7-digestData', ),
'1.2.840.113549.1.7.6': ('pkcs7-encryptedData', ),
'1.2.840.113549.1.9': ('pkcs9', ),
'1.2.840.113549.1.9.1': ('emailAddress', ),
'1.2.840.113549.1.9.2': ('unstructuredName', ),
'1.2.840.113549.1.9.3': ('contentType', ),
'1.2.840.113549.1.9.4': ('messageDigest', ),
'1.2.840.113549.1.9.5': ('signingTime', ),
'1.2.840.113549.1.9.6': ('countersignature', ),
'1.2.840.113549.1.9.7': ('challengePassword', ),
'1.2.840.113549.1.9.8': ('unstructuredAddress', ),
'1.2.840.113549.1.9.9': ('extendedCertificateAttributes', ),
'1.2.840.113549.1.9.14': ('Extension Request', 'extReq'),
'1.2.840.113549.1.9.15': ('S/MIME Capabilities', 'SMIME-CAPS'),
'1.2.840.113549.1.9.16': ('S/MIME', 'SMIME'),
'1.2.840.113549.1.9.16.0': ('id-smime-mod', ),
'1.2.840.113549.1.9.16.0.1': ('id-smime-mod-cms', ),
'1.2.840.113549.1.9.16.0.2': ('id-smime-mod-ess', ),
'1.2.840.113549.1.9.16.0.3': ('id-smime-mod-oid', ),
'1.2.840.113549.1.9.16.0.4': ('id-smime-mod-msg-v3', ),
'1.2.840.113549.1.9.16.0.5': ('id-smime-mod-ets-eSignature-88', ),
'1.2.840.113549.1.9.16.0.6': ('id-smime-mod-ets-eSignature-97', ),
'1.2.840.113549.1.9.16.0.7': ('id-smime-mod-ets-eSigPolicy-88', ),
'1.2.840.113549.1.9.16.0.8': ('id-smime-mod-ets-eSigPolicy-97', ),
'1.2.840.113549.1.9.16.1': ('id-smime-ct', ),
'1.2.840.113549.1.9.16.1.1': ('id-smime-ct-receipt', ),
'1.2.840.113549.1.9.16.1.2': ('id-smime-ct-authData', ),
'1.2.840.113549.1.9.16.1.3': ('id-smime-ct-publishCert', ),
'1.2.840.113549.1.9.16.1.4': ('id-smime-ct-TSTInfo', ),
'1.2.840.113549.1.9.16.1.5': ('id-smime-ct-TDTInfo', ),
'1.2.840.113549.1.9.16.1.6': ('id-smime-ct-contentInfo', ),
'1.2.840.113549.1.9.16.1.7': ('id-smime-ct-DVCSRequestData', ),
'1.2.840.113549.1.9.16.1.8': ('id-smime-ct-DVCSResponseData', ),
'1.2.840.113549.1.9.16.1.9': ('id-smime-ct-compressedData', ),
'1.2.840.113549.1.9.16.1.19': ('id-smime-ct-contentCollection', ),
'1.2.840.113549.1.9.16.1.23': ('id-smime-ct-authEnvelopedData', ),
'1.2.840.113549.1.9.16.1.27': ('id-ct-asciiTextWithCRLF', ),
'1.2.840.113549.1.9.16.1.28': ('id-ct-xml', ),
'1.2.840.113549.1.9.16.2': ('id-smime-aa', ),
'1.2.840.113549.1.9.16.2.1': ('id-smime-aa-receiptRequest', ),
'1.2.840.113549.1.9.16.2.2': ('id-smime-aa-securityLabel', ),
'1.2.840.113549.1.9.16.2.3': ('id-smime-aa-mlExpandHistory', ),
'1.2.840.113549.1.9.16.2.4': ('id-smime-aa-contentHint', ),
'1.2.840.113549.1.9.16.2.5': ('id-smime-aa-msgSigDigest', ),
'1.2.840.113549.1.9.16.2.6': ('id-smime-aa-encapContentType', ),
'1.2.840.113549.1.9.16.2.7': ('id-smime-aa-contentIdentifier', ),
'1.2.840.113549.1.9.16.2.8': ('id-smime-aa-macValue', ),
'1.2.840.113549.1.9.16.2.9': ('id-smime-aa-equivalentLabels', ),
'1.2.840.113549.1.9.16.2.10': ('id-smime-aa-contentReference', ),
'1.2.840.113549.1.9.16.2.11': ('id-smime-aa-encrypKeyPref', ),
'1.2.840.113549.1.9.16.2.12': ('id-smime-aa-signingCertificate', ),
'1.2.840.113549.1.9.16.2.13': ('id-smime-aa-smimeEncryptCerts', ),
'1.2.840.113549.1.9.16.2.14': ('id-smime-aa-timeStampToken', ),
'1.2.840.113549.1.9.16.2.15': ('id-smime-aa-ets-sigPolicyId', ),
'1.2.840.113549.1.9.16.2.16': ('id-smime-aa-ets-commitmentType', ),
'1.2.840.113549.1.9.16.2.17': ('id-smime-aa-ets-signerLocation', ),
'1.2.840.113549.1.9.16.2.18': ('id-smime-aa-ets-signerAttr', ),
'1.2.840.113549.1.9.16.2.19': ('id-smime-aa-ets-otherSigCert', ),
'1.2.840.113549.1.9.16.2.20': ('id-smime-aa-ets-contentTimestamp', ),
'1.2.840.113549.1.9.16.2.21': ('id-smime-aa-ets-CertificateRefs', ),
'1.2.840.113549.1.9.16.2.22': ('id-smime-aa-ets-RevocationRefs', ),
'1.2.840.113549.1.9.16.2.23': ('id-smime-aa-ets-certValues', ),
'1.2.840.113549.1.9.16.2.24': ('id-smime-aa-ets-revocationValues', ),
'1.2.840.113549.1.9.16.2.25': ('id-smime-aa-ets-escTimeStamp', ),
'1.2.840.113549.1.9.16.2.26': ('id-smime-aa-ets-certCRLTimestamp', ),
'1.2.840.113549.1.9.16.2.27': ('id-smime-aa-ets-archiveTimeStamp', ),
'1.2.840.113549.1.9.16.2.28': ('id-smime-aa-signatureType', ),
'1.2.840.113549.1.9.16.2.29': ('id-smime-aa-dvcs-dvc', ),
'1.2.840.113549.1.9.16.2.47': ('id-smime-aa-signingCertificateV2', ),
'1.2.840.113549.1.9.16.3': ('id-smime-alg', ),
'1.2.840.113549.1.9.16.3.1': ('id-smime-alg-ESDHwith3DES', ),
'1.2.840.113549.1.9.16.3.2': ('id-smime-alg-ESDHwithRC2', ),
'1.2.840.113549.1.9.16.3.3': ('id-smime-alg-3DESwrap', ),
'1.2.840.113549.1.9.16.3.4': ('id-smime-alg-RC2wrap', ),
'1.2.840.113549.1.9.16.3.5': ('id-smime-alg-ESDH', ),
'1.2.840.113549.1.9.16.3.6': ('id-smime-alg-CMS3DESwrap', ),
'1.2.840.113549.1.9.16.3.7': ('id-smime-alg-CMSRC2wrap', ),
'1.2.840.113549.1.9.16.3.8': ('zlib compression', 'ZLIB'),
'1.2.840.113549.1.9.16.3.9': ('id-alg-PWRI-KEK', ),
'1.2.840.113549.1.9.16.4': ('id-smime-cd', ),
'1.2.840.113549.1.9.16.4.1': ('id-smime-cd-ldap', ),
'1.2.840.113549.1.9.16.5': ('id-smime-spq', ),
'1.2.840.113549.1.9.16.5.1': ('id-smime-spq-ets-sqt-uri', ),
'1.2.840.113549.1.9.16.5.2': ('id-smime-spq-ets-sqt-unotice', ),
'1.2.840.113549.1.9.16.6': ('id-smime-cti', ),
'1.2.840.113549.1.9.16.6.1': ('id-smime-cti-ets-proofOfOrigin', ),
'1.2.840.113549.1.9.16.6.2': ('id-smime-cti-ets-proofOfReceipt', ),
'1.2.840.113549.1.9.16.6.3': ('id-smime-cti-ets-proofOfDelivery', ),
'1.2.840.113549.1.9.16.6.4': ('id-smime-cti-ets-proofOfSender', ),
'1.2.840.113549.1.9.16.6.5': ('id-smime-cti-ets-proofOfApproval', ),
'1.2.840.113549.1.9.16.6.6': ('id-smime-cti-ets-proofOfCreation', ),
'1.2.840.113549.1.9.20': ('friendlyName', ),
'1.2.840.113549.1.9.21': ('localKeyID', ),
'1.2.840.113549.1.9.22': ('certTypes', ),
'1.2.840.113549.1.9.22.1': ('x509Certificate', ),
'1.2.840.113549.1.9.22.2': ('sdsiCertificate', ),
'1.2.840.113549.1.9.23': ('crlTypes', ),
'1.2.840.113549.1.9.23.1': ('x509Crl', ),
'1.2.840.113549.1.12': ('pkcs12', ),
'1.2.840.113549.1.12.1': ('pkcs12-pbeids', ),
'1.2.840.113549.1.12.1.1': ('pbeWithSHA1And128BitRC4', 'PBE-SHA1-RC4-128'),
'1.2.840.113549.1.12.1.2': ('pbeWithSHA1And40BitRC4', 'PBE-SHA1-RC4-40'),
'1.2.840.113549.1.12.1.3': ('pbeWithSHA1And3-KeyTripleDES-CBC', 'PBE-SHA1-3DES'),
'1.2.840.113549.1.12.1.4': ('pbeWithSHA1And2-KeyTripleDES-CBC', 'PBE-SHA1-2DES'),
'1.2.840.113549.1.12.1.5': ('pbeWithSHA1And128BitRC2-CBC', 'PBE-SHA1-RC2-128'),
'1.2.840.113549.1.12.1.6': ('pbeWithSHA1And40BitRC2-CBC', 'PBE-SHA1-RC2-40'),
'1.2.840.113549.1.12.10': ('pkcs12-Version1', ),
'1.2.840.113549.1.12.10.1': ('pkcs12-BagIds', ),
'1.2.840.113549.1.12.10.1.1': ('keyBag', ),
'1.2.840.113549.1.12.10.1.2': ('pkcs8ShroudedKeyBag', ),
'1.2.840.113549.1.12.10.1.3': ('certBag', ),
'1.2.840.113549.1.12.10.1.4': ('crlBag', ),
'1.2.840.113549.1.12.10.1.5': ('secretBag', ),
'1.2.840.113549.1.12.10.1.6': ('safeContentsBag', ),
'1.2.840.113549.2.2': ('md2', 'MD2'),
'1.2.840.113549.2.4': ('md4', 'MD4'),
'1.2.840.113549.2.5': ('md5', 'MD5'),
'1.2.840.113549.2.6': ('hmacWithMD5', ),
'1.2.840.113549.2.7': ('hmacWithSHA1', ),
'1.2.840.113549.2.8': ('hmacWithSHA224', ),
'1.2.840.113549.2.9': ('hmacWithSHA256', ),
'1.2.840.113549.2.10': ('hmacWithSHA384', ),
'1.2.840.113549.2.11': ('hmacWithSHA512', ),
'1.2.840.113549.2.12': ('hmacWithSHA512-224', ),
'1.2.840.113549.2.13': ('hmacWithSHA512-256', ),
'1.2.840.113549.3.2': ('rc2-cbc', 'RC2-CBC'),
'1.2.840.113549.3.4': ('rc4', 'RC4'),
'1.2.840.113549.3.7': ('des-ede3-cbc', 'DES-EDE3-CBC'),
'1.2.840.113549.3.8': ('rc5-cbc', 'RC5-CBC'),
'1.2.840.113549.3.10': ('des-cdmf', 'DES-CDMF'),
'1.3': ('identified-organization', 'org', 'ORG'),
'1.3.6': ('dod', 'DOD'),
'1.3.6.1': ('iana', 'IANA', 'internet'),
'1.3.6.1.1': ('Directory', 'directory'),
'1.3.6.1.2': ('Management', 'mgmt'),
'1.3.6.1.3': ('Experimental', 'experimental'),
'1.3.6.1.4': ('Private', 'private'),
'1.3.6.1.4.1': ('Enterprises', 'enterprises'),
'1.3.6.1.4.1.188.7.1.1.2': ('idea-cbc', 'IDEA-CBC'),
'1.3.6.1.4.1.311.2.1.14': ('Microsoft Extension Request', 'msExtReq'),
'1.3.6.1.4.1.311.2.1.21': ('Microsoft Individual Code Signing', 'msCodeInd'),
'1.3.6.1.4.1.311.2.1.22': ('Microsoft Commercial Code Signing', 'msCodeCom'),
'1.3.6.1.4.1.311.10.3.1': ('Microsoft Trust List Signing', 'msCTLSign'),
'1.3.6.1.4.1.311.10.3.3': ('Microsoft Server Gated Crypto', 'msSGC'),
'1.3.6.1.4.1.311.10.3.4': ('Microsoft Encrypted File System', 'msEFS'),
'1.3.6.1.4.1.311.17.1': ('Microsoft CSP Name', 'CSPName'),
'1.3.6.1.4.1.311.17.2': ('Microsoft Local Key set', 'LocalKeySet'),
'1.3.6.1.4.1.311.20.2.2': ('Microsoft Smartcardlogin', 'msSmartcardLogin'),
'1.3.6.1.4.1.311.20.2.3': ('Microsoft Universal Principal Name', 'msUPN'),
'1.3.6.1.4.1.311.60.2.1.1': ('jurisdictionLocalityName', 'jurisdictionL'),
'1.3.6.1.4.1.311.60.2.1.2': ('jurisdictionStateOrProvinceName', 'jurisdictionST'),
'1.3.6.1.4.1.311.60.2.1.3': ('jurisdictionCountryName', 'jurisdictionC'),
'1.3.6.1.4.1.1466.344': ('dcObject', 'dcobject'),
'1.3.6.1.4.1.1722.12.2.1.16': ('blake2b512', 'BLAKE2b512'),
'1.3.6.1.4.1.1722.12.2.2.8': ('blake2s256', 'BLAKE2s256'),
'1.3.6.1.4.1.3029.1.2': ('bf-cbc', 'BF-CBC'),
'1.3.6.1.4.1.11129.2.4.2': ('CT Precertificate SCTs', 'ct_precert_scts'),
'1.3.6.1.4.1.11129.2.4.3': ('CT Precertificate Poison', 'ct_precert_poison'),
'1.3.6.1.4.1.11129.2.4.4': ('CT Precertificate Signer', 'ct_precert_signer'),
'1.3.6.1.4.1.11129.2.4.5': ('CT Certificate SCTs', 'ct_cert_scts'),
'1.3.6.1.4.1.11591.4.11': ('scrypt', 'id-scrypt'),
'1.3.6.1.5': ('Security', 'security'),
'1.3.6.1.5.2.3': ('id-pkinit', ),
'1.3.6.1.5.2.3.4': ('PKINIT Client Auth', 'pkInitClientAuth'),
'1.3.6.1.5.2.3.5': ('Signing KDC Response', 'pkInitKDC'),
'1.3.6.1.5.5.7': ('PKIX', ),
'1.3.6.1.5.5.7.0': ('id-pkix-mod', ),
'1.3.6.1.5.5.7.0.1': ('id-pkix1-explicit-88', ),
'1.3.6.1.5.5.7.0.2': ('id-pkix1-implicit-88', ),
'1.3.6.1.5.5.7.0.3': ('id-pkix1-explicit-93', ),
'1.3.6.1.5.5.7.0.4': ('id-pkix1-implicit-93', ),
'1.3.6.1.5.5.7.0.5': ('id-mod-crmf', ),
'1.3.6.1.5.5.7.0.6': ('id-mod-cmc', ),
'1.3.6.1.5.5.7.0.7': ('id-mod-kea-profile-88', ),
'1.3.6.1.5.5.7.0.8': ('id-mod-kea-profile-93', ),
'1.3.6.1.5.5.7.0.9': ('id-mod-cmp', ),
'1.3.6.1.5.5.7.0.10': ('id-mod-qualified-cert-88', ),
'1.3.6.1.5.5.7.0.11': ('id-mod-qualified-cert-93', ),
'1.3.6.1.5.5.7.0.12': ('id-mod-attribute-cert', ),
'1.3.6.1.5.5.7.0.13': ('id-mod-timestamp-protocol', ),
'1.3.6.1.5.5.7.0.14': ('id-mod-ocsp', ),
'1.3.6.1.5.5.7.0.15': ('id-mod-dvcs', ),
'1.3.6.1.5.5.7.0.16': ('id-mod-cmp2000', ),
'1.3.6.1.5.5.7.1': ('id-pe', ),
'1.3.6.1.5.5.7.1.1': ('Authority Information Access', 'authorityInfoAccess'),
'1.3.6.1.5.5.7.1.2': ('Biometric Info', 'biometricInfo'),
'1.3.6.1.5.5.7.1.3': ('qcStatements', ),
'1.3.6.1.5.5.7.1.4': ('ac-auditEntity', ),
'1.3.6.1.5.5.7.1.5': ('ac-targeting', ),
'1.3.6.1.5.5.7.1.6': ('aaControls', ),
'1.3.6.1.5.5.7.1.7': ('sbgp-ipAddrBlock', ),
'1.3.6.1.5.5.7.1.8': ('sbgp-autonomousSysNum', ),
'1.3.6.1.5.5.7.1.9': ('sbgp-routerIdentifier', ),
'1.3.6.1.5.5.7.1.10': ('ac-proxying', ),
'1.3.6.1.5.5.7.1.11': ('Subject Information Access', 'subjectInfoAccess'),
'1.3.6.1.5.5.7.1.14': ('Proxy Certificate Information', 'proxyCertInfo'),
'1.3.6.1.5.5.7.1.24': ('TLS Feature', 'tlsfeature'),
'1.3.6.1.5.5.7.2': ('id-qt', ),
'1.3.6.1.5.5.7.2.1': ('Policy Qualifier CPS', 'id-qt-cps'),
'1.3.6.1.5.5.7.2.2': ('Policy Qualifier User Notice', 'id-qt-unotice'),
'1.3.6.1.5.5.7.2.3': ('textNotice', ),
'1.3.6.1.5.5.7.3': ('id-kp', ),
'1.3.6.1.5.5.7.3.1': ('TLS Web Server Authentication', 'serverAuth'),
'1.3.6.1.5.5.7.3.2': ('TLS Web Client Authentication', 'clientAuth'),
'1.3.6.1.5.5.7.3.3': ('Code Signing', 'codeSigning'),
'1.3.6.1.5.5.7.3.4': ('E-mail Protection', 'emailProtection'),
'1.3.6.1.5.5.7.3.5': ('IPSec End System', 'ipsecEndSystem'),
'1.3.6.1.5.5.7.3.6': ('IPSec Tunnel', 'ipsecTunnel'),
'1.3.6.1.5.5.7.3.7': ('IPSec User', 'ipsecUser'),
'1.3.6.1.5.5.7.3.8': ('Time Stamping', 'timeStamping'),
'1.3.6.1.5.5.7.3.9': ('OCSP Signing', 'OCSPSigning'),
'1.3.6.1.5.5.7.3.10': ('dvcs', 'DVCS'),
'1.3.6.1.5.5.7.3.17': ('ipsec Internet Key Exchange', 'ipsecIKE'),
'1.3.6.1.5.5.7.3.18': ('Ctrl/provision WAP Access', 'capwapAC'),
'1.3.6.1.5.5.7.3.19': ('Ctrl/Provision WAP Termination', 'capwapWTP'),
'1.3.6.1.5.5.7.3.21': ('SSH Client', 'secureShellClient'),
'1.3.6.1.5.5.7.3.22': ('SSH Server', 'secureShellServer'),
'1.3.6.1.5.5.7.3.23': ('Send Router', 'sendRouter'),
'1.3.6.1.5.5.7.3.24': ('Send Proxied Router', 'sendProxiedRouter'),
'1.3.6.1.5.5.7.3.25': ('Send Owner', 'sendOwner'),
'1.3.6.1.5.5.7.3.26': ('Send Proxied Owner', 'sendProxiedOwner'),
'1.3.6.1.5.5.7.3.27': ('CMC Certificate Authority', 'cmcCA'),
'1.3.6.1.5.5.7.3.28': ('CMC Registration Authority', 'cmcRA'),
'1.3.6.1.5.5.7.4': ('id-it', ),
'1.3.6.1.5.5.7.4.1': ('id-it-caProtEncCert', ),
'1.3.6.1.5.5.7.4.2': ('id-it-signKeyPairTypes', ),
'1.3.6.1.5.5.7.4.3': ('id-it-encKeyPairTypes', ),
'1.3.6.1.5.5.7.4.4': ('id-it-preferredSymmAlg', ),
'1.3.6.1.5.5.7.4.5': ('id-it-caKeyUpdateInfo', ),
'1.3.6.1.5.5.7.4.6': ('id-it-currentCRL', ),
'1.3.6.1.5.5.7.4.7': ('id-it-unsupportedOIDs', ),
'1.3.6.1.5.5.7.4.8': ('id-it-subscriptionRequest', ),
'1.3.6.1.5.5.7.4.9': ('id-it-subscriptionResponse', ),
'1.3.6.1.5.5.7.4.10': ('id-it-keyPairParamReq', ),
'1.3.6.1.5.5.7.4.11': ('id-it-keyPairParamRep', ),
'1.3.6.1.5.5.7.4.12': ('id-it-revPassphrase', ),
'1.3.6.1.5.5.7.4.13': ('id-it-implicitConfirm', ),
'1.3.6.1.5.5.7.4.14': ('id-it-confirmWaitTime', ),
'1.3.6.1.5.5.7.4.15': ('id-it-origPKIMessage', ),
'1.3.6.1.5.5.7.4.16': ('id-it-suppLangTags', ),
'1.3.6.1.5.5.7.5': ('id-pkip', ),
'1.3.6.1.5.5.7.5.1': ('id-regCtrl', ),
'1.3.6.1.5.5.7.5.1.1': ('id-regCtrl-regToken', ),
'1.3.6.1.5.5.7.5.1.2': ('id-regCtrl-authenticator', ),
'1.3.6.1.5.5.7.5.1.3': ('id-regCtrl-pkiPublicationInfo', ),
'1.3.6.1.5.5.7.5.1.4': ('id-regCtrl-pkiArchiveOptions', ),
'1.3.6.1.5.5.7.5.1.5': ('id-regCtrl-oldCertID', ),
'1.3.6.1.5.5.7.5.1.6': ('id-regCtrl-protocolEncrKey', ),
'1.3.6.1.5.5.7.5.2': ('id-regInfo', ),
'1.3.6.1.5.5.7.5.2.1': ('id-regInfo-utf8Pairs', ),
'1.3.6.1.5.5.7.5.2.2': ('id-regInfo-certReq', ),
'1.3.6.1.5.5.7.6': ('id-alg', ),
'1.3.6.1.5.5.7.6.1': ('id-alg-des40', ),
'1.3.6.1.5.5.7.6.2': ('id-alg-noSignature', ),
'1.3.6.1.5.5.7.6.3': ('id-alg-dh-sig-hmac-sha1', ),
'1.3.6.1.5.5.7.6.4': ('id-alg-dh-pop', ),
'1.3.6.1.5.5.7.7': ('id-cmc', ),
'1.3.6.1.5.5.7.7.1': ('id-cmc-statusInfo', ),
'1.3.6.1.5.5.7.7.2': ('id-cmc-identification', ),
'1.3.6.1.5.5.7.7.3': ('id-cmc-identityProof', ),
'1.3.6.1.5.5.7.7.4': ('id-cmc-dataReturn', ),
'1.3.6.1.5.5.7.7.5': ('id-cmc-transactionId', ),
'1.3.6.1.5.5.7.7.6': ('id-cmc-senderNonce', ),
'1.3.6.1.5.5.7.7.7': ('id-cmc-recipientNonce', ),
'1.3.6.1.5.5.7.7.8': ('id-cmc-addExtensions', ),
'1.3.6.1.5.5.7.7.9': ('id-cmc-encryptedPOP', ),
'1.3.6.1.5.5.7.7.10': ('id-cmc-decryptedPOP', ),
'1.3.6.1.5.5.7.7.11': ('id-cmc-lraPOPWitness', ),
'1.3.6.1.5.5.7.7.15': ('id-cmc-getCert', ),
'1.3.6.1.5.5.7.7.16': ('id-cmc-getCRL', ),
'1.3.6.1.5.5.7.7.17': ('id-cmc-revokeRequest', ),
'1.3.6.1.5.5.7.7.18': ('id-cmc-regInfo', ),
'1.3.6.1.5.5.7.7.19': ('id-cmc-responseInfo', ),
'1.3.6.1.5.5.7.7.21': ('id-cmc-queryPending', ),
'1.3.6.1.5.5.7.7.22': ('id-cmc-popLinkRandom', ),
'1.3.6.1.5.5.7.7.23': ('id-cmc-popLinkWitness', ),
'1.3.6.1.5.5.7.7.24': ('id-cmc-confirmCertAcceptance', ),
'1.3.6.1.5.5.7.8': ('id-on', ),
'1.3.6.1.5.5.7.8.1': ('id-on-personalData', ),
'1.3.6.1.5.5.7.8.3': ('Permanent Identifier', 'id-on-permanentIdentifier'),
'1.3.6.1.5.5.7.9': ('id-pda', ),
'1.3.6.1.5.5.7.9.1': ('id-pda-dateOfBirth', ),
'1.3.6.1.5.5.7.9.2': ('id-pda-placeOfBirth', ),
'1.3.6.1.5.5.7.9.3': ('id-pda-gender', ),
'1.3.6.1.5.5.7.9.4': ('id-pda-countryOfCitizenship', ),
'1.3.6.1.5.5.7.9.5': ('id-pda-countryOfResidence', ),
'1.3.6.1.5.5.7.10': ('id-aca', ),
'1.3.6.1.5.5.7.10.1': ('id-aca-authenticationInfo', ),
'1.3.6.1.5.5.7.10.2': ('id-aca-accessIdentity', ),
'1.3.6.1.5.5.7.10.3': ('id-aca-chargingIdentity', ),
'1.3.6.1.5.5.7.10.4': ('id-aca-group', ),
'1.3.6.1.5.5.7.10.5': ('id-aca-role', ),
'1.3.6.1.5.5.7.10.6': ('id-aca-encAttrs', ),
'1.3.6.1.5.5.7.11': ('id-qcs', ),
'1.3.6.1.5.5.7.11.1': ('id-qcs-pkixQCSyntax-v1', ),
'1.3.6.1.5.5.7.12': ('id-cct', ),
'1.3.6.1.5.5.7.12.1': ('id-cct-crs', ),
'1.3.6.1.5.5.7.12.2': ('id-cct-PKIData', ),
'1.3.6.1.5.5.7.12.3': ('id-cct-PKIResponse', ),
'1.3.6.1.5.5.7.21': ('id-ppl', ),
'1.3.6.1.5.5.7.21.0': ('Any language', 'id-ppl-anyLanguage'),
'1.3.6.1.5.5.7.21.1': ('Inherit all', 'id-ppl-inheritAll'),
'1.3.6.1.5.5.7.21.2': ('Independent', 'id-ppl-independent'),
'1.3.6.1.5.5.7.48': ('id-ad', ),
'1.3.6.1.5.5.7.48.1': ('OCSP', 'OCSP', 'id-pkix-OCSP'),
'1.3.6.1.5.5.7.48.1.1': ('Basic OCSP Response', 'basicOCSPResponse'),
'1.3.6.1.5.5.7.48.1.2': ('OCSP Nonce', 'Nonce'),
'1.3.6.1.5.5.7.48.1.3': ('OCSP CRL ID', 'CrlID'),
'1.3.6.1.5.5.7.48.1.4': ('Acceptable OCSP Responses', 'acceptableResponses'),
'1.3.6.1.5.5.7.48.1.5': ('OCSP No Check', 'noCheck'),
'1.3.6.1.5.5.7.48.1.6': ('OCSP Archive Cutoff', 'archiveCutoff'),
'1.3.6.1.5.5.7.48.1.7': ('OCSP Service Locator', 'serviceLocator'),
'1.3.6.1.5.5.7.48.1.8': ('Extended OCSP Status', 'extendedStatus'),
'1.3.6.1.5.5.7.48.1.9': ('valid', ),
'1.3.6.1.5.5.7.48.1.10': ('path', ),
'1.3.6.1.5.5.7.48.1.11': ('Trust Root', 'trustRoot'),
'1.3.6.1.5.5.7.48.2': ('CA Issuers', 'caIssuers'),
'1.3.6.1.5.5.7.48.3': ('AD Time Stamping', 'ad_timestamping'),
'1.3.6.1.5.5.7.48.4': ('ad dvcs', 'AD_DVCS'),
'1.3.6.1.5.5.7.48.5': ('CA Repository', 'caRepository'),
'1.3.6.1.5.5.8.1.1': ('hmac-md5', 'HMAC-MD5'),
'1.3.6.1.5.5.8.1.2': ('hmac-sha1', 'HMAC-SHA1'),
'1.3.6.1.6': ('SNMPv2', 'snmpv2'),
'1.3.6.1.7': ('Mail', ),
'1.3.6.1.7.1': ('MIME MHS', 'mime-mhs'),
'1.3.6.1.7.1.1': ('mime-mhs-headings', 'mime-mhs-headings'),
'1.3.6.1.7.1.1.1': ('id-hex-partial-message', 'id-hex-partial-message'),
'1.3.6.1.7.1.1.2': ('id-hex-multipart-message', 'id-hex-multipart-message'),
'1.3.6.1.7.1.2': ('mime-mhs-bodies', 'mime-mhs-bodies'),
'1.3.14.3.2': ('algorithm', 'algorithm'),
'1.3.14.3.2.3': ('md5WithRSA', 'RSA-NP-MD5'),
'1.3.14.3.2.6': ('des-ecb', 'DES-ECB'),
'1.3.14.3.2.7': ('des-cbc', 'DES-CBC'),
'1.3.14.3.2.8': ('des-ofb', 'DES-OFB'),
'1.3.14.3.2.9': ('des-cfb', 'DES-CFB'),
'1.3.14.3.2.11': ('rsaSignature', ),
'1.3.14.3.2.12': ('dsaEncryption-old', 'DSA-old'),
'1.3.14.3.2.13': ('dsaWithSHA', 'DSA-SHA'),
'1.3.14.3.2.15': ('shaWithRSAEncryption', 'RSA-SHA'),
'1.3.14.3.2.17': ('des-ede', 'DES-EDE'),
'1.3.14.3.2.18': ('sha', 'SHA'),
'1.3.14.3.2.26': ('sha1', 'SHA1'),
'1.3.14.3.2.27': ('dsaWithSHA1-old', 'DSA-SHA1-old'),
'1.3.14.3.2.29': ('sha1WithRSA', 'RSA-SHA1-2'),
'1.3.36.3.2.1': ('ripemd160', 'RIPEMD160'),
'1.3.36.3.3.1.2': ('ripemd160WithRSA', 'RSA-RIPEMD160'),
'1.3.36.3.3.2.8.1.1.1': ('brainpoolP160r1', ),
'1.3.36.3.3.2.8.1.1.2': ('brainpoolP160t1', ),
'1.3.36.3.3.2.8.1.1.3': ('brainpoolP192r1', ),
'1.3.36.3.3.2.8.1.1.4': ('brainpoolP192t1', ),
'1.3.36.3.3.2.8.1.1.5': ('brainpoolP224r1', ),
'1.3.36.3.3.2.8.1.1.6': ('brainpoolP224t1', ),
'1.3.36.3.3.2.8.1.1.7': ('brainpoolP256r1', ),
'1.3.36.3.3.2.8.1.1.8': ('brainpoolP256t1', ),
'1.3.36.3.3.2.8.1.1.9': ('brainpoolP320r1', ),
'1.3.36.3.3.2.8.1.1.10': ('brainpoolP320t1', ),
'1.3.36.3.3.2.8.1.1.11': ('brainpoolP384r1', ),
'1.3.36.3.3.2.8.1.1.12': ('brainpoolP384t1', ),
'1.3.36.3.3.2.8.1.1.13': ('brainpoolP512r1', ),
'1.3.36.3.3.2.8.1.1.14': ('brainpoolP512t1', ),
'1.3.36.8.3.3': ('Professional Information or basis for Admission', 'x509ExtAdmission'),
'1.3.101.1.4.1': ('Strong Extranet ID', 'SXNetID'),
'1.3.101.110': ('X25519', ),
'1.3.101.111': ('X448', ),
'1.3.101.112': ('ED25519', ),
'1.3.101.113': ('ED448', ),
'1.3.111': ('ieee', ),
'1.3.111.2.1619': ('IEEE Security in Storage Working Group', 'ieee-siswg'),
'1.3.111.2.1619.0.1.1': ('aes-128-xts', 'AES-128-XTS'),
'1.3.111.2.1619.0.1.2': ('aes-256-xts', 'AES-256-XTS'),
'1.3.132': ('certicom-arc', ),
'1.3.132.0': ('secg_ellipticCurve', ),
'1.3.132.0.1': ('sect163k1', ),
'1.3.132.0.2': ('sect163r1', ),
'1.3.132.0.3': ('sect239k1', ),
'1.3.132.0.4': ('sect113r1', ),
'1.3.132.0.5': ('sect113r2', ),
'1.3.132.0.6': ('secp112r1', ),
'1.3.132.0.7': ('secp112r2', ),
'1.3.132.0.8': ('secp160r1', ),
'1.3.132.0.9': ('secp160k1', ),
'1.3.132.0.10': ('secp256k1', ),
'1.3.132.0.15': ('sect163r2', ),
'1.3.132.0.16': ('sect283k1', ),
'1.3.132.0.17': ('sect283r1', ),
'1.3.132.0.22': ('sect131r1', ),
'1.3.132.0.23': ('sect131r2', ),
'1.3.132.0.24': ('sect193r1', ),
'1.3.132.0.25': ('sect193r2', ),
'1.3.132.0.26': ('sect233k1', ),
'1.3.132.0.27': ('sect233r1', ),
'1.3.132.0.28': ('secp128r1', ),
'1.3.132.0.29': ('secp128r2', ),
'1.3.132.0.30': ('secp160r2', ),
'1.3.132.0.31': ('secp192k1', ),
'1.3.132.0.32': ('secp224k1', ),
'1.3.132.0.33': ('secp224r1', ),
'1.3.132.0.34': ('secp384r1', ),
'1.3.132.0.35': ('secp521r1', ),
'1.3.132.0.36': ('sect409k1', ),
'1.3.132.0.37': ('sect409r1', ),
'1.3.132.0.38': ('sect571k1', ),
'1.3.132.0.39': ('sect571r1', ),
'1.3.132.1': ('secg-scheme', ),
'1.3.132.1.11.0': ('dhSinglePass-stdDH-sha224kdf-scheme', ),
'1.3.132.1.11.1': ('dhSinglePass-stdDH-sha256kdf-scheme', ),
'1.3.132.1.11.2': ('dhSinglePass-stdDH-sha384kdf-scheme', ),
'1.3.132.1.11.3': ('dhSinglePass-stdDH-sha512kdf-scheme', ),
'1.3.132.1.14.0': ('dhSinglePass-cofactorDH-sha224kdf-scheme', ),
'1.3.132.1.14.1': ('dhSinglePass-cofactorDH-sha256kdf-scheme', ),
'1.3.132.1.14.2': ('dhSinglePass-cofactorDH-sha384kdf-scheme', ),
'1.3.132.1.14.3': ('dhSinglePass-cofactorDH-sha512kdf-scheme', ),
'1.3.133.16.840.63.0': ('x9-63-scheme', ),
'1.3.133.16.840.63.0.2': ('dhSinglePass-stdDH-sha1kdf-scheme', ),
'1.3.133.16.840.63.0.3': ('dhSinglePass-cofactorDH-sha1kdf-scheme', ),
'2': ('joint-iso-itu-t', 'JOINT-ISO-ITU-T', 'joint-iso-ccitt'),
'2.5': ('directory services (X.500)', 'X500'),
'2.5.1.5': ('Selected Attribute Types', 'selected-attribute-types'),
'2.5.1.5.55': ('clearance', ),
'2.5.4': ('X509', ),
'2.5.4.3': ('commonName', 'CN'),
'2.5.4.4': ('surname', 'SN'),
'2.5.4.5': ('serialNumber', ),
'2.5.4.6': ('countryName', 'C'),
'2.5.4.7': ('localityName', 'L'),
'2.5.4.8': ('stateOrProvinceName', 'ST'),
'2.5.4.9': ('streetAddress', 'street'),
'2.5.4.10': ('organizationName', 'O'),
'2.5.4.11': ('organizationalUnitName', 'OU'),
'2.5.4.12': ('title', 'title'),
'2.5.4.13': ('description', ),
'2.5.4.14': ('searchGuide', ),
'2.5.4.15': ('businessCategory', ),
'2.5.4.16': ('postalAddress', ),
'2.5.4.17': ('postalCode', ),
'2.5.4.18': ('postOfficeBox', ),
'2.5.4.19': ('physicalDeliveryOfficeName', ),
'2.5.4.20': ('telephoneNumber', ),
'2.5.4.21': ('telexNumber', ),
'2.5.4.22': ('teletexTerminalIdentifier', ),
'2.5.4.23': ('facsimileTelephoneNumber', ),
'2.5.4.24': ('x121Address', ),
'2.5.4.25': ('internationaliSDNNumber', ),
'2.5.4.26': ('registeredAddress', ),
'2.5.4.27': ('destinationIndicator', ),
'2.5.4.28': ('preferredDeliveryMethod', ),
'2.5.4.29': ('presentationAddress', ),
'2.5.4.30': ('supportedApplicationContext', ),
'2.5.4.31': ('member', ),
'2.5.4.32': ('owner', ),
'2.5.4.33': ('roleOccupant', ),
'2.5.4.34': ('seeAlso', ),
'2.5.4.35': ('userPassword', ),
'2.5.4.36': ('userCertificate', ),
'2.5.4.37': ('cACertificate', ),
'2.5.4.38': ('authorityRevocationList', ),
'2.5.4.39': ('certificateRevocationList', ),
'2.5.4.40': ('crossCertificatePair', ),
'2.5.4.41': ('name', 'name'),
'2.5.4.42': ('givenName', 'GN'),
'2.5.4.43': ('initials', 'initials'),
'2.5.4.44': ('generationQualifier', ),
'2.5.4.45': ('x500UniqueIdentifier', ),
'2.5.4.46': ('dnQualifier', 'dnQualifier'),
'2.5.4.47': ('enhancedSearchGuide', ),
'2.5.4.48': ('protocolInformation', ),
'2.5.4.49': ('distinguishedName', ),
'2.5.4.50': ('uniqueMember', ),
'2.5.4.51': ('houseIdentifier', ),
'2.5.4.52': ('supportedAlgorithms', ),
'2.5.4.53': ('deltaRevocationList', ),
'2.5.4.54': ('dmdName', ),
'2.5.4.65': ('pseudonym', ),
'2.5.4.72': ('role', 'role'),
'2.5.4.97': ('organizationIdentifier', ),
'2.5.4.98': ('countryCode3c', 'c3'),
'2.5.4.99': ('countryCode3n', 'n3'),
'2.5.4.100': ('dnsName', ),
'2.5.8': ('directory services - algorithms', 'X500algorithms'),
'2.5.8.1.1': ('rsa', 'RSA'),
'2.5.8.3.100': ('mdc2WithRSA', 'RSA-MDC2'),
'2.5.8.3.101': ('mdc2', 'MDC2'),
'2.5.29': ('id-ce', ),
'2.5.29.9': ('X509v3 Subject Directory Attributes', 'subjectDirectoryAttributes'),
'2.5.29.14': ('X509v3 Subject Key Identifier', 'subjectKeyIdentifier'),
'2.5.29.15': ('X509v3 Key Usage', 'keyUsage'),
'2.5.29.16': ('X509v3 Private Key Usage Period', 'privateKeyUsagePeriod'),
'2.5.29.17': ('X509v3 Subject Alternative Name', 'subjectAltName'),
'2.5.29.18': ('X509v3 Issuer Alternative Name', 'issuerAltName'),
'2.5.29.19': ('X509v3 Basic Constraints', 'basicConstraints'),
'2.5.29.20': ('X509v3 CRL Number', 'crlNumber'),
'2.5.29.21': ('X509v3 CRL Reason Code', 'CRLReason'),
'2.5.29.23': ('Hold Instruction Code', 'holdInstructionCode'),
'2.5.29.24': ('Invalidity Date', 'invalidityDate'),
'2.5.29.27': ('X509v3 Delta CRL Indicator', 'deltaCRL'),
'2.5.29.28': ('X509v3 Issuing Distribution Point', 'issuingDistributionPoint'),
'2.5.29.29': ('X509v3 Certificate Issuer', 'certificateIssuer'),
'2.5.29.30': ('X509v3 Name Constraints', 'nameConstraints'),
'2.5.29.31': ('X509v3 CRL Distribution Points', 'crlDistributionPoints'),
'2.5.29.32': ('X509v3 Certificate Policies', 'certificatePolicies'),
'2.5.29.32.0': ('X509v3 Any Policy', 'anyPolicy'),
'2.5.29.33': ('X509v3 Policy Mappings', 'policyMappings'),
'2.5.29.35': ('X509v3 Authority Key Identifier', 'authorityKeyIdentifier'),
'2.5.29.36': ('X509v3 Policy Constraints', 'policyConstraints'),
'2.5.29.37': ('X509v3 Extended Key Usage', 'extendedKeyUsage'),
'2.5.29.37.0': ('Any Extended Key Usage', 'anyExtendedKeyUsage'),
'2.5.29.46': ('X509v3 Freshest CRL', 'freshestCRL'),
'2.5.29.54': ('X509v3 Inhibit Any Policy', 'inhibitAnyPolicy'),
'2.5.29.55': ('X509v3 AC Targeting', 'targetInformation'),
'2.5.29.56': ('X509v3 No Revocation Available', 'noRevAvail'),
'2.16.840.1.101.3': ('csor', ),
'2.16.840.1.101.3.4': ('nistAlgorithms', ),
'2.16.840.1.101.3.4.1': ('aes', ),
'2.16.840.1.101.3.4.1.1': ('aes-128-ecb', 'AES-128-ECB'),
'2.16.840.1.101.3.4.1.2': ('aes-128-cbc', 'AES-128-CBC'),
'2.16.840.1.101.3.4.1.3': ('aes-128-ofb', 'AES-128-OFB'),
'2.16.840.1.101.3.4.1.4': ('aes-128-cfb', 'AES-128-CFB'),
'2.16.840.1.101.3.4.1.5': ('id-aes128-wrap', ),
'2.16.840.1.101.3.4.1.6': ('aes-128-gcm', 'id-aes128-GCM'),
'2.16.840.1.101.3.4.1.7': ('aes-128-ccm', 'id-aes128-CCM'),
'2.16.840.1.101.3.4.1.8': ('id-aes128-wrap-pad', ),
'2.16.840.1.101.3.4.1.21': ('aes-192-ecb', 'AES-192-ECB'),
'2.16.840.1.101.3.4.1.22': ('aes-192-cbc', 'AES-192-CBC'),
'2.16.840.1.101.3.4.1.23': ('aes-192-ofb', 'AES-192-OFB'),
'2.16.840.1.101.3.4.1.24': ('aes-192-cfb', 'AES-192-CFB'),
'2.16.840.1.101.3.4.1.25': ('id-aes192-wrap', ),
'2.16.840.1.101.3.4.1.26': ('aes-192-gcm', 'id-aes192-GCM'),
'2.16.840.1.101.3.4.1.27': ('aes-192-ccm', 'id-aes192-CCM'),
'2.16.840.1.101.3.4.1.28': ('id-aes192-wrap-pad', ),
'2.16.840.1.101.3.4.1.41': ('aes-256-ecb', 'AES-256-ECB'),
'2.16.840.1.101.3.4.1.42': ('aes-256-cbc', 'AES-256-CBC'),
'2.16.840.1.101.3.4.1.43': ('aes-256-ofb', 'AES-256-OFB'),
'2.16.840.1.101.3.4.1.44': ('aes-256-cfb', 'AES-256-CFB'),
'2.16.840.1.101.3.4.1.45': ('id-aes256-wrap', ),
'2.16.840.1.101.3.4.1.46': ('aes-256-gcm', 'id-aes256-GCM'),
'2.16.840.1.101.3.4.1.47': ('aes-256-ccm', 'id-aes256-CCM'),
'2.16.840.1.101.3.4.1.48': ('id-aes256-wrap-pad', ),
'2.16.840.1.101.3.4.2': ('nist_hashalgs', ),
'2.16.840.1.101.3.4.2.1': ('sha256', 'SHA256'),
'2.16.840.1.101.3.4.2.2': ('sha384', 'SHA384'),
'2.16.840.1.101.3.4.2.3': ('sha512', 'SHA512'),
'2.16.840.1.101.3.4.2.4': ('sha224', 'SHA224'),
'2.16.840.1.101.3.4.2.5': ('sha512-224', 'SHA512-224'),
'2.16.840.1.101.3.4.2.6': ('sha512-256', 'SHA512-256'),
'2.16.840.1.101.3.4.2.7': ('sha3-224', 'SHA3-224'),
'2.16.840.1.101.3.4.2.8': ('sha3-256', 'SHA3-256'),
'2.16.840.1.101.3.4.2.9': ('sha3-384', 'SHA3-384'),
'2.16.840.1.101.3.4.2.10': ('sha3-512', 'SHA3-512'),
'2.16.840.1.101.3.4.2.11': ('shake128', 'SHAKE128'),
'2.16.840.1.101.3.4.2.12': ('shake256', 'SHAKE256'),
'2.16.840.1.101.3.4.2.13': ('hmac-sha3-224', 'id-hmacWithSHA3-224'),
'2.16.840.1.101.3.4.2.14': ('hmac-sha3-256', 'id-hmacWithSHA3-256'),
'2.16.840.1.101.3.4.2.15': ('hmac-sha3-384', 'id-hmacWithSHA3-384'),
'2.16.840.1.101.3.4.2.16': ('hmac-sha3-512', 'id-hmacWithSHA3-512'),
'2.16.840.1.101.3.4.3': ('dsa_with_sha2', 'sigAlgs'),
'2.16.840.1.101.3.4.3.1': ('dsa_with_SHA224', ),
'2.16.840.1.101.3.4.3.2': ('dsa_with_SHA256', ),
'2.16.840.1.101.3.4.3.3': ('dsa_with_SHA384', 'id-dsa-with-sha384'),
'2.16.840.1.101.3.4.3.4': ('dsa_with_SHA512', 'id-dsa-with-sha512'),
'2.16.840.1.101.3.4.3.5': ('dsa_with_SHA3-224', 'id-dsa-with-sha3-224'),
'2.16.840.1.101.3.4.3.6': ('dsa_with_SHA3-256', 'id-dsa-with-sha3-256'),
'2.16.840.1.101.3.4.3.7': ('dsa_with_SHA3-384', 'id-dsa-with-sha3-384'),
'2.16.840.1.101.3.4.3.8': ('dsa_with_SHA3-512', 'id-dsa-with-sha3-512'),
'2.16.840.1.101.3.4.3.9': ('ecdsa_with_SHA3-224', 'id-ecdsa-with-sha3-224'),
'2.16.840.1.101.3.4.3.10': ('ecdsa_with_SHA3-256', 'id-ecdsa-with-sha3-256'),
'2.16.840.1.101.3.4.3.11': ('ecdsa_with_SHA3-384', 'id-ecdsa-with-sha3-384'),
'2.16.840.1.101.3.4.3.12': ('ecdsa_with_SHA3-512', 'id-ecdsa-with-sha3-512'),
'2.16.840.1.101.3.4.3.13': ('RSA-SHA3-224', 'id-rsassa-pkcs1-v1_5-with-sha3-224'),
'2.16.840.1.101.3.4.3.14': ('RSA-SHA3-256', 'id-rsassa-pkcs1-v1_5-with-sha3-256'),
'2.16.840.1.101.3.4.3.15': ('RSA-SHA3-384', 'id-rsassa-pkcs1-v1_5-with-sha3-384'),
'2.16.840.1.101.3.4.3.16': ('RSA-SHA3-512', 'id-rsassa-pkcs1-v1_5-with-sha3-512'),
'2.16.840.1.113730': ('Netscape Communications Corp.', 'Netscape'),
'2.16.840.1.113730.1': ('Netscape Certificate Extension', 'nsCertExt'),
'2.16.840.1.113730.1.1': ('Netscape Cert Type', 'nsCertType'),
'2.16.840.1.113730.1.2': ('Netscape Base Url', 'nsBaseUrl'),
'2.16.840.1.113730.1.3': ('Netscape Revocation Url', 'nsRevocationUrl'),
'2.16.840.1.113730.1.4': ('Netscape CA Revocation Url', 'nsCaRevocationUrl'),
'2.16.840.1.113730.1.7': ('Netscape Renewal Url', 'nsRenewalUrl'),
'2.16.840.1.113730.1.8': ('Netscape CA Policy Url', 'nsCaPolicyUrl'),
'2.16.840.1.113730.1.12': ('Netscape SSL Server Name', 'nsSslServerName'),
'2.16.840.1.113730.1.13': ('Netscape Comment', 'nsComment'),
'2.16.840.1.113730.2': ('Netscape Data Type', 'nsDataType'),
'2.16.840.1.113730.2.5': ('Netscape Certificate Sequence', 'nsCertSequence'),
'2.16.840.1.113730.4.1': ('Netscape Server Gated Crypto', 'nsSGC'),
'2.23': ('International Organizations', 'international-organizations'),
'2.23.42': ('Secure Electronic Transactions', 'id-set'),
'2.23.42.0': ('content types', 'set-ctype'),
'2.23.42.0.0': ('setct-PANData', ),
'2.23.42.0.1': ('setct-PANToken', ),
'2.23.42.0.2': ('setct-PANOnly', ),
'2.23.42.0.3': ('setct-OIData', ),
'2.23.42.0.4': ('setct-PI', ),
'2.23.42.0.5': ('setct-PIData', ),
'2.23.42.0.6': ('setct-PIDataUnsigned', ),
'2.23.42.0.7': ('setct-HODInput', ),
'2.23.42.0.8': ('setct-AuthResBaggage', ),
'2.23.42.0.9': ('setct-AuthRevReqBaggage', ),
'2.23.42.0.10': ('setct-AuthRevResBaggage', ),
'2.23.42.0.11': ('setct-CapTokenSeq', ),
'2.23.42.0.12': ('setct-PInitResData', ),
'2.23.42.0.13': ('setct-PI-TBS', ),
'2.23.42.0.14': ('setct-PResData', ),
'2.23.42.0.16': ('setct-AuthReqTBS', ),
'2.23.42.0.17': ('setct-AuthResTBS', ),
'2.23.42.0.18': ('setct-AuthResTBSX', ),
'2.23.42.0.19': ('setct-AuthTokenTBS', ),
'2.23.42.0.20': ('setct-CapTokenData', ),
'2.23.42.0.21': ('setct-CapTokenTBS', ),
'2.23.42.0.22': ('setct-AcqCardCodeMsg', ),
'2.23.42.0.23': ('setct-AuthRevReqTBS', ),
'2.23.42.0.24': ('setct-AuthRevResData', ),
'2.23.42.0.25': ('setct-AuthRevResTBS', ),
'2.23.42.0.26': ('setct-CapReqTBS', ),
'2.23.42.0.27': ('setct-CapReqTBSX', ),
'2.23.42.0.28': ('setct-CapResData', ),
'2.23.42.0.29': ('setct-CapRevReqTBS', ),
'2.23.42.0.30': ('setct-CapRevReqTBSX', ),
'2.23.42.0.31': ('setct-CapRevResData', ),
'2.23.42.0.32': ('setct-CredReqTBS', ),
'2.23.42.0.33': ('setct-CredReqTBSX', ),
'2.23.42.0.34': ('setct-CredResData', ),
'2.23.42.0.35': ('setct-CredRevReqTBS', ),
'2.23.42.0.36': ('setct-CredRevReqTBSX', ),
'2.23.42.0.37': ('setct-CredRevResData', ),
'2.23.42.0.38': ('setct-PCertReqData', ),
'2.23.42.0.39': ('setct-PCertResTBS', ),
'2.23.42.0.40': ('setct-BatchAdminReqData', ),
'2.23.42.0.41': ('setct-BatchAdminResData', ),
'2.23.42.0.42': ('setct-CardCInitResTBS', ),
'2.23.42.0.43': ('setct-MeAqCInitResTBS', ),
'2.23.42.0.44': ('setct-RegFormResTBS', ),
'2.23.42.0.45': ('setct-CertReqData', ),
'2.23.42.0.46': ('setct-CertReqTBS', ),
'2.23.42.0.47': ('setct-CertResData', ),
'2.23.42.0.48': ('setct-CertInqReqTBS', ),
'2.23.42.0.49': ('setct-ErrorTBS', ),
'2.23.42.0.50': ('setct-PIDualSignedTBE', ),
'2.23.42.0.51': ('setct-PIUnsignedTBE', ),
'2.23.42.0.52': ('setct-AuthReqTBE', ),
'2.23.42.0.53': ('setct-AuthResTBE', ),
'2.23.42.0.54': ('setct-AuthResTBEX', ),
'2.23.42.0.55': ('setct-AuthTokenTBE', ),
'2.23.42.0.56': ('setct-CapTokenTBE', ),
'2.23.42.0.57': ('setct-CapTokenTBEX', ),
'2.23.42.0.58': ('setct-AcqCardCodeMsgTBE', ),
'2.23.42.0.59': ('setct-AuthRevReqTBE', ),
'2.23.42.0.60': ('setct-AuthRevResTBE', ),
'2.23.42.0.61': ('setct-AuthRevResTBEB', ),
'2.23.42.0.62': ('setct-CapReqTBE', ),
'2.23.42.0.63': ('setct-CapReqTBEX', ),
'2.23.42.0.64': ('setct-CapResTBE', ),
'2.23.42.0.65': ('setct-CapRevReqTBE', ),
'2.23.42.0.66': ('setct-CapRevReqTBEX', ),
'2.23.42.0.67': ('setct-CapRevResTBE', ),
'2.23.42.0.68': ('setct-CredReqTBE', ),
'2.23.42.0.69': ('setct-CredReqTBEX', ),
'2.23.42.0.70': ('setct-CredResTBE', ),
'2.23.42.0.71': ('setct-CredRevReqTBE', ),
'2.23.42.0.72': ('setct-CredRevReqTBEX', ),
'2.23.42.0.73': ('setct-CredRevResTBE', ),
'2.23.42.0.74': ('setct-BatchAdminReqTBE', ),
'2.23.42.0.75': ('setct-BatchAdminResTBE', ),
'2.23.42.0.76': ('setct-RegFormReqTBE', ),
'2.23.42.0.77': ('setct-CertReqTBE', ),
'2.23.42.0.78': ('setct-CertReqTBEX', ),
'2.23.42.0.79': ('setct-CertResTBE', ),
'2.23.42.0.80': ('setct-CRLNotificationTBS', ),
'2.23.42.0.81': ('setct-CRLNotificationResTBS', ),
'2.23.42.0.82': ('setct-BCIDistributionTBS', ),
'2.23.42.1': ('message extensions', 'set-msgExt'),
'2.23.42.1.1': ('generic cryptogram', 'setext-genCrypt'),
'2.23.42.1.3': ('merchant initiated auth', 'setext-miAuth'),
'2.23.42.1.4': ('setext-pinSecure', ),
'2.23.42.1.5': ('setext-pinAny', ),
'2.23.42.1.7': ('setext-track2', ),
'2.23.42.1.8': ('additional verification', 'setext-cv'),
'2.23.42.3': ('set-attr', ),
'2.23.42.3.0': ('setAttr-Cert', ),
'2.23.42.3.0.0': ('set-rootKeyThumb', ),
'2.23.42.3.0.1': ('set-addPolicy', ),
'2.23.42.3.1': ('payment gateway capabilities', 'setAttr-PGWYcap'),
'2.23.42.3.2': ('setAttr-TokenType', ),
'2.23.42.3.2.1': ('setAttr-Token-EMV', ),
'2.23.42.3.2.2': ('setAttr-Token-B0Prime', ),
'2.23.42.3.3': ('issuer capabilities', 'setAttr-IssCap'),
'2.23.42.3.3.3': ('setAttr-IssCap-CVM', ),
'2.23.42.3.3.3.1': ('generate cryptogram', 'setAttr-GenCryptgrm'),
'2.23.42.3.3.4': ('setAttr-IssCap-T2', ),
'2.23.42.3.3.4.1': ('encrypted track 2', 'setAttr-T2Enc'),
'2.23.42.3.3.4.2': ('cleartext track 2', 'setAttr-T2cleartxt'),
'2.23.42.3.3.5': ('setAttr-IssCap-Sig', ),
'2.23.42.3.3.5.1': ('ICC or token signature', 'setAttr-TokICCsig'),
'2.23.42.3.3.5.2': ('secure device signature', 'setAttr-SecDevSig'),
'2.23.42.5': ('set-policy', ),
'2.23.42.5.0': ('set-policy-root', ),
'2.23.42.7': ('certificate extensions', 'set-certExt'),
'2.23.42.7.0': ('setCext-hashedRoot', ),
'2.23.42.7.1': ('setCext-certType', ),
'2.23.42.7.2': ('setCext-merchData', ),
'2.23.42.7.3': ('setCext-cCertRequired', ),
'2.23.42.7.4': ('setCext-tunneling', ),
'2.23.42.7.5': ('setCext-setExt', ),
'2.23.42.7.6': ('setCext-setQualf', ),
'2.23.42.7.7': ('setCext-PGWYcapabilities', ),
'2.23.42.7.8': ('setCext-TokenIdentifier', ),
'2.23.42.7.9': ('setCext-Track2Data', ),
'2.23.42.7.10': ('setCext-TokenType', ),
'2.23.42.7.11': ('setCext-IssuerCapabilities', ),
'2.23.42.8': ('set-brand', ),
'2.23.42.8.1': ('set-brand-IATA-ATA', ),
'2.23.42.8.4': ('set-brand-Visa', ),
'2.23.42.8.5': ('set-brand-MasterCard', ),
'2.23.42.8.30': ('set-brand-Diners', ),
'2.23.42.8.34': ('set-brand-AmericanExpress', ),
'2.23.42.8.35': ('set-brand-JCB', ),
'2.23.42.8.6011': ('set-brand-Novus', ),
'2.23.43': ('wap', ),
'2.23.43.1': ('wap-wsg', ),
'2.23.43.1.4': ('wap-wsg-idm-ecid', ),
'2.23.43.1.4.1': ('wap-wsg-idm-ecid-wtls1', ),
'2.23.43.1.4.3': ('wap-wsg-idm-ecid-wtls3', ),
'2.23.43.1.4.4': ('wap-wsg-idm-ecid-wtls4', ),
'2.23.43.1.4.5': ('wap-wsg-idm-ecid-wtls5', ),
'2.23.43.1.4.6': ('wap-wsg-idm-ecid-wtls6', ),
'2.23.43.1.4.7': ('wap-wsg-idm-ecid-wtls7', ),
'2.23.43.1.4.8': ('wap-wsg-idm-ecid-wtls8', ),
'2.23.43.1.4.9': ('wap-wsg-idm-ecid-wtls9', ),
'2.23.43.1.4.10': ('wap-wsg-idm-ecid-wtls10', ),
'2.23.43.1.4.11': ('wap-wsg-idm-ecid-wtls11', ),
'2.23.43.1.4.12': ('wap-wsg-idm-ecid-wtls12', ),
}
# #####################################################################################
# #####################################################################################
# Reverse lookup tables derived from _OID_MAP:
#   _OID_LOOKUP:           any known name (long or short) -> dotted OID string
#   _NORMALIZE_NAMES:      any known name -> canonical long name (first entry in _OID_MAP)
#   _NORMALIZE_NAMES_SHORT: any known name -> short name (last entry in _OID_MAP)
_OID_LOOKUP = dict()
_NORMALIZE_NAMES = dict()
_NORMALIZE_NAMES_SHORT = dict()
for dotted, names in _OID_MAP.items():
    for name in names:
        # The same name may legitimately appear more than once for the SAME
        # OID; two different OIDs claiming one name is a setup error.
        if name in _NORMALIZE_NAMES and _OID_LOOKUP[name] != dotted:
            raise AssertionError(
                'Name collision during setup: "{0}" for OIDs {1} and {2}'
                .format(name, dotted, _OID_LOOKUP[name])
            )
        _NORMALIZE_NAMES[name] = names[0]
        _NORMALIZE_NAMES_SHORT[name] = names[-1]
        _OID_LOOKUP[name] = dotted
# Extra aliases that map onto an existing name (e.g. 'userID' -> 'userId').
for alias, original in [('userID', 'userId')]:
    if alias in _NORMALIZE_NAMES:
        raise AssertionError(
            'Name collision during adding aliases: "{0}" (alias for "{1}") is already mapped to OID {2}'
            .format(alias, original, _OID_LOOKUP[alias])
        )
    _NORMALIZE_NAMES[alias] = original
    _NORMALIZE_NAMES_SHORT[alias] = _NORMALIZE_NAMES_SHORT[original]
    _OID_LOOKUP[alias] = _OID_LOOKUP[original]
def pyopenssl_normalize_name(name, short=False):
    '''Normalize an OID/attribute name through OpenSSL's object table.

    If OpenSSL recognizes *name*, it is first replaced by OpenSSL's long name;
    the result is then mapped onto our canonical long spelling (default) or
    the short spelling (``short=True``). Unknown names pass through unchanged.
    '''
    nid = OpenSSL._util.lib.OBJ_txt2nid(to_bytes(name))
    if nid != 0:
        long_name = OpenSSL._util.lib.OBJ_nid2ln(nid)
        name = to_text(OpenSSL._util.ffi.string(long_name))
    lookup = _NORMALIZE_NAMES_SHORT if short else _NORMALIZE_NAMES
    return lookup.get(name, name)
# #####################################################################################
# #####################################################################################
# # This excerpt is dual licensed under the terms of the Apache License, Version
# # 2.0, and the BSD License. See the LICENSE file at
# # https://github.com/pyca/cryptography/blob/master/LICENSE for complete details.
# #
# # Adapted from cryptography's hazmat/backends/openssl/decode_asn1.py
# #
# # Copyright (c) 2015, 2016 Paul Kehrer (@reaperhulk)
# # Copyright (c) 2017 Fraser Tweedale (@frasertweedale)
# #
# # Relevant commits from cryptography project (https://github.com/pyca/cryptography):
# # pyca/cryptography@719d536dd691e84e208534798f2eb4f82aaa2e07
# # pyca/cryptography@5ab6d6a5c05572bd1c75f05baf264a2d0001894a
# # pyca/cryptography@2e776e20eb60378e0af9b7439000d0e80da7c7e3
# # pyca/cryptography@fb309ed24647d1be9e319b61b1f2aa8ebb87b90b
# # pyca/cryptography@2917e460993c475c72d7146c50dc3bbc2414280d
# # pyca/cryptography@3057f91ea9a05fb593825006d87a391286a4d828
# # pyca/cryptography@d607dd7e5bc5c08854ec0c9baff70ba4a35be36f
def _obj2txt(openssl_lib, openssl_ffi, obj):
    '''Convert an ASN1_OBJECT to its dotted-string OID representation.

    A default buffer of 80 bytes is used on the recommendation of
    https://www.openssl.org/docs/crypto/OBJ_nid2ln.html#return_values, but
    real-world OIDs can be longer (e.g. some very long Active Directory
    OIDs), so the call is retried with a buffer sized to what
    OBJ_obj2txt reports it would have written.
    '''
    size = 80
    buf = openssl_ffi.new("char[]", size)
    # OBJ_obj2txt returns the number of bytes that *would* be written if
    # the buffer were large enough.
    written = openssl_lib.OBJ_obj2txt(buf, size, obj, 1)
    if written > size - 1:  # leave room for the terminating NUL byte
        size = written + 1
        buf = openssl_ffi.new("char[]", size)
        written = openssl_lib.OBJ_obj2txt(buf, size, obj, 1)
    return openssl_ffi.buffer(buf, written)[:].decode()
# #####################################################################################
# #####################################################################################
def cryptography_get_extensions_from_cert(cert):
    '''Return a dict mapping dotted OID string -> {critical, value} for all
    extensions of a cryptography certificate object.

    cryptography only keeps the raw DER value for *unrecognized* extensions,
    so the extension parsing is redone here through the backend's low-level
    OpenSSL bindings.
    '''
    backend = cert._backend
    x509_obj = cert._x509
    extensions = dict()
    for index in range(backend._lib.X509_get_ext_count(x509_obj)):
        extension = backend._lib.X509_get_ext(x509_obj, index)
        if extension == backend._ffi.NULL:
            continue
        data = backend._lib.X509_EXTENSION_get_data(extension)
        backend.openssl_assert(data != backend._ffi.NULL)
        der_value = backend._ffi.buffer(data.data, data.length)[:]
        dotted = _obj2txt(
            backend._lib,
            backend._ffi,
            backend._lib.X509_EXTENSION_get_object(extension),
        )
        extensions[dotted] = dict(
            critical=(backend._lib.X509_EXTENSION_get_critical(extension) == 1),
            value=base64.b64encode(der_value),
        )
    return extensions
def cryptography_get_extensions_from_csr(csr):
    '''Return a dict mapping dotted OID string -> {critical, value} for all
    extensions of a cryptography CSR object.

    cryptography only keeps the raw DER value for *unrecognized* extensions,
    so the extension parsing is redone here through the backend's low-level
    OpenSSL bindings.
    '''
    backend = csr._backend
    # X509_REQ_get_extensions returns a newly allocated stack that must be
    # freed; attach a GC hook so the whole stack is released with it.
    stack = backend._lib.X509_REQ_get_extensions(csr._x509_req)
    stack = backend._ffi.gc(
        stack,
        lambda ext: backend._lib.sk_X509_EXTENSION_pop_free(
            ext,
            backend._ffi.addressof(backend._lib._original_lib, "X509_EXTENSION_free")
        )
    )
    extensions = dict()
    for index in range(backend._lib.sk_X509_EXTENSION_num(stack)):
        extension = backend._lib.sk_X509_EXTENSION_value(stack, index)
        if extension == backend._ffi.NULL:
            continue
        data = backend._lib.X509_EXTENSION_get_data(extension)
        backend.openssl_assert(data != backend._ffi.NULL)
        der_value = backend._ffi.buffer(data.data, data.length)[:]
        dotted = _obj2txt(
            backend._lib,
            backend._ffi,
            backend._lib.X509_EXTENSION_get_object(extension),
        )
        extensions[dotted] = dict(
            critical=(backend._lib.X509_EXTENSION_get_critical(extension) == 1),
            value=base64.b64encode(der_value),
        )
    return extensions
def pyopenssl_get_extensions_from_cert(cert):
    '''Return a dict mapping dotted OID string -> {critical, value} for all
    extensions of a pyOpenSSL certificate object.

    pyOpenSSL exposes an extension's DER value but not its dotted OID string,
    so the OID is extracted through the low-level OpenSSL bindings.
    '''
    extensions = dict()
    for index in range(0, cert.get_extension_count()):
        extension = cert.get_extension(index)
        # Using OBJ_nid2obj(extension._nid) would be simpler, but it yields
        # the wrong result when the linked OpenSSL does not know the OID.
        # Hence the dotted string is obtained the same way cryptography
        # does it.
        dotted = _obj2txt(
            OpenSSL._util.lib,
            OpenSSL._util.ffi,
            OpenSSL._util.lib.X509_EXTENSION_get_object(extension._extension)
        )
        extensions[dotted] = dict(
            critical=bool(extension.get_critical()),
            value=base64.b64encode(extension.get_data()),
        )
    return extensions
def pyopenssl_get_extensions_from_csr(csr):
    '''Return a dict mapping dotted OID string -> {critical, value} for all
    extensions of a pyOpenSSL CSR object.

    pyOpenSSL exposes an extension's DER value but not its dotted OID string,
    so the OID is extracted through the low-level OpenSSL bindings.
    '''
    extensions = dict()
    for extension in csr.get_extensions():
        # Using OBJ_nid2obj(extension._nid) would be simpler, but it yields
        # the wrong result when the linked OpenSSL does not know the OID.
        # Hence the dotted string is obtained the same way cryptography
        # does it.
        dotted = _obj2txt(
            OpenSSL._util.lib,
            OpenSSL._util.ffi,
            OpenSSL._util.lib.X509_EXTENSION_get_object(extension._extension)
        )
        extensions[dotted] = dict(
            critical=bool(extension.get_critical()),
            value=base64.b64encode(extension.get_data()),
        )
    return extensions
def cryptography_name_to_oid(name):
    '''Translate a known OID name (long or short form) into a cryptography
    x509.oid.ObjectIdentifier.

    Raises OpenSSLObjectError for names not present in the OID table.
    '''
    if name not in _OID_LOOKUP:
        raise OpenSSLObjectError('Cannot find OID for "{0}"'.format(name))
    return x509.oid.ObjectIdentifier(_OID_LOOKUP[name])
def cryptography_oid_to_name(oid, short=False):
    '''Translate a cryptography ObjectIdentifier into its canonical long name
    (default) or short name (``short=True``).

    Falls back to cryptography's own name for OIDs not in our table.
    '''
    names = _OID_MAP.get(oid.dotted_string)
    if names:
        name = names[0]
    else:
        name = oid._name  # cryptography's internal fallback name
    lookup = _NORMALIZE_NAMES_SHORT if short else _NORMALIZE_NAMES
    return lookup.get(name, name)
def cryptography_get_name(name):
    '''
    Given a prefixed name string (DNS:, IP:, email: or URI:), return the
    corresponding cryptography x509 general-name object.
    Raises an OpenSSLObjectError if the name is unknown or cannot be parsed.
    '''
    builders = (
        ('DNS:', lambda rest: x509.DNSName(to_text(rest))),
        ('IP:', lambda rest: x509.IPAddress(ipaddress.ip_address(to_text(rest)))),
        ('email:', lambda rest: x509.RFC822Name(to_text(rest))),
        ('URI:', lambda rest: x509.UniformResourceIdentifier(to_text(rest))),
    )
    try:
        for prefix, build in builders:
            if name.startswith(prefix):
                return build(name[len(prefix):])
    except Exception as e:
        raise OpenSSLObjectError('Cannot parse Subject Alternative Name "{0}": {1}'.format(name, e))
    # No known prefix matched: give a hint if there is no prefix at all.
    if ':' not in name:
        raise OpenSSLObjectError('Cannot parse Subject Alternative Name "{0}" (forgot "DNS:" prefix?)'.format(name))
    raise OpenSSLObjectError('Cannot parse Subject Alternative Name "{0}" (potentially unsupported by cryptography backend)'.format(name))
def _get_hex(bytesstr):
    '''Return a colon-separated hex dump of *bytesstr* (None passes through).'''
    if bytesstr is None:
        return bytesstr
    raw = binascii.hexlify(bytesstr)
    byte_pairs = [raw[pos:pos + 2] for pos in range(0, len(raw), 2)]
    return to_text(b':'.join(byte_pairs))
def cryptography_decode_name(name):
    '''
    Given a cryptography x509 general-name object, return its prefixed
    string form (the inverse of cryptography_get_name).
    Raises an OpenSSLObjectError if the name type is not supported.
    '''
    formatters = (
        (x509.DNSName, lambda n: 'DNS:{0}'.format(n.value)),
        (x509.IPAddress, lambda n: 'IP:{0}'.format(n.value.compressed)),
        (x509.RFC822Name, lambda n: 'email:{0}'.format(n.value)),
        (x509.UniformResourceIdentifier, lambda n: 'URI:{0}'.format(n.value)),
        # FIXME: test
        (x509.DirectoryName, lambda n: 'DirName:' + ''.join(
            '/{0}:{1}'.format(attribute.oid._name, attribute.value) for attribute in n.value)),
        # FIXME: test
        (x509.RegisteredID, lambda n: 'RegisteredID:{0}'.format(n.value)),
        # FIXME: test
        (x509.OtherName, lambda n: '{0}:{1}'.format(n.type_id.dotted_string, _get_hex(n.value))),
    )
    for name_type, fmt in formatters:
        if isinstance(name, name_type):
            return fmt(name)
    raise OpenSSLObjectError('Cannot decode name "{0}"'.format(name))
def _cryptography_get_keyusage(usage):
'''
Given a key usage identifier string, returns the parameter name used by cryptography's x509.KeyUsage().
Raises an OpenSSLObjectError if the identifier is unknown.
'''
if usage in ('Digital Signature', 'digitalSignature'):
return 'digital_signature'
if usage in ('Non Repudiation', 'nonRepudiation'):
return 'content_commitment'
if usage in ('Key Encipherment', 'keyEncipherment'):
return 'key_encipherment'
if usage in ('Data Encipherment', 'dataEncipherment'):
return 'data_encipherment'
if usage in ('Key Agreement', 'keyAgreement'):
return 'key_agreement'
if usage in ('Certificate Sign', 'keyCertSign'):
return 'key_cert_sign'
if usage in ('CRL Sign', 'cRLSign'):
return 'crl_sign'
if usage in ('Encipher Only', 'encipherOnly'):
return 'encipher_only'
if usage in ('Decipher Only', 'decipherOnly'):
return 'decipher_only'
raise OpenSSLObjectError('Unknown key usage "{0}"'.format(usage))
def cryptography_parse_key_usage_params(usages):
    '''
    Given a list of key usage identifier strings, returns the parameters for cryptography's x509.KeyUsage().
    Raises an OpenSSLObjectError if an identifier is unknown.
    '''
    param_names = (
        'digital_signature',
        'content_commitment',
        'key_encipherment',
        'data_encipherment',
        'key_agreement',
        'key_cert_sign',
        'crl_sign',
        'encipher_only',
        'decipher_only',
    )
    # Start with every usage disabled, then switch on the requested ones.
    params = {param: False for param in param_names}
    for usage in usages:
        params[_cryptography_get_keyusage(usage)] = True
    return params
def cryptography_get_basic_constraints(constraints):
    '''
    Given a list of constraint strings, returns a tuple (ca, path_length).
    Raises an OpenSSLObjectError if a constraint is unknown or cannot be parsed.
    '''
    is_ca = False
    max_path_length = None
    for entry in (constraints or []):
        if entry == 'CA:TRUE':
            is_ca = True
        elif entry == 'CA:FALSE':
            is_ca = False
        elif entry.startswith('CA:'):
            raise OpenSSLObjectError('Unknown basic constraint value "{0}" for CA'.format(entry[3:]))
        elif entry.startswith('pathlen:'):
            value = entry[len('pathlen:'):]
            try:
                max_path_length = int(value)
            except Exception as e:
                raise OpenSSLObjectError('Cannot parse path length constraint "{0}" ({1})'.format(value, e))
        else:
            raise OpenSSLObjectError('Unknown basic constraint "{0}"'.format(entry))
    return is_ca, max_path_length
def binary_exp_mod(f, e, m):
    '''Computes f^e mod m in O(log e) multiplications modulo m.'''
    # Collect the bits of e, least significant first. (bit_length() is
    # avoided deliberately: this module still supports Python 2.6.)
    bits = []
    remaining = e
    while remaining > 0:
        bits.append(remaining & 1)
        remaining >>= 1
    # Square-and-multiply, consuming the bits most significant first.
    result = 1
    for bit in reversed(bits):
        result = (result * result) % m
        if bit:
            result = (result * f) % m
    return result
def simple_gcd(a, b):
    '''Compute GCD of its two inputs.'''
    # Euclid's algorithm, expressed recursively; depth is O(log min(a, b)).
    if b == 0:
        return a
    return simple_gcd(b, a % b)
def quick_is_not_prime(n):
    '''Does some quick checks to see if we can poke a hole into the primality of n.

    A result of `False` does **not** mean that the number is prime; it just means
    that we couldn't detect quickly whether it is not prime.

    A result of `True` is definitive: n is certainly not prime.
    '''
    if n < 2:
        # 0, 1 and negative numbers are not prime by definition.
        return True
    if n < 200:
        # Small enough to decide definitively by trial division. (The gcd
        # shortcut below would wrongly flag small primes such as 2 or 7 as
        # composite, since gcd(p, product-of-primes<200) == p > 1.)
        return any(n % d == 0 for d in range(2, int(n ** 0.5) + 1))
    # The constant in the next line is the product of all primes < 200.
    # For n >= 200, a nontrivial common factor exhibits a *proper* prime
    # divisor of n, which proves n composite.
    if simple_gcd(n, 7799922041683461553249199106329813876687996789903550945093032474868511536164700810) > 1:
        return True
    # TODO: maybe do some iterations of Miller-Rabin to increase confidence
    # (https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test)
    return False
python_version = (sys.version_info[0], sys.version_info[1])
# int.bit_length() exists on Python 2.7 and 3.1+. Ansible still supports
# Python 2.6 on remote nodes, and Python 3.0 lacks bit_length() as well.
# (The original condition `>= (2, 7) or >= (3, 1)` wrongly selected the
# fast branch on Python 3.0, where int.bit_length() does not exist.)
if (2, 7) <= python_version < (3, 0) or python_version >= (3, 1):
    def count_bits(no):
        '''Return the number of bits needed to represent abs(no); 0 for 0.'''
        no = abs(no)
        if no == 0:
            return 0
        return no.bit_length()
else:
    # Slow fallback for interpreters without int.bit_length().
    def count_bits(no):
        '''Return the number of bits needed to represent abs(no); 0 for 0.'''
        no = abs(no)
        count = 0
        while no > 0:
            no >>= 1
            count += 1
        return count
# PEM framing markers for the first line of a PEM file ('-----BEGIN <NAME>-----').
PEM_START = '-----BEGIN '
PEM_END = '-----'
# BEGIN-line labels used by PKCS#8 keys (plain and encrypted).
PKCS8_PRIVATEKEY_NAMES = ('PRIVATE KEY', 'ENCRYPTED PRIVATE KEY')
# PKCS#1-style labels look like '<ALGORITHM> PRIVATE KEY' (e.g. 'RSA PRIVATE KEY').
PKCS1_PRIVATEKEY_SUFFIX = ' PRIVATE KEY'
def identify_private_key_format(content):
    '''Given the contents of a private key file (bytes), identify its format.

    Returns one of 'pkcs8', 'pkcs1', 'unknown-pem' or 'raw' (DER or other
    non-PEM data).
    '''
    # See https://github.com/openssl/openssl/blob/master/crypto/pem/pem_pkey.c#L40-L85
    # (PEM_read_bio_PrivateKey)
    # and https://github.com/openssl/openssl/blob/master/include/openssl/pem.h#L46-L47
    # (PEM_STRING_PKCS8, PEM_STRING_PKCS8INF)
    try:
        lines = content.decode('utf-8').splitlines(False)
        # Guard against empty input: without the `lines and` check, empty
        # content raised IndexError on lines[0] instead of returning 'raw'.
        if lines and lines[0].startswith(PEM_START) and lines[0].endswith(PEM_END) and len(lines[0]) > len(PEM_START) + len(PEM_END):
            name = lines[0][len(PEM_START):-len(PEM_END)]
            if name in PKCS8_PRIVATEKEY_NAMES:
                return 'pkcs8'
            if len(name) > len(PKCS1_PRIVATEKEY_SUFFIX) and name.endswith(PKCS1_PRIVATEKEY_SUFFIX):
                return 'pkcs1'
            return 'unknown-pem'
    except UnicodeDecodeError:
        pass
    return 'raw'
def cryptography_key_needs_digest_for_signing(key):
    '''Tests whether the given private key requires a digest algorithm for signing.

    Ed25519 and Ed448 keys do not; they need None to be passed as the digest algorithm.
    '''
    digestless_types = []
    if CRYPTOGRAPHY_HAS_ED25519:
        digestless_types.append(cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey)
    if CRYPTOGRAPHY_HAS_ED448:
        digestless_types.append(cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey)
    # isinstance with an empty tuple is always False, so keys need a digest
    # whenever neither Ed variant is available.
    return not isinstance(key, tuple(digestless_types))
def _compare_raw_public_keys(key1, key2, key_type):
    '''Compare two keys via their raw encoding if either is of ``key_type``.

    Returns True/False when at least one key is an instance of ``key_type``
    (False when the other one is not), and None when neither is.
    '''
    is1 = isinstance(key1, key_type)
    is2 = isinstance(key2, key_type)
    if not (is1 or is2):
        return None
    if not (is1 and is2):
        # Keys of different types can never be equal.
        return False
    raw1 = key1.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw)
    raw2 = key2.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw)
    return raw1 == raw2


def cryptography_compare_public_keys(key1, key2):
    '''Tests whether two public keys are the same.

    Needs special logic for Ed25519 and Ed448 keys, since they do not have
    public_numbers(); their raw encodings are compared instead. (The two
    previously copy-pasted Ed25519/Ed448 branches now share one helper.)
    '''
    if CRYPTOGRAPHY_HAS_ED25519:
        result = _compare_raw_public_keys(
            key1, key2, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PublicKey)
        if result is not None:
            return result
    if CRYPTOGRAPHY_HAS_ED448:
        result = _compare_raw_public_keys(
            key1, key2, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PublicKey)
        if result is not None:
            return result
    return key1.public_numbers() == key2.public_numbers()
if HAS_CRYPTOGRAPHY:
    # Map from Ansible revocation-reason identifier strings to cryptography's
    # x509.ReasonFlags values (used when building/parsing CRL entries).
    REVOCATION_REASON_MAP = {
        'unspecified': x509.ReasonFlags.unspecified,
        'key_compromise': x509.ReasonFlags.key_compromise,
        'ca_compromise': x509.ReasonFlags.ca_compromise,
        'affiliation_changed': x509.ReasonFlags.affiliation_changed,
        'superseded': x509.ReasonFlags.superseded,
        'cessation_of_operation': x509.ReasonFlags.cessation_of_operation,
        'certificate_hold': x509.ReasonFlags.certificate_hold,
        'privilege_withdrawn': x509.ReasonFlags.privilege_withdrawn,
        'aa_compromise': x509.ReasonFlags.aa_compromise,
        'remove_from_crl': x509.ReasonFlags.remove_from_crl,
    }
    # Reverse mapping: ReasonFlags value -> Ansible identifier string.
    REVOCATION_REASON_MAP_INVERSE = dict()
    for k, v in REVOCATION_REASON_MAP.items():
        REVOCATION_REASON_MAP_INVERSE[v] = k
def cryptography_decode_revoked_certificate(cert):
    '''Decode a cryptography RevokedCertificate object into a plain dict.

    The optional CRL-entry extensions (certificate issuer, revocation reason,
    invalidity date) are extracted uniformly via one loop instead of three
    copy-pasted try/except blocks; absent extensions keep their defaults.
    '''
    result = {
        'serial_number': cert.serial_number,
        'revocation_date': cert.revocation_date,
        'issuer': None,
        'issuer_critical': False,
        'reason': None,
        'reason_critical': False,
        'invalidity_date': None,
        'invalidity_date_critical': False,
    }
    # (extension class, result key, extractor applied to the extension value)
    decoders = (
        (x509.CertificateIssuer, 'issuer', lambda value: list(value)),
        (x509.CRLReason, 'reason', lambda value: value.reason),
        (x509.InvalidityDate, 'invalidity_date', lambda value: value.invalidity_date),
    )
    for extension_class, key, extract in decoders:
        try:
            ext = cert.extensions.get_extension_for_class(extension_class)
        except x509.ExtensionNotFound:
            continue
        result[key] = extract(ext.value)
        result[key + '_critical'] = ext.critical
    return result
| gpl-3.0 |
KMK-ONLINE/ansible-modules-core | utilities/helper/_fireball.py | 76 | 1209 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: fireball
short_description: Enable fireball mode on remote node
version_added: "0.9"
deprecated: "in favor of SSH with ControlPersist"
description:
- Modern SSH clients support ControlPersist which is just as fast as
fireball was. Please enable that in ansible.cfg as a replacement
for fireball.
- Removed in ansible 2.0.
author:
- "Ansible Core Team"
- "Michael DeHaan"
'''
EXAMPLES = '''
'''
| gpl-3.0 |
MarcoMiranda94/FrequencyAnalyzerPy | analyze.py | 1 | 1096 | import optparse
def analyze(inputText):
values = {'a':0,'b':0,'c':0,'d':0,
'e':0,'f':0,'g':0,'h':0,
'i':0,'j':0,'k':0,'l':0,
'm':0,'n':0,'o':0,'p':0,
'q':0,'r':0,'s':0,'t':0,
'u':0,'v':0,'w':0,'x':0,
'y':0,'z':0}
for line in inputText:
for char in line:
for key, value in values.iteritems():
if char == key:
values[key] = value + 1
break
for key, value in values.iteritems():
if not value == 0:
print '[+]Letter ' + str(key) + ' found ' + str(values.get(key)) + ' times\n'
else:
print '[-]Letter ' + str(key) + ' was not found anytime\n'
return
def main():
parser = optparse.OptionParser('usage%prog'+\
'-f <input file>')
parser.add_option('-f', dest='tgtFile', type='string',\
help='specify the name of the input file')
(options, args) = parser.parse_args()
tgtFile = options.tgtFile
if tgtFile == None:
print parser.usage
exit(0)
with open(tgtFile, 'r') as inputFile:
print '[*]Input file was: ' + str(tgtFile) + '\n'
analyze(inputFile.readlines())
if __name__=='__main__':
main() | gpl-2.0 |
astronomeara/xastropy-old | xastropy/igm/abs_sys/ionic_clm.py | 1 | 8435 | """
#;+
#; NAME:
#; ionic_clm
#; Version 1.0
#;
#; PURPOSE:
#; Module for ionic column densities in Abs Systems
#; 28-Oct-2014 by JXP
#;-
#;------------------------------------------------------------------------------
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
from astropy.io import fits, ascii
from xastropy.atomic import ionization as xai
import xastropy as xa
from xastropy.xutils import xdebug as xdb
#class Ion_Clm(object):
#class Ions_Clm(object):
#class Ionic_Clm_File(object):
#def fits_flag(idx):
# Class for Ionic columns -- one ion at at time
class Ion_Clm(object):
    """Ionic column density measurement for a single ion in an absorption system.

    Attributes:
      iZion: tuple (Z, ion)
        Atomic number and ionization state
      name: string
        e.g. Si II
      lines: list
        Transitions contributing to the measurement
      flg_clm: int
        Flag describing the measurement
      clm: float
        log10 column density
      sigclm: float
        error in log10 column density
    """
    # Initialize with the (Z, ion) tuple
    def __init__(self, ion):
        self.iZion = ion
        # BUG FIX: the module imports the ionization module as `xai`
        # (see the imports at the top of the file); the original code
        # referenced the undefined name `xaa` and raised NameError on
        # instantiation.
        self.name = xai.ion_name(ion)
        self.lines = []  # List of transitions contributing
        #self.analy = {} # Analysis inputs (from .clm file)
        # Data
        self.flg_clm = 0
        self.clm = 0.
        self.sigclm = 0.
# ###################
# Class for Ionic columns
class Ions_Clm(object):
    """Set of Ionic column densities for a given system

    Attributes:
      ion_data: dict
        Maps an (Z, ion) tuple to a dict with the measurement values
        (keys listed in self.keys)
      keys: tuple of str
        Column names stored per ion: clm, sig_clm, flg_clm, flg_inst
      key_dtype: tuple of str
        Matching dtypes for self.keys
    """
    def __init__(self, all_file=None, trans_file=None):
        '''
        all_file -- .all file
           File for ionic column values
           Generally a .all file for parsing
        trans_file -- string
           File for transition-by-transition measurements
           Usually has extension .ion
        '''
        # Column layout shared by read_all_file() and __repr__()
        self.keys = ('clm', 'sig_clm', 'flg_clm', 'flg_inst')
        self.key_dtype = ('f4', 'f4', 'i4', 'i4')
        if all_file is not None:
            self.read_all_file(all_file)
            self.all_file = all_file
        # Transitions?
        if trans_file is not None:
            self.read_ion_file(trans_file)

    def __getitem__(self, iZion):
        """Return the measurement dict for an (Z, ion) tuple.

        Raises KeyError (with the offending key) if the ion is unknown.
        The original wrapped this lookup in a try/except that re-raised a
        *bare* KeyError, discarding the key from the error message.
        """
        return self.ion_data[iZion]

    def read_all_file(self, all_fil):
        """Parse a .all file of ionic column densities into self.ion_data.

        NOTE: If the program breaks in this function, check the .all file
        to see if it is properly formatted.
        """
        print('Reading {:s}'.format(all_fil))
        names = ('Z', 'ion', 'clm', 'sig_clm', 'flg_clm', 'flg_inst')
        table = ascii.read(all_fil, format='no_header', names=names)
        # Convert the table into a dict keyed by (Z, ion)
        tmp = {}
        for row in table:
            tmp[(row['Z'], row['ion'])] = {}
            for key in self.keys:
                tmp[(row['Z'], row['ion'])][key] = row[key]
        self.ion_data = tmp

    def read_ion_file(self, ion_fil):
        """Parse a .ion file of transition-by-transition measurements
        into self.trans, adding atomic data (Z, ion) columns.
        """
        names = ('wrest', 'clm', 'sig_clm', 'flg_clm', 'flg_inst')
        table = ascii.read(ion_fil, format='no_header', names=names)
        # Look up atomic data for each transition (by rest wavelength)
        adata = xa.spec.abs_line.abs_line_data(table['wrest'], ret_flg=1)
        from astropy.table import Column
        Z = Column(adata['Z'], name='Z')      # Atomic number
        ion = Column(adata['ion'], name='ion')  # Ionization state
        table.add_columns([Z, ion])
        self.trans = table

    def __repr__(self):
        """One line per ion: Z, ion, logN, sigN and the two flags."""
        tmp = '[Ions_Clm]\n'
        tmp += 'Z ion logN sigN flgN flgI\n'
        tmp += '----------------------------\n'
        for keys in self.ion_data:
            tmp += '{:2d} {:2d} {:.3f} {:.3f} {:d} {:3d}'.format(keys[0], keys[1],
                                                                 self.ion_data[keys][self.keys[0]],
                                                                 self.ion_data[keys][self.keys[1]],
                                                                 self.ion_data[keys][self.keys[2]],
                                                                 self.ion_data[keys][self.keys[3]])
            tmp += '\n'
        return tmp
## ###################
##
# Class generated when parsing (Mainly useful for AbsSys)
class Ionic_Clm_File(object):
    """Parser for a .clm file describing an absorption system.

    Attributes:
      clm_fil: string
        Path of the .clm file that was parsed
      (further attributes are filled in by read_clmfil)
    """
    # Initialize with a .clm file
    def __init__(self, clm_fil):
        #
        self.clm_fil = clm_fil
        # Parse
        self.read_clmfil()
    # Read a .CLM file and return a Class
    def read_clmfil(self,linedic=None):
        """
        Read in the .CLM file in an appropriate manner.
        NOTE: If the program breaks in this function, check the clm file to see if it is properly formatted.
        RETURNS two dictionaries CLM and LINEDIC. CLM contains the contents of CLM
        for the given DLA. The LINEDIC that is passed (when not None) is updated appropriately.
        Keys in the CLM dictionary are:
          INST - Instrument used
          FITS - a list of fits files
          ZABS - absorption redshift
          ION - .ION file location
          HI - The HI column and error; [HI, HIerr]
          FIX - Any abundances that need fixing from the ION file
          VELS - Dictionary of velocity limits, which is keyed by
            FLAGS - Any measurement flags associated with VLIM
            VLIM - velocity limits in km/s [vmin,vmax]
            ELEM - Element (from get_elem)
        See get_elem for properties of LINEDIC
        """
        # Read file
        f=open(self.clm_fil, 'r')
        arr=f.readlines()
        f.close()
        nline = len(arr)
        # Line 0: source name; line 1: bitwise instrument/data flag
        source=arr[0][:-1]
        # Data files
        self.flg_data = int(arr[1][:-1])
        self.fits_files={}
        ii=2
        # One fits filename per set bit in flg_data (bits 0..5); ii tracks
        # the current line index throughout the rest of the parse.
        for jj in range(0,6):
            if (self.flg_data % (2**(jj+1))) > (2**jj - 1):
                self.fits_files[2**jj] = arr[ii].strip()
                ii += 1
        # Redshift
        self.zsys=float(arr[ii][:-1]) ; ii+=1
        self.ion_fil=arr[ii].strip() ; ii+=1
        # NHI: one line with 'value,error'
        tmp = arr[ii].split(',') ; ii+=1
        if len(tmp) != 2:
            raise ValueError('ionic_clm: Bad formatting {:s} in {:s}'
                             .format(arr[ii-1],self.clm_fil))
        self.NHI=float(tmp[0])
        self.sigNHI=float(tmp[1])
        # Abundances by hand: a count line, then (atom, 'val,err,flag') pairs
        numhand=int(arr[ii][:-1]) ; ii+=1
        self.fixabund={}
        if numhand>0:
            for jj in range(numhand):
                # Atomic number
                atom=int(arr[ii][:-1]) ; ii+=1
                # Values
                tmp = arr[ii].strip().split(',') ; ii+=1
                self.fixabund[atom]= float(tmp[0]), float(tmp[1]), int(tmp[2])
        # Loop on lines: pairs of (flag line, 'wrest,vmin,vmax,flag2' line)
        self.clm_lines = {}
        while ii < (nline-1):
            # No empty lines allowed
            if len(arr[ii].strip()) == 0:
                break
            # Read flag
            ionflg = int(arr[ii].strip()); ii+=1
            # Read the rest
            tmp = arr[ii].split(',') ; ii+=1
            if len(tmp) != 4: raise ValueError('ionic_clm: Bad formatting {:s} in {:s}'
                                               .format(arr[ii-1],self.clm_fil))
            vmin = float(tmp[1].strip())
            vmax = float(tmp[2].strip())
            key = float(tmp[0].strip()) # Using a float not string!
            # Generate
            self.clm_lines[key] = xa.spec.analysis.Spectral_Line(key)
            self.clm_lines[key].analy['FLAGS'] = ionflg, int(tmp[3].strip())
            # By-hand: flag >= 8 means (vmin, vmax) actually hold (logN, sig_logN)
            if ionflg >= 8:
                self.clm_lines[key].measure['N'] = 10.**vmin
                self.clm_lines[key].measure['SIGN'] = (10.**(vmin+vmax) - 10.**(vmin-vmax))/2
            else:
                self.clm_lines[key].analy['VLIM']= [vmin,vmax]
# Converts Flag to Instrument
def fits_flag(idx):
    """Map a data-file flag bit (1, 2, 4, 8, 16 or 32) to its instrument name.

    Returns 'Unknown' for any value that is not one of the six flag bits.
    """
    fits_list = {1: 'HIRES', 2: 'ESI', 4: 'UVES',
                 8: 'XX', 16: 'MIKEb', 32: 'MIKEr'}
    try:
        return fits_list[idx]
    except KeyError:
        # Was a bare `except`, which hid every failure; only a missing key
        # means "unknown flag" — anything else should surface as an error.
        return 'Unknown'
| bsd-3-clause |
jigarkb/CTCI | LeetCode/380-M-InsertDeleteGetRandomO(1).py | 2 | 2629 | # Design a data structure that supports all following operations in average O(1) time.
#
# insert(val): Inserts an item val to the set if not already present.
# remove(val): Removes an item val from the set if present.
# getRandom: Returns a random element from current set of elements. Each element must have the same probability of
# being returned.
# Example:
#
# // Init an empty set.
# RandomizedSet randomSet = new RandomizedSet();
#
# // Inserts 1 to the set. Returns true as 1 was inserted successfully.
# randomSet.insert(1);
#
# // Returns false as 2 does not exist in the set.
# randomSet.remove(2);
#
# // Inserts 2 to the set, returns true. Set now contains [1,2].
# randomSet.insert(2);
#
# // getRandom should return either 1 or 2 randomly.
# randomSet.getRandom();
#
# // Removes 1 from the set, returns true. Set now contains [2].
# randomSet.remove(1);
#
# // 2 was already in the set, so return false.
# randomSet.insert(2);
#
# // Since 2 is the only number in the set, getRandom always return 2.
# randomSet.getRandom();
import random
class RandomizedSet(object):
    """Set supporting insert, remove and uniform random pick, all O(1) average.

    Elements live in a list for O(1) random access; a companion dict maps
    each element to its position so removal can swap-with-last in O(1).
    """

    def __init__(self):
        # _values[i] is the i-th stored element; _index[v] is v's position.
        self._values = []
        self._index = {}

    def insert(self, val):
        """Add *val*; return True if it was not already present."""
        if val in self._index:
            return False
        self._index[val] = len(self._values)
        self._values.append(val)
        return True

    def remove(self, val):
        """Discard *val*; return True if it was present."""
        pos = self._index.pop(val, None)
        if pos is None:
            return False
        # Move the last element into the vacated slot, then drop the tail.
        last = self._values.pop()
        if pos < len(self._values):
            self._values[pos] = last
            self._index[last] = pos
        return True

    def getRandom(self):
        """Return a uniformly random element, or None if the set is empty."""
        if not self._values:
            return None
        return self._values[random.randint(0, len(self._values) - 1)]
# Your RandomizedSet object will be instantiated and called as such:
# obj = RandomizedSet()
# param_1 = obj.insert(val)
# param_2 = obj.remove(val)
# param_3 = obj.getRandom()
# Note:
# Using 2 dicts of value->index and index->value
| mit |
whitepages/nova | nova/cmd/spicehtml5proxy.py | 30 | 1408 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Websocket proxy that is compatible with OpenStack Nova
SPICE HTML5 consoles. Leverages websockify.py by Joel Martin
"""
import sys
from oslo_config import cfg
from nova.cmd import baseproxy
from nova import config
opts = [
cfg.StrOpt('html5proxy_host',
default='0.0.0.0',
help='Host on which to listen for incoming requests'),
cfg.IntOpt('html5proxy_port',
default=6082,
min=1,
max=65535,
help='Port on which to listen for incoming requests'),
]
CONF = cfg.CONF
CONF.register_cli_opts(opts, group='spice')
def main():
    """Entry point: parse options, then run the SPICE HTML5 websocket proxy."""
    config.parse_args(sys.argv)
    host = CONF.spice.html5proxy_host
    port = CONF.spice.html5proxy_port
    baseproxy.proxy(host=host, port=port)
| apache-2.0 |
CMartelLML/numpy | numpy/linalg/info.py | 264 | 1198 | """\
Core Linear Algebra Tools
-------------------------
Linear algebra basics:
- norm Vector or matrix norm
- inv Inverse of a square matrix
- solve Solve a linear system of equations
- det Determinant of a square matrix
- lstsq Solve linear least-squares problem
- pinv Pseudo-inverse (Moore-Penrose) calculated using a singular
value decomposition
- matrix_power Integer power of a square matrix
Eigenvalues and decompositions:
- eig Eigenvalues and vectors of a square matrix
- eigh Eigenvalues and eigenvectors of a Hermitian matrix
- eigvals Eigenvalues of a square matrix
- eigvalsh Eigenvalues of a Hermitian matrix
- qr QR decomposition of a matrix
- svd Singular value decomposition of a matrix
- cholesky Cholesky decomposition of a matrix
Tensor operations:
- tensorsolve Solve a linear tensor equation
- tensorinv Calculate an inverse of a tensor
Exceptions:
- LinAlgError Indicates a failed linear algebra operation
"""
from __future__ import division, absolute_import, print_function
depends = ['core']
| bsd-3-clause |
ilyes14/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()

###############################################################################
# Learn the dictionary of images

print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)

buffer = []
t0 = time.time()

# The online learning part: cycle over the whole dataset 6 times.
# (A dead `index = 1` that was immediately overwritten has been removed.)
index = 0
for _ in range(6):
    for img in faces.images:
        data = extract_patches_2d(img, patch_size, max_patches=50,
                                  random_state=rng)
        data = np.reshape(data, (len(data), -1))
        buffer.append(data)
        index += 1
        if index % 10 == 0:
            # Every 10 images (i.e. 500 accumulated patches): normalize the
            # batch, run one partial_fit step, and reset the buffer.
            data = np.concatenate(buffer, axis=0)
            data -= np.mean(data, axis=0)
            data /= np.std(data, axis=0)
            kmeans.partial_fit(data)
            buffer = []
        if index % 100 == 0:
            print('Partial fit of %4i out of %i'
                  % (index, 6 * len(faces.images)))

dt = time.time() - t0
print('done in %.2fs.' % dt)

###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
    plt.subplot(9, 9, i + 1)
    plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
               interpolation='nearest')
    plt.xticks(())
    plt.yticks(())

# The loop makes 6 passes over the dataset extracting 50 patches per image,
# so report the actual patch count (the original used ``8 * len(...)``,
# which matched neither the number of images nor the number of patches).
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
             (dt, 6 * len(faces.images) * 50), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
vorwerkc/pymatgen | dev_scripts/chemenv/test_algos.py | 4 | 3439 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Development script to test the algorithms of a given model coordination environments
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import LocalGeometryFinder
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import AbstractGeometry
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import AllCoordinationGeometries
from math import factorial
import numpy as np
import itertools
from random import shuffle
import time
if __name__ == "__main__":

    allcg = AllCoordinationGeometries()

    # Prompt until a known geometry symbol is entered.
    while True:
        cg_symbol = input("Enter symbol of the geometry for which you want to get the explicit permutations : ")
        try:
            cg = allcg[cg_symbol]
            break
        except LookupError:
            print("Wrong geometry, try again ...")
            continue

    lgf = LocalGeometryFinder()
    lgf.setup_parameters(structure_refinement=lgf.STRUCTURE_REFINEMENT_NONE)

    # BUG FIX: in Python 3, range() is immutable so random.shuffle() below
    # would raise TypeError; materialize the indices as a list.
    myindices = list(range(cg.coordination_number))

    test = input(
        'Enter if you want to test all possible permutations ("all" or "a") or a given number of random permutations (i.e. "25")'
    )

    if test == "all" or test == "a":
        # Exhaustive: all N! orderings of the coordinating sites.
        perms_iterator = itertools.permutations(myindices)
        nperms = factorial(cg.coordination_number)
    else:
        try:
            nperms = int(test)
        except Exception:
            raise ValueError("Could not turn {} into integer ...".format(test))
        # Random sample: nperms independently shuffled copies of the indices.
        perms_iterator = []
        for ii in range(nperms):
            shuffle(myindices)
            perms_iterator.append(list(myindices))

    iperm = 1
    # BUG FIX: time.clock() was removed in Python 3.8; use the monotonic
    # high-resolution timer instead.
    t1 = time.perf_counter()
    for indices_perm in perms_iterator:

        lgf.setup_test_perfect_environment(cg_symbol, indices=indices_perm)

        lgf.perfect_geometry = AbstractGeometry.from_cg(cg=cg)

        points_perfect = lgf.perfect_geometry.points_wocs_ctwocc()

        print("Perm # {:d}/{:d} : ".format(iperm, nperms), indices_perm)

        algos_results = []
        for algo in cg.algorithms:
            print(algo)
            if algo.algorithm_type == "EXPLICIT_PERMUTATIONS":
                raise ValueError("Do something for the explicit ones ... (these should anyway be by far ok!)")

            results = lgf.coordination_geometry_symmetry_measures_separation_plane(
                coordination_geometry=cg,
                separation_plane_algo=algo,
                tested_permutations=False,
                points_perfect=points_perfect,
            )
            print("Number of permutations tested : ", len(results[0]))
            algos_results.append(min(results[0]))

            # A perfect environment must yield a zero symmetry measure for at
            # least one permutation; pause for inspection otherwise.
            if not np.isclose(min(results[0]), 0.0):
                print("Following is not 0.0 ...")
                input(results)
        print(" => ", algos_results)
        iperm += 1
    t2 = time.perf_counter()
    print(
        'Time to test {:d} permutations for geometry "{}" (symbol "{}") : {:.2f} seconds'.format(
            nperms, cg.name, cg_symbol, t2 - t1
        )
    )
| mit |
matheuscas/hackathon_paypal | languages/ro.py | 162 | 17306 | # coding: utf8
{
'!=': '!=',
'!langcode!': 'ro',
'!langname!': 'Română',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" (actualizează) este o expresie opțională precum "câmp1=\'valoare_nouă\'". Nu puteți actualiza sau șterge rezultatele unui JOIN',
'%(nrows)s records found': '%(nrows)s înregistrări găsite',
'%d days ago': '%d days ago',
'%d weeks ago': '%d weeks ago',
'%s %%{row} deleted': '%s linii șterse',
'%s %%{row} updated': '%s linii actualizate',
'%s selected': '%s selectat(e)',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'(something like "it-it")': '(ceva ce seamănă cu "it-it")',
'1 day ago': '1 day ago',
'1 week ago': '1 week ago',
'<': '<',
'<=': '<=',
'=': '=',
'>': '>',
'>=': '>=',
'A new version of web2py is available': 'O nouă versiune de web2py este disponibilă',
'A new version of web2py is available: %s': 'O nouă versiune de web2py este disponibilă: %s',
'About': 'Despre',
'about': 'despre',
'About application': 'Despre aplicație',
'Access Control': 'Control acces',
'Add': 'Adaugă',
'additional code for your application': 'cod suplimentar pentru aplicația dvs.',
'admin disabled because no admin password': 'administrare dezactivată deoarece parola de administrator nu a fost furnizată',
'admin disabled because not supported on google app engine': 'administrare dezactivată deoarece funcționalitatea nu e suportat pe Google App Engine',
'admin disabled because unable to access password file': 'administrare dezactivată deoarece nu există acces la fișierul cu parole',
'Admin is disabled because insecure channel': 'Adminstrarea este dezactivată deoarece conexiunea nu este sigură',
'Admin is disabled because unsecure channel': 'Administrarea este dezactivată deoarece conexiunea nu este securizată',
'Administration': 'Administrare',
'Administrative Interface': 'Interfață administrare',
'Administrator Password:': 'Parolă administrator:',
'Ajax Recipes': 'Rețete Ajax',
'And': 'Și',
'and rename it (required):': 'și renumiți (obligatoriu):',
'and rename it:': ' și renumiți:',
'appadmin': 'appadmin',
'appadmin is disabled because insecure channel': 'appadmin dezactivat deoarece conexiunea nu e sigură',
'application "%s" uninstalled': 'aplicația "%s" a fost dezinstalată',
'application compiled': 'aplicația a fost compilată',
'application is compiled and cannot be designed': 'aplicația este compilată și nu poate fi editată',
'Are you sure you want to delete file "%s"?': 'Sigur ștergeți fișierul "%s"?',
'Are you sure you want to delete this object?': 'Sigur ștergeți acest obiect?',
'Are you sure you want to uninstall application "%s"': 'Sigur dezinstalați aplicația "%s"',
'Are you sure you want to uninstall application "%s"?': 'Sigur dezinstalați aplicația "%s"?',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': 'ATENȚIE: Nu vă puteți conecta decât utilizând o conexiune securizată (HTTPS) sau rulând aplicația pe computerul local.',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'ATENȚIE: Nu puteți efectua mai multe teste o dată deoarece lansarea în execuție a mai multor subpocese nu este sigură.',
'ATTENTION: you cannot edit the running application!': 'ATENȚIE: nu puteți edita o aplicație în curs de execuție!',
'Authentication': 'Autentificare',
'Available Databases and Tables': 'Baze de date și tabele disponibile',
'Back': 'Înapoi',
'Buy this book': 'Cumpără această carte',
'Cache': 'Cache',
'cache': 'cache',
'Cache Keys': 'Chei cache',
'cache, errors and sessions cleaned': 'cache, erori și sesiuni golite',
'Cannot be empty': 'Nu poate fi vid',
'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': 'Compilare imposibilă: aplicația conține erori. Debogați aplicația și încercați din nou.',
'cannot create file': 'fișier imposibil de creat',
'cannot upload file "%(filename)s"': 'imposibil de încărcat fișierul "%(filename)s"',
'Change Password': 'Schimbare parolă',
'Change password': 'Schimbare parolă',
'change password': 'schimbare parolă',
'check all': 'coșați tot',
'Check to delete': 'Coșați pentru a șterge',
'clean': 'golire',
'Clear': 'Golește',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'click to check for upgrades': 'Clic pentru a verifica dacă există upgrade-uri',
'Client IP': 'IP client',
'Community': 'Comunitate',
'compile': 'compilare',
'compiled application removed': 'aplicația compilată a fost ștearsă',
'Components and Plugins': 'Componente și plugin-uri',
'contains': 'conține',
'Controller': 'Controlor',
'Controllers': 'Controlori',
'controllers': 'controlori',
'Copyright': 'Drepturi de autor',
'create file with filename:': 'crează fișier cu numele:',
'Create new application': 'Creați aplicație nouă',
'create new application:': 'crează aplicație nouă:',
'crontab': 'crontab',
'Current request': 'Cerere curentă',
'Current response': 'Răspuns curent',
'Current session': 'Sesiune curentă',
'currently saved or': 'în prezent salvat sau',
'customize me!': 'Personalizează-mă!',
'data uploaded': 'date încărcate',
'Database': 'bază de date',
'Database %s select': 'selectare bază de date %s',
'database administration': 'administrare bază de date',
'Date and Time': 'Data și ora',
'db': 'db',
'DB Model': 'Model bază de date',
'defines tables': 'definire tabele',
'Delete': 'Șterge',
'delete': 'șterge',
'delete all checked': 'șterge tot ce e coșat',
'Delete:': 'Șterge:',
'Demo': 'Demo',
'Deploy on Google App Engine': 'Instalare pe Google App Engine',
'Deployment Recipes': 'Rețete de instalare',
'Description': 'Descriere',
'design': 'design',
'DESIGN': 'DESIGN',
'Design for': 'Design pentru',
'DISK': 'DISK',
'Disk Cache Keys': 'Chei cache de disc',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentație',
"Don't know what to do?": 'Nu știți ce să faceți?',
'done!': 'gata!',
'Download': 'Descărcare',
'E-mail': 'E-mail',
'E-mail invalid': 'E-mail invalid',
'edit': 'editare',
'EDIT': 'EDITARE',
'Edit': 'Editare',
'Edit application': 'Editare aplicație',
'edit controller': 'editare controlor',
'Edit current record': 'Editare înregistrare curentă',
'Edit Profile': 'Editare profil',
'edit profile': 'editare profil',
'Edit This App': 'Editați această aplicație',
'Editing file': 'Editare fișier',
'Editing file "%s"': 'Editare fișier "%s"',
'Email and SMS': 'E-mail și SMS',
'enter a number between %(min)g and %(max)g': 'introduceți un număr între %(min)g și %(max)g',
'enter an integer between %(min)g and %(max)g': 'introduceți un întreg între %(min)g și %(max)g',
'Error logs for "%(app)s"': 'Log erori pentru "%(app)s"',
'errors': 'erori',
'Errors': 'Erori',
'Export': 'Export',
'export as csv file': 'exportă ca fișier csv',
'exposes': 'expune',
'extends': 'extinde',
'failed to reload module': 'reîncarcare modul nereușită',
'False': 'Neadevărat',
'FAQ': 'Întrebări frecvente',
'file "%(filename)s" created': 'fișier "%(filename)s" creat',
'file "%(filename)s" deleted': 'fișier "%(filename)s" șters',
'file "%(filename)s" uploaded': 'fișier "%(filename)s" încărcat',
'file "%(filename)s" was not deleted': 'fișierul "%(filename)s" n-a fost șters',
'file "%s" of %s restored': 'fișier "%s" de %s restaurat',
'file changed on disk': 'fișier modificat pe disc',
'file does not exist': 'fișier inexistent',
'file saved on %(time)s': 'fișier salvat %(time)s',
'file saved on %s': 'fișier salvat pe %s',
'First name': 'Prenume',
'Forbidden': 'Interzis',
'Forms and Validators': 'Formulare și validatori',
'Free Applications': 'Aplicații gratuite',
'Functions with no doctests will result in [passed] tests.': 'Funcțiile fără doctests vor genera teste [trecute].',
'Group %(group_id)s created': 'Grup %(group_id)s creat',
'Group ID': 'ID grup',
'Group uniquely assigned to user %(id)s': 'Grup asociat în mod unic utilizatorului %(id)s',
'Groups': 'Grupuri',
'Hello World': 'Salutare lume',
'help': 'ajutor',
'Home': 'Acasă',
'How did you get here?': 'Cum ați ajuns aici?',
'htmledit': 'editare html',
'import': 'import',
'Import/Export': 'Import/Export',
'includes': 'include',
'Index': 'Index',
'insert new': 'adaugă nou',
'insert new %s': 'adaugă nou %s',
'Installed applications': 'Aplicații instalate',
'internal error': 'eroare internă',
'Internal State': 'Stare internă',
'Introduction': 'Introducere',
'Invalid action': 'Acțiune invalidă',
'Invalid email': 'E-mail invalid',
'invalid password': 'parolă invalidă',
'Invalid password': 'Parolă invalidă',
'Invalid Query': 'Interogare invalidă',
'invalid request': 'cerere invalidă',
'invalid ticket': 'tichet invalid',
'Key': 'Key',
'language file "%(filename)s" created/updated': 'fișier de limbă "%(filename)s" creat/actualizat',
'Language files (static strings) updated': 'Fișierele de limbă (șirurile statice de caractere) actualizate',
'languages': 'limbi',
'Languages': 'Limbi',
'languages updated': 'limbi actualizate',
'Last name': 'Nume',
'Last saved on:': 'Ultima salvare:',
'Layout': 'Șablon',
'Layout Plugins': 'Șablon plugin-uri',
'Layouts': 'Șabloane',
'License for': 'Licență pentru',
'Live Chat': 'Chat live',
'loading...': 'încarc...',
'Logged in': 'Logat',
'Logged out': 'Delogat',
'Login': 'Autentificare',
'login': 'autentificare',
'Login to the Administrative Interface': 'Logare interfață de administrare',
'logout': 'ieșire',
'Logout': 'Ieșire',
'Lost Password': 'Parolă pierdută',
'Lost password?': 'Parolă pierdută?',
'Main Menu': 'Meniu principal',
'Manage Cache': 'Manage Cache',
'Menu Model': 'Model meniu',
'merge': 'unește',
'Models': 'Modele',
'models': 'modele',
'Modules': 'Module',
'modules': 'module',
'My Sites': 'Site-urile mele',
'Name': 'Nume',
'New': 'Nou',
'new application "%s" created': 'aplicația nouă "%s" a fost creată',
'New password': 'Parola nouă',
'New Record': 'Înregistrare nouă',
'new record inserted': 'înregistrare nouă adăugată',
'next 100 rows': 'următoarele 100 de linii',
'NO': 'NU',
'No databases in this application': 'Aplicație fără bază de date',
'Object or table name': 'Obiect sau nume de tabel',
'Old password': 'Parola veche',
'Online examples': 'Exemple online',
'Or': 'Sau',
'or import from csv file': 'sau importă din fișier csv',
'or provide application url:': 'sau furnizează adresă url:',
'Origin': 'Origine',
'Original/Translation': 'Original/Traducere',
'Other Plugins': 'Alte plugin-uri',
'Other Recipes': 'Alte rețete',
'Overview': 'Prezentare de ansamblu',
'pack all': 'împachetează toate',
'pack compiled': 'pachet compilat',
'Password': 'Parola',
"Password fields don't match": 'Câmpurile de parolă nu se potrivesc',
'Peeking at file': 'Vizualizare fișier',
'please input your password again': 'introduceți parola din nou',
'Plugins': 'Plugin-uri',
'Powered by': 'Pus în mișcare de',
'Preface': 'Prefață',
'previous 100 rows': '100 de linii anterioare',
'Profile': 'Profil',
'Python': 'Python',
'Query': 'Interogare',
'Query:': 'Interogare:',
'Quick Examples': 'Exemple rapide',
'RAM': 'RAM',
'RAM Cache Keys': 'Chei cache RAM',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Rețete',
'Record': 'înregistrare',
'record does not exist': 'înregistrare inexistentă',
'Record ID': 'ID înregistrare',
'Record id': 'id înregistrare',
'register': 'înregistrare',
'Register': 'Înregistrare',
'Registration identifier': 'Identificator de autentificare',
'Registration key': 'Cheie înregistrare',
'Registration successful': 'Autentificare reușită',
'Remember me (for 30 days)': 'Ține-mă minte (timp de 30 de zile)',
'remove compiled': 'șterge compilate',
'Request reset password': 'Cerere resetare parolă',
'Reset Password key': 'Cheie restare parolă',
'Resolve Conflict file': 'Fișier rezolvare conflict',
'restore': 'restaurare',
'revert': 'revenire',
'Role': 'Rol',
'Rows in Table': 'Linii în tabel',
'Rows selected': 'Linii selectate',
'save': 'salvare',
'Save profile': 'Salvează profil',
'Saved file hash:': 'Hash fișier salvat:',
'Search': 'Căutare',
'Semantic': 'Semantică',
'Services': 'Servicii',
'session expired': 'sesiune expirată',
'shell': 'line de commandă',
'site': 'site',
'Size of cache:': 'Size of cache:',
'some files could not be removed': 'anumite fișiere n-au putut fi șterse',
'starts with': 'începe cu',
'state': 'stare',
'static': 'static',
'Static files': 'Fișiere statice',
'Statistics': 'Statistics',
'Stylesheet': 'Foaie de stiluri',
'Submit': 'Înregistrează',
'submit': 'submit',
'Support': 'Suport',
'Sure you want to delete this object?': 'Sigur ștergeți acest obiect?',
'Table': 'tabel',
'Table name': 'Nume tabel',
'test': 'test',
'Testing application': 'Testare aplicație',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"Interogarea (query)" este o condiție de tipul "db.tabel1.câmp1==\'valoare\'". Ceva de genul "db.tabel1.câmp1==db.tabel2.câmp2" generează un JOIN SQL.',
'the application logic, each URL path is mapped in one exposed function in the controller': 'logica aplicației, fiecare rută URL este mapată într-o funcție expusă de controlor',
'The Core': 'Nucleul',
'the data representation, define database tables and sets': 'reprezentarea datelor, definește tabelele bazei de date și seturile (de date)',
'The output of the file is a dictionary that was rendered by the view %s': 'Fișierul produce un dicționar care a fost prelucrat de vederea %s',
'the presentations layer, views are also known as templates': 'nivelul de prezentare, vederile sunt de asemenea numite și șabloane',
'The Views': 'Vederile',
'There are no controllers': 'Nu există controlori',
'There are no models': 'Nu există modele',
'There are no modules': 'Nu există module',
'There are no static files': 'Nu există fișiere statice',
'There are no translators, only default language is supported': 'Nu există traduceri, doar limba implicită este suportată',
'There are no views': 'Nu există vederi',
'these files are served without processing, your images go here': 'aceste fișiere sunt servite fără procesare, imaginea se plasează acolo',
'This App': 'Această aplicație',
'This is a copy of the scaffolding application': 'Aceasta este o copie a aplicației schelet',
'This is the %(filename)s template': 'Aceasta este șablonul fișierului %(filename)s',
'Ticket': 'Tichet',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': 'Moment în timp (timestamp)',
'to previous version.': 'la versiunea anterioară.',
'too short': 'prea scurt',
'translation strings for the application': 'șiruri de caractere folosite la traducerea aplicației',
'True': 'Adevărat',
'try': 'încearcă',
'try something like': 'încearcă ceva de genul',
'Twitter': 'Twitter',
'Unable to check for upgrades': 'Imposibil de verificat dacă există actualizări',
'unable to create application "%s"': 'imposibil de creat aplicația "%s"',
'unable to delete file "%(filename)s"': 'imposibil de șters fișierul "%(filename)s"',
'Unable to download': 'Imposibil de descărcat',
'Unable to download app': 'Imposibil de descărcat aplicația',
'unable to parse csv file': 'imposibil de analizat fișierul csv',
'unable to uninstall "%s"': 'imposibil de dezinstalat "%s"',
'uncheck all': 'decoșează tot',
'uninstall': 'dezinstalează',
'update': 'actualizează',
'update all languages': 'actualizează toate limbile',
'Update:': 'Actualizare:',
'upload application:': 'incarcă aplicația:',
'Upload existing application': 'Încarcă aplicația existentă',
'upload file:': 'încarcă fișier:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Folosiți (...)&(...) pentru AND, (...)|(...) pentru OR, și ~(...) pentru NOT, pentru a crea interogări complexe.',
'User %(id)s Logged-in': 'Utilizator %(id)s autentificat',
'User %(id)s Logged-out': 'Utilizator %(id)s delogat',
'User %(id)s Password changed': 'Parola utilizatorului %(id)s a fost schimbată',
'User %(id)s Password reset': 'Resetare parola utilizator %(id)s',
'User %(id)s Profile updated': 'Profil utilizator %(id)s actualizat',
'User %(id)s Registered': 'Utilizator %(id)s înregistrat',
'User ID': 'ID utilizator',
'value already in database or empty': 'Valoare existentă în baza de date sau vidă',
'Verify Password': 'Verifică parola',
'versioning': 'versiuni',
'Videos': 'Video-uri',
'View': 'Vedere',
'view': 'vedere',
'Views': 'Vederi',
'views': 'vederi',
'web2py is up to date': 'web2py este la zi',
'web2py Recent Tweets': 'Ultimele tweet-uri web2py',
'Welcome': 'Bine ați venit',
'Welcome %s': 'Bine ați venit %s',
'Welcome to web2py': 'Bun venit la web2py',
'Welcome to web2py!': 'Bun venit la web2py!',
'Which called the function %s located in the file %s': 'Care a apelat funcția %s prezentă în fișierul %s',
'YES': 'DA',
'You are successfully running web2py': 'Rulați cu succes web2py',
'You can modify this application and adapt it to your needs': 'Puteți modifica și adapta aplicația nevoilor dvs.',
'You visited the url %s': 'Ați vizitat adresa %s',
}
| mit |
jmartiuk5/python-mode | pymode/libs2/rope/refactor/localtofield.py | 59 | 2111 | from rope.base import pynames, evaluate, exceptions, worder
from rope.refactor.rename import Rename
class LocalToField(object):
    """Refactoring that turns a local variable of a method into a field.

    The variable at (resource, offset) is renamed to ``<self>.<name>`` via
    the standard Rename refactoring, restricted to the same resource.
    """

    def __init__(self, project, resource, offset):
        self.project = project
        self.pycore = project.pycore
        self.resource = resource
        self.offset = offset

    def get_changes(self):
        """Compute the change set; raise RefactoringError for non-locals."""
        local_name = worder.get_name_at(self.resource, self.offset)
        pymodule_here = self.pycore.resource_to_pyobject(self.resource)
        target = evaluate.eval_location(pymodule_here, self.offset)
        if not self._is_a_method_local(target):
            raise exceptions.RefactoringError(
                'Convert local variable to field should be performed on \n'
                'a local variable of a method.')

        defining_pymodule, defining_line = target.get_definition_location()
        function_scope = defining_pymodule.get_scope().\
            get_inner_scope_for_line(defining_line)
        # Not checking redefinition
        #self._check_redefinition(local_name, function_scope)
        field_name = self._get_field_name(function_scope.pyobject, local_name)
        renamer = Rename(self.project, self.resource, self.offset)
        return renamer.get_changes(field_name, resources=[self.resource])

    def _check_redefinition(self, name, function_scope):
        # Would reject the refactoring when the class already defines the
        # field; currently unused (see get_changes above).
        class_scope = function_scope.parent
        if name in class_scope.pyobject:
            raise exceptions.RefactoringError(
                'The field %s already exists' % name)

    def _get_field_name(self, pyfunction, name):
        """Return ``<first-parameter>.<name>`` (normally ``self.<name>``)."""
        self_param = pyfunction.get_param_names()[0]
        return self_param + '.' + name

    def _is_a_method_local(self, pyname):
        """True when *pyname* is a name assigned inside a method of a class."""
        pymodule, lineno = pyname.get_definition_location()
        holding_scope = pymodule.get_scope().get_inner_scope_for_line(lineno)
        parent = holding_scope.parent
        return (isinstance(pyname, pynames.AssignedName) and
                pyname in holding_scope.get_names().values() and
                holding_scope.get_kind() == 'Function' and
                parent is not None and parent.get_kind() == 'Class')
| lgpl-3.0 |
VielSoft/odoo | addons/analytic/analytic.py | 94 | 18688 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from openerp.osv import fields, osv
from openerp import tools
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class account_analytic_account(osv.osv):
_name = 'account.analytic.account'
_inherit = ['mail.thread']
_description = 'Analytic Account'
_track = {
'state': {
'analytic.mt_account_pending': lambda self, cr, uid, obj, ctx=None: obj.state == 'pending',
'analytic.mt_account_closed': lambda self, cr, uid, obj, ctx=None: obj.state == 'close',
'analytic.mt_account_opened': lambda self, cr, uid, obj, ctx=None: obj.state == 'open',
},
}
def _compute_level_tree(self, cr, uid, ids, child_ids, res, field_names, context=None):
    # Roll the per-account figures in `res` up the account hierarchy: each
    # account's totals become its own values plus those of all descendants,
    # converting monetary amounts into the parent's currency on the way up.
    currency_obj = self.pool.get('res.currency')
    recres = {}
    def recursive_computation(account):
        # Start from the account's own figures and fold in each child's.
        result2 = res[account.id].copy()
        for son in account.child_ids:
            result = recursive_computation(son)
            for field in field_names:
                # 'quantity' is a unit count, not money: never convert it.
                if (account.currency_id.id != son.currency_id.id) and (field!='quantity'):
                    result[field] = currency_obj.compute(cr, uid, son.currency_id.id, account.currency_id.id, result[field], context=context)
                result2[field] += result[field]
        return result2
    for account in self.browse(cr, uid, ids, context=context):
        # Only accounts listed in `child_ids` have entries in `res`.
        if account.id not in child_ids:
            continue
        recres[account.id] = recursive_computation(account)
    return recres
def _debit_credit_bal_qtty(self, cr, uid, ids, fields, arg, context=None):
res = {}
if context is None:
context = {}
child_ids = tuple(self.search(cr, uid, [('parent_id', 'child_of', ids)]))
for i in child_ids:
res[i] = {}
for n in fields:
res[i][n] = 0.0
if not child_ids:
return res
where_date = ''
where_clause_args = [tuple(child_ids)]
if context.get('from_date', False):
where_date += " AND l.date >= %s"
where_clause_args += [context['from_date']]
if context.get('to_date', False):
where_date += " AND l.date <= %s"
where_clause_args += [context['to_date']]
cr.execute("""
SELECT a.id,
sum(
CASE WHEN l.amount > 0
THEN l.amount
ELSE 0.0
END
) as debit,
sum(
CASE WHEN l.amount < 0
THEN -l.amount
ELSE 0.0
END
) as credit,
COALESCE(SUM(l.amount),0) AS balance,
COALESCE(SUM(l.unit_amount),0) AS quantity
FROM account_analytic_account a
LEFT JOIN account_analytic_line l ON (a.id = l.account_id)
WHERE a.id IN %s
""" + where_date + """
GROUP BY a.id""", where_clause_args)
for row in cr.dictfetchall():
res[row['id']] = {}
for field in fields:
res[row['id']][field] = row[field]
return self._compute_level_tree(cr, uid, ids, child_ids, res, fields, context)
def name_get(self, cr, uid, ids, context=None):
res = []
if not ids:
return res
if isinstance(ids, (int, long)):
ids = [ids]
for id in ids:
elmt = self.browse(cr, uid, id, context=context)
res.append((id, self._get_one_full_name(elmt)))
return res
def _get_full_name(self, cr, uid, ids, name=None, args=None, context=None):
if context == None:
context = {}
res = {}
for elmt in self.browse(cr, uid, ids, context=context):
res[elmt.id] = self._get_one_full_name(elmt)
return res
def _get_one_full_name(self, elmt, level=6):
if level<=0:
return '...'
if elmt.parent_id and not elmt.type == 'template':
parent_path = self._get_one_full_name(elmt.parent_id, level-1) + " / "
else:
parent_path = ''
return parent_path + elmt.name
def _child_compute(self, cr, uid, ids, name, arg, context=None):
result = {}
if context is None:
context = {}
for account in self.browse(cr, uid, ids, context=context):
result[account.id] = map(lambda x: x.id, [child for child in account.child_ids if child.state != 'template'])
return result
def _get_analytic_account(self, cr, uid, ids, context=None):
company_obj = self.pool.get('res.company')
analytic_obj = self.pool.get('account.analytic.account')
accounts = []
for company in company_obj.browse(cr, uid, ids, context=context):
accounts += analytic_obj.search(cr, uid, [('company_id', '=', company.id)])
return accounts
def _set_company_currency(self, cr, uid, ids, name, value, arg, context=None):
if isinstance(ids, (int, long)):
ids=[ids]
for account in self.browse(cr, uid, ids, context=context):
if account.company_id:
if account.company_id.currency_id.id != value:
raise osv.except_osv(_('Error!'), _("If you set a company, the currency selected has to be the same as it's currency. \nYou can remove the company belonging, and thus change the currency, only on analytic account of type 'view'. This can be really useful for consolidation purposes of several companies charts with different currencies, for example."))
if value:
cr.execute("""update account_analytic_account set currency_id=%s where id=%s""", (value, account.id))
self.invalidate_cache(cr, uid, ['currency_id'], [account.id], context=context)
def _currency(self, cr, uid, ids, field_name, arg, context=None):
result = {}
for rec in self.browse(cr, uid, ids, context=context):
if rec.company_id:
result[rec.id] = rec.company_id.currency_id.id
else:
result[rec.id] = rec.currency_id.id
return result
_columns = {
'name': fields.char('Account/Contract Name', required=True, track_visibility='onchange'),
'complete_name': fields.function(_get_full_name, type='char', string='Full Name'),
'code': fields.char('Reference', select=True, track_visibility='onchange', copy=False),
'type': fields.selection([('view','Analytic View'), ('normal','Analytic Account'),('contract','Contract or Project'),('template','Template of Contract')], 'Type of Account', required=True,
help="If you select the View Type, it means you won\'t allow to create journal entries using that account.\n"\
"The type 'Analytic account' stands for usual accounts that you only want to use in accounting.\n"\
"If you select Contract or Project, it offers you the possibility to manage the validity and the invoicing options for this account.\n"\
"The special type 'Template of Contract' allows you to define a template with default data that you can reuse easily."),
'template_id': fields.many2one('account.analytic.account', 'Template of Contract'),
'description': fields.text('Description'),
'parent_id': fields.many2one('account.analytic.account', 'Parent Analytic Account', select=2),
'child_ids': fields.one2many('account.analytic.account', 'parent_id', 'Child Accounts', copy=True),
'child_complete_ids': fields.function(_child_compute, relation='account.analytic.account', string="Account Hierarchy", type='many2many'),
'line_ids': fields.one2many('account.analytic.line', 'account_id', 'Analytic Entries', copy=False),
'balance': fields.function(_debit_credit_bal_qtty, type='float', string='Balance', multi='debit_credit_bal_qtty', digits_compute=dp.get_precision('Account')),
'debit': fields.function(_debit_credit_bal_qtty, type='float', string='Debit', multi='debit_credit_bal_qtty', digits_compute=dp.get_precision('Account')),
'credit': fields.function(_debit_credit_bal_qtty, type='float', string='Credit', multi='debit_credit_bal_qtty', digits_compute=dp.get_precision('Account')),
'quantity': fields.function(_debit_credit_bal_qtty, type='float', string='Quantity', multi='debit_credit_bal_qtty'),
'quantity_max': fields.float('Prepaid Service Units', help='Sets the higher limit of time to work on the contract, based on the timesheet. (for instance, number of hours in a limited support contract.)'),
'partner_id': fields.many2one('res.partner', 'Customer'),
'user_id': fields.many2one('res.users', 'Project Manager', track_visibility='onchange'),
'manager_id': fields.many2one('res.users', 'Account Manager', track_visibility='onchange'),
'date_start': fields.date('Start Date'),
'date': fields.date('Expiration Date', select=True, track_visibility='onchange'),
'company_id': fields.many2one('res.company', 'Company', required=False), #not required because we want to allow different companies to use the same chart of account, except for leaf accounts.
'state': fields.selection([('template', 'Template'),
('draft','New'),
('open','In Progress'),
('pending','To Renew'),
('close','Closed'),
('cancelled', 'Cancelled')],
'Status', required=True,
track_visibility='onchange', copy=False),
'currency_id': fields.function(_currency, fnct_inv=_set_company_currency, #the currency_id field is readonly except if it's a view account and if there is no company
store = {
'res.company': (_get_analytic_account, ['currency_id'], 10),
}, string='Currency', type='many2one', relation='res.currency'),
}
def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
if not template_id:
return {}
res = {'value':{}}
template = self.browse(cr, uid, template_id, context=context)
if template.date_start and template.date:
from_dt = datetime.strptime(template.date_start, tools.DEFAULT_SERVER_DATE_FORMAT)
to_dt = datetime.strptime(template.date, tools.DEFAULT_SERVER_DATE_FORMAT)
timedelta = to_dt - from_dt
res['value']['date'] = datetime.strftime(datetime.now() + timedelta, tools.DEFAULT_SERVER_DATE_FORMAT)
if not date_start:
res['value']['date_start'] = fields.date.today()
res['value']['quantity_max'] = template.quantity_max
res['value']['parent_id'] = template.parent_id and template.parent_id.id or False
res['value']['description'] = template.description
return res
def on_change_partner_id(self, cr, uid, ids,partner_id, name, context=None):
res={}
if partner_id:
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
if partner.user_id:
res['manager_id'] = partner.user_id.id
if not name:
res['name'] = _('Contract: ') + partner.name
return {'value': res}
def _default_company(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
if user.company_id:
return user.company_id.id
return self.pool.get('res.company').search(cr, uid, [('parent_id', '=', False)])[0]
def _get_default_currency(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
return user.company_id.currency_id.id
_defaults = {
'type': 'normal',
'company_id': _default_company,
'code' : lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'account.analytic.account', context=context),
'state': 'open',
'user_id': lambda self, cr, uid, ctx: uid,
'partner_id': lambda self, cr, uid, ctx: ctx.get('partner_id', False),
'manager_id': lambda self, cr, uid, ctx: ctx.get('manager_id', False),
'date_start': lambda *a: time.strftime('%Y-%m-%d'),
'currency_id': _get_default_currency,
}
def check_recursion(self, cr, uid, ids, context=None, parent=None):
return super(account_analytic_account, self)._check_recursion(cr, uid, ids, context=context, parent=parent)
_order = 'code, name asc'
_constraints = [
(check_recursion, 'Error! You cannot create recursive analytic accounts.', ['parent_id']),
]
def name_create(self, cr, uid, name, context=None):
raise osv.except_osv(_('Warning'), _("Quick account creation disallowed."))
def copy(self, cr, uid, id, default=None, context=None):
""" executed only on the toplevel copied object of the hierarchy.
Subobject are actually copied with copy_data"""
if not default:
default = {}
analytic = self.browse(cr, uid, id, context=context)
default['name'] = _("%s (copy)") % analytic['name']
return super(account_analytic_account, self).copy(cr, uid, id, default, context=context)
def on_change_company(self, cr, uid, id, company_id):
if not company_id:
return {}
currency = self.pool.get('res.company').read(cr, uid, [company_id], ['currency_id'])[0]['currency_id']
return {'value': {'currency_id': currency}}
def on_change_parent(self, cr, uid, id, parent_id):
if not parent_id:
return {}
parent = self.read(cr, uid, [parent_id], ['partner_id','code'])[0]
if parent['partner_id']:
partner = parent['partner_id'][0]
else:
partner = False
res = {'value': {}}
if partner:
res['value']['partner_id'] = partner
return res
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args=[]
if context is None:
context={}
account_ids = []
if name:
account_ids = self.search(cr, uid, [('code', '=', name)] + args, limit=limit, context=context)
if not account_ids:
dom = []
if '/' in name:
for name2 in name.split('/'):
# intermediate search without limit and args - could be expensive for large tables if `name` is not selective
account_ids = self.search(cr, uid, dom + [('name', operator, name2.strip())], limit=None, context=context)
if not account_ids: break
dom = [('parent_id','in',account_ids)]
if account_ids and args:
# final filtering according to domain (args)4
account_ids = self.search(cr, uid, [('id', 'in', account_ids)] + args, limit=limit, context=context)
if not account_ids:
return super(account_analytic_account, self).name_search(cr, uid, name, args, operator=operator, context=context, limit=limit)
return self.name_get(cr, uid, account_ids, context=context)
class account_analytic_line(osv.osv):
    """A single analytic entry (cost/revenue/time) booked on an account."""
    _name = 'account.analytic.line'
    _description = 'Analytic Line'
    _columns = {
        'name': fields.char('Description', required=True),
        'date': fields.date('Date', required=True, select=True),
        'amount': fields.float('Amount', required=True, help='Calculated by multiplying the quantity and the price given in the Product\'s cost price. Always expressed in the company main currency.', digits_compute=dp.get_precision('Account')),
        'unit_amount': fields.float('Quantity', help='Specifies the amount of quantity to count.'),
        'account_id': fields.many2one('account.analytic.account', 'Analytic Account', required=True, ondelete='restrict', select=True, domain=[('type','<>','view')]),
        'user_id': fields.many2one('res.users', 'User'),
        'company_id': fields.related('account_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
    }
    def _get_default_date(self, cr, uid, context=None):
        """Overridable hook: default line date (today in the user's timezone)."""
        return fields.date.context_today(self, cr, uid, context=context)
    def __get_default_date(self, cr, uid, context=None):
        # Private trampoline so _defaults keeps dispatching through the
        # (possibly overridden) public _get_default_date.
        return self._get_default_date(cr, uid, context=context)
    _defaults = {
        'date': __get_default_date,
        'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.analytic.line', context=c),
        'amount': 0.00
    }
    _order = 'date desc'
    def _check_no_view(self, cr, uid, ids, context=None):
        """Constraint: lines may not be booked on 'view'-type accounts."""
        analytic_lines = self.browse(cr, uid, ids, context=context)
        for line in analytic_lines:
            if line.account_id.type == 'view':
                return False
        return True
    _constraints = [
        (_check_no_view, 'You cannot create analytic line on view account.', ['account_id']),
    ]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
heracek/django-nonrel | django/db/backends/postgresql/creation.py | 247 | 3753 | from django.db.backends.creation import BaseDatabaseCreation
from django.db.backends.util import truncate_name
class DatabaseCreation(BaseDatabaseCreation):
    """PostgreSQL-specific DDL generation for test-database creation."""
    # This dictionary maps Field objects to their associated PostgreSQL column
    # types, as strings. Column-type strings can contain format strings; they'll
    # be interpolated against the values of Field.__dict__ before being output.
    # If a column type is set to None, it won't be included in the output.
    data_types = {
        'AutoField': 'serial',
        'BooleanField': 'boolean',
        'CharField': 'varchar(%(max_length)s)',
        'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
        'DateField': 'date',
        'DateTimeField': 'timestamp with time zone',
        'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
        'FileField': 'varchar(%(max_length)s)',
        'FilePathField': 'varchar(%(max_length)s)',
        'FloatField': 'double precision',
        'IntegerField': 'integer',
        'BigIntegerField': 'bigint',
        'IPAddressField': 'inet',
        'NullBooleanField': 'boolean',
        'OneToOneField': 'integer',
        'PositiveIntegerField': 'integer CHECK ("%(column)s" >= 0)',
        'PositiveSmallIntegerField': 'smallint CHECK ("%(column)s" >= 0)',
        'SlugField': 'varchar(%(max_length)s)',
        'SmallIntegerField': 'smallint',
        'TextField': 'text',
        'TimeField': 'time',
    }
    def sql_table_creation_suffix(self):
        """Return the CREATE DATABASE suffix (test encoding, no collation)."""
        # PostgreSQL cannot set a collation at database-creation time.
        assert self.connection.settings_dict['TEST_COLLATION'] is None, "PostgreSQL does not support collation setting at database creation time."
        if self.connection.settings_dict['TEST_CHARSET']:
            return "WITH ENCODING '%s'" % self.connection.settings_dict['TEST_CHARSET']
        return ''
    def sql_indexes_for_field(self, model, f, style):
        """Return CREATE INDEX statements for field `f` of `model`."""
        # Unique fields already get an index from their constraint.
        if f.db_index and not f.unique:
            qn = self.connection.ops.quote_name
            db_table = model._meta.db_table
            tablespace = f.db_tablespace or model._meta.db_tablespace
            if tablespace:
                sql = self.connection.ops.tablespace_sql(tablespace)
                if sql:
                    tablespace_sql = ' ' + sql
                else:
                    tablespace_sql = ''
            else:
                tablespace_sql = ''
            def get_index_sql(index_name, opclass=''):
                # Builds one CREATE INDEX statement, truncating the index
                # name to the backend's identifier length limit.
                return (style.SQL_KEYWORD('CREATE INDEX') + ' ' +
                        style.SQL_TABLE(qn(truncate_name(index_name,self.connection.ops.max_name_length()))) + ' ' +
                        style.SQL_KEYWORD('ON') + ' ' +
                        style.SQL_TABLE(qn(db_table)) + ' ' +
                        "(%s%s)" % (style.SQL_FIELD(qn(f.column)), opclass) +
                        "%s;" % tablespace_sql)
            output = [get_index_sql('%s_%s' % (db_table, f.column))]
            # Fields with database column types of `varchar` and `text` need
            # a second index that specifies their operator class, which is
            # needed when performing correct LIKE queries outside the
            # C locale. See #12234.
            db_type = f.db_type(connection=self.connection)
            if db_type.startswith('varchar'):
                output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
                                            ' varchar_pattern_ops'))
            elif db_type.startswith('text'):
                output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
                                            ' text_pattern_ops'))
        else:
            output = []
        return output
| bsd-3-clause |
python-rope/rope | rope/base/codeanalyze.py | 3 | 11315 | import bisect
import re
import token
import tokenize
class ChangeCollector(object):
    """Accumulates (start, end, replacement) edits and applies them at once.

    Offsets are character offsets into the original text; edits are applied
    in ascending (start, end) order.
    """

    def __init__(self, text):
        self.text = text
        self.changes = []

    def add_change(self, start, end, new_text=None):
        """Record a replacement for text[start:end].

        When `new_text` is None the original slice is kept (a no-op edit).
        """
        replacement = self.text[start:end] if new_text is None else new_text
        self.changes.append((start, end, replacement))

    def get_changed(self):
        """Apply all recorded edits and return the new text.

        Returns None when there are no edits or they leave the text unchanged.
        """
        if not self.changes:
            return None
        # Keep the in-place sort: callers may observe the ordered list.
        self.changes.sort(key=lambda edit: edit[:2])
        fragments = []
        cursor = 0
        for begin, finish, replacement in self.changes:
            fragments.append(self.text[cursor:begin])
            fragments.append(replacement)
            cursor = finish
        fragments.append(self.text[cursor:])
        rebuilt = ''.join(fragments)
        return rebuilt if rebuilt != self.text else None
class SourceLinesAdapter(object):
    """Adapts source to Lines interface

    Note: The creation of this class is expensive.
    """

    def __init__(self, source_code):
        self.code = source_code
        self.starts = None
        self._initialize_line_starts()

    def _initialize_line_starts(self):
        # Offsets at which each line begins, followed by a sentinel that is
        # one past the end of the text (so line ends are starts[n] - 1).
        offsets = [0]
        position = self.code.find('\n')
        while position != -1:
            offsets.append(position + 1)
            position = self.code.find('\n', position + 1)
        offsets.append(len(self.code) + 1)
        self.starts = offsets

    def get_line(self, lineno):
        """Return line `lineno` (1-based) without its trailing newline."""
        begin = self.starts[lineno - 1]
        end = self.starts[lineno] - 1
        return self.code[begin:end]

    def length(self):
        """Number of lines in the source."""
        return len(self.starts) - 1

    def get_line_number(self, offset):
        """Map a character offset to its 1-based line number."""
        return bisect.bisect(self.starts, offset)

    def get_line_start(self, lineno):
        """Character offset of the first character of line `lineno`."""
        return self.starts[lineno - 1]

    def get_line_end(self, lineno):
        """Character offset just past the last character of line `lineno`."""
        return self.starts[lineno] - 1
class ArrayLinesAdapter(object):
    """Provides the Lines interface over an in-memory list of line strings."""

    def __init__(self, lines):
        self.lines = lines

    def get_line(self, line_number):
        """Return the line with the given 1-based number."""
        index = line_number - 1
        return self.lines[index]

    def length(self):
        """Number of lines held by the adapter."""
        return len(self.lines)
class LinesToReadline(object):
    """Exposes a Lines object as a readline()-style callable.

    Each call yields the next line (starting at `start`, 1-based) with a
    trailing newline appended, then '' once the lines are exhausted.
    """

    def __init__(self, lines, start):
        self.lines = lines
        self.current = start

    def readline(self):
        if self.current > self.lines.length():
            return ''
        line = self.lines.get_line(self.current)
        self.current += 1
        return line + '\n'

    def __call__(self):
        return self.readline()
class _CustomGenerator(object):
    """Regex-based scanner that yields logical-line regions of a Lines object.

    A logical line continues while a string, an open bracket, or a trailing
    backslash continuation is pending.  State is kept across lines in
    `in_string` (current quote token or ''), `open_count` (bracket depth)
    and `continuation` (trailing-backslash flag).
    """
    def __init__(self, lines):
        self.lines = lines
        self.in_string = ''
        self.open_count = 0
        self.continuation = False
    def __call__(self):
        # Returns a list of (start, end) 1-based inclusive line ranges.
        size = self.lines.length()
        result = []
        i = 1
        while i <= size:
            # Skip blank lines between logical lines.
            while i <= size and not self.lines.get_line(i).strip():
                i += 1
            if i <= size:
                start = i
                while True:
                    line = self.lines.get_line(i)
                    self._analyze_line(line)
                    # The logical line ends when nothing is pending (or at EOF).
                    if not (self.continuation or self.open_count or
                            self.in_string) or i == size:
                        break
                    i += 1
                result.append((start, i))
                i += 1
        return result
    # Matches all backslashes before the token, to detect escaped quotes
    _main_tokens = re.compile(r'(\\*)((\'\'\'|"""|\'|")|#|\[|\]|\{|\}|\(|\))')
    def _analyze_line(self, line):
        """Update the scanner state with the tokens found on `line`."""
        token = None
        for match in self._main_tokens.finditer(line):
            prefix = match.group(1)
            token = match.group(2)
            # Skip any tokens which are escaped
            if len(prefix) % 2 == 1:
                continue
            if token in ["'''", '"""', "'", '"']:
                if not self.in_string:
                    self.in_string = token
                elif self.in_string == token or (self.in_string in ['"', "'"] and token == 3*self.in_string):
                    # Closing quote (a triple quote also closes a matching
                    # single-character string opener).
                    self.in_string = ''
            if self.in_string:
                continue
            if token == '#':
                # Rest of the line is a comment; stop scanning tokens.
                break
            if token in '([{':
                self.open_count += 1
            elif token in ')]}':
                self.open_count -= 1
        if line and token != '#' and line.endswith('\\'):
            self.continuation = True
        else:
            self.continuation = False
def custom_generator(lines):
    """Return logical-line (start, end) regions using the regex scanner."""
    scanner = _CustomGenerator(lines)
    return scanner()
class LogicalLineFinder(object):
    """Finds logical-line regions using the `tokenize` module."""
    def __init__(self, lines):
        self.lines = lines
    def logical_line_in(self, line_number):
        """Return the (start, end) logical line containing `line_number`.

        Tokenizing starts at an approximate block start; on IndentationError
        it retries from a shallower block, giving up after 5 attempts.
        """
        indents = count_line_indents(self.lines.get_line(line_number))
        tries = 0
        while True:
            block_start = get_block_start(self.lines, line_number, indents)
            try:
                return self._block_logical_line(block_start, line_number)
            except IndentationError as e:
                tries += 1
                if tries == 5:
                    raise e
                # Retry from the indentation level of the offending line.
                lineno = e.lineno + block_start - 1
                indents = count_line_indents(self.lines.get_line(lineno))
    def generate_starts(self, start_line=1, end_line=None):
        """Yield only the start line of each logical line region."""
        for start, end in self.generate_regions(start_line, end_line):
            yield start
    def generate_regions(self, start_line=1, end_line=None):
        """Yield (start, end) logical-line regions within the given range."""
        # XXX: `block_start` should be at a better position!
        block_start = 1
        readline = LinesToReadline(self.lines, block_start)
        try:
            for start, end in self._logical_lines(readline):
                # Translate from tokenizer-relative to absolute line numbers.
                real_start = start + block_start - 1
                real_start = self._first_non_blank(real_start)
                if end_line is not None and real_start >= end_line:
                    break
                real_end = end + block_start - 1
                if real_start >= start_line:
                    yield (real_start, real_end)
        except tokenize.TokenError:
            # Unterminated string/bracket at EOF: stop silently.
            pass
    def _block_logical_line(self, block_start, line_number):
        """Tokenize from `block_start` and locate `line_number`'s region."""
        readline = LinesToReadline(self.lines, block_start)
        shifted = line_number - block_start + 1
        region = self._calculate_logical(readline, shifted)
        start = self._first_non_blank(region[0] + block_start - 1)
        if region[1] is None:
            # Open region at end of input: extend to the last line.
            end = self.lines.length()
        else:
            end = region[1] + block_start - 1
        return start, end
    def _calculate_logical(self, readline, line_number):
        # Returns a (start, end) pair in tokenizer-relative line numbers;
        # end is None when the region is not closed before EOF.
        last_end = 1
        try:
            for start, end in self._logical_lines(readline):
                if line_number <= end:
                    return (start, end)
                last_end = end + 1
        except tokenize.TokenError as e:
            # e.args[1] is the (row, col) where tokenizing failed.
            current = e.args[1][0]
            return (last_end, max(last_end, current - 1))
        return (last_end, None)
    def _logical_lines(self, readline):
        # Yields one region per NEWLINE token (i.e. per logical line).
        last_end = 1
        for current_token in tokenize.generate_tokens(readline):
            current = current_token[2][0]
            if current_token[0] == token.NEWLINE:
                yield (last_end, current)
                last_end = current + 1
    def _first_non_blank(self, line_number):
        # Skip blank and comment-only lines starting at `line_number`.
        current = line_number
        while current < self.lines.length():
            line = self.lines.get_line(current).strip()
            if line and not line.startswith('#'):
                return current
            current += 1
        return current
def tokenizer_generator(lines):
    """Return logical-line regions computed via the `tokenize` module."""
    finder = LogicalLineFinder(lines)
    return finder.generate_regions()
class CachingLogicalLineFinder(object):
    """Logical-line lookup backed by lazily built start/end tables.

    `generate` is a callable(lines) yielding (start, end) regions; it runs
    once, on first access to `starts` or `ends`.
    """
    def __init__(self, lines, generate=custom_generator):
        self.lines = lines
        self._generate = generate
    _starts = None
    @property
    def starts(self):
        # Lazily built; starts[n] is True when line n begins a logical line.
        if self._starts is None:
            self._init_logicals()
        return self._starts
    _ends = None
    @property
    def ends(self):
        # Lazily built; ends[n] is True when line n ends a logical line.
        if self._ends is None:
            self._init_logicals()
        return self._ends
    def _init_logicals(self):
        """Should initialize _starts and _ends attributes"""
        # Index 0 is unused so line numbers can be used directly.
        size = self.lines.length() + 1
        self._starts = [None] * size
        self._ends = [None] * size
        for start, end in self._generate(self.lines):
            self._starts[start] = True
            self._ends[end] = True
    def logical_line_in(self, line_number):
        """Return the (start, end) logical line containing `line_number`."""
        start = line_number
        # Walk backwards to the nearest logical-line start.
        while start > 0 and not self.starts[start]:
            start -= 1
        if start == 0:
            # No start at or before the line: fall forward to the next one,
            # or treat the line as its own region.
            try:
                start = self.starts.index(True, line_number)
            except ValueError:
                return (line_number, line_number)
        return (start, self.ends.index(True, start))
    def generate_starts(self, start_line=1, end_line=None):
        """Yield logical-line start numbers within [start_line, end_line)."""
        if end_line is None:
            end_line = self.lines.length()
        for index in range(start_line, end_line):
            if self.starts[index]:
                yield index
def get_block_start(lines, lineno, maximum_indents=80):
    """Approximate block start"""
    # Scan backwards for a block-opening statement (def/class/if/...) whose
    # indentation does not exceed `maximum_indents`.
    pattern = get_block_start_patterns()
    for i in range(lineno, 0, -1):
        match = pattern.search(lines.get_line(i))
        if match is not None and \
           count_line_indents(lines.get_line(i)) <= maximum_indents:
            striped = match.string.lstrip()
            # Maybe we're in a list comprehension or generator expression
            # NOTE(review): `and` binds tighter than `or` here, so the
            # `i > 1` guard only applies to the 'if' case, not 'for' —
            # confirm this is intended.
            if i > 1 and striped.startswith('if') or striped.startswith('for'):
                # Look a few lines ahead for an unbalanced closing bracket,
                # which indicates the keyword lives inside a comprehension.
                bracs = 0
                for j in range(i, min(i + 5, lines.length() + 1)):
                    for c in lines.get_line(j):
                        if c == '#':
                            break
                        if c in '[(':
                            bracs += 1
                        if c in ')]':
                            bracs -= 1
                        if bracs < 0:
                            break
                    if bracs < 0:
                        break
                if bracs < 0:
                    # Inside a comprehension: keep scanning upwards.
                    continue
            return i
    return 1
# Compiled lazily by get_block_start_patterns() and cached here.
_block_start_pattern = None


def get_block_start_patterns():
    """Return the (cached) compiled regex matching block-opening lines."""
    global _block_start_pattern
    if _block_start_pattern is None:
        keyword_part = '((def|class|if|elif|except|for|while|with)\\s)'
        clause_part = '((try|else|finally|except)\\s*:)'
        _block_start_pattern = re.compile(
            '^\\s*(%s|%s)' % (keyword_part, clause_part), re.M)
    return _block_start_pattern
def count_line_indents(line):
    """Return the indentation width of `line`; a tab counts as 8 columns.

    Lines consisting entirely of whitespace report an indent of 0.
    """
    remainder = line.lstrip(' \t')
    if not remainder:
        # Blank (or whitespace-only) line.
        return 0
    leading = line[:len(line) - len(remainder)]
    return leading.count(' ') + 8 * leading.count('\t')
def get_string_pattern_with_prefix(prefix):
    """Build a regex source matching Python string literals after `prefix`.

    Covers triple- and single-quoted forms with both quote characters,
    honoring backslash escapes and escaped newlines.
    """
    triple = r'%s"""(\\.|"(?!"")|\\\n|[^"\\])*"""' % prefix
    single = r'%s"(\\.|\\\n|[^"\\\n])*"' % prefix
    variants = [triple, triple.replace('"', "'"),
                single, single.replace('"', "'")]
    return '|'.join(variants)
def get_string_pattern():
    """Regex source for non-formatted string literals (optional u/b/r prefix,
    explicitly excluding f-strings via a lookbehind)."""
    return get_string_pattern_with_prefix(r'(?<![fF])(\b[uUbB]?[rR]?)?')
def get_formatted_string_pattern():
    """Regex source for f-string literals (f, F, rf, fr, ... prefixes)."""
    return get_string_pattern_with_prefix(r'(\b[rR]?[fF]|[fF][rR]?)')
def get_comment_pattern():
    """Regex source matching a '#' comment up to the end of the line."""
    return r'#[^\n]*'
| lgpl-3.0 |
rdkit/rdkit-orig | rdkit/ML/DecTree/ID3.py | 2 | 6741 | #
# Copyright (C) 2000-2008 greg Landrum and Rational Discovery LLC
#
""" ID3 Decision Trees
contains an implementation of the ID3 decision tree algorithm
as described in Tom Mitchell's book "Machine Learning"
It relies upon the _Tree.TreeNode_ data structure (or something
with the same API) defined locally to represent the trees
"""
import numpy
from rdkit.ML.DecTree import DecTree
from rdkit.ML.InfoTheory import entropy
def CalcTotalEntropy(examples, nPossibleVals):
  """ Calculates the total entropy of the data set (w.r.t. the results)

   **Arguments**

    - examples: a list (nInstances long) of lists of variable values + instance
              values

    - nPossibleVals: a list (nVars long) of the number of possible values each variable
      can adopt.

   **Returns**

     a float containing the informational entropy of the data set.

  """
  # Tally how often each result code (last element of an example) occurs.
  resultCounts = numpy.zeros(nPossibleVals[-1], 'i')
  for example in examples:
    resultCounts[example[-1]] += 1
  return entropy.InfoEntropy(resultCounts)
def GenVarTable(examples, nPossibleVals, vars):
  """Generates a list of variable tables for the examples passed in.

    The table for a given variable records the number of times each possible value
    of that variable appears for each possible result of the function.

  **Arguments**

    - examples: a list (nInstances long) of lists of variable values + instance
              values

    - nPossibleVals: a list containing the number of possible values of
                   each variable + the number of values of the function.

    - vars: a list of the variables to include in the var table

  **Returns**

      a list of variable result tables. Each table is a Numeric array
        which is varValues x nResults

  """
  # Fix: use range() instead of the Python-2-only xrange(); behavior is
  # identical on Python 2 and this also works on Python 3.
  nVars = len(vars)
  res = [None] * nVars
  nFuncVals = nPossibleVals[-1]
  # One (nValues x nResults) counting table per requested variable.
  for i in range(nVars):
    res[i] = numpy.zeros((nPossibleVals[vars[i]], nFuncVals), 'i')
  for example in examples:
    # The result code is the last element of each example.
    val = int(example[-1])
    for i in range(nVars):
      res[i][int(example[vars[i]]), val] += 1
  return res
def ID3(examples, target, attrs, nPossibleVals, depth=0, maxDepth=-1,
        **kwargs):
  """ Implements the ID3 algorithm for constructing decision trees.

    From Mitchell's book, page 56

    This is *slightly* modified from Mitchell's book because it supports
      multivalued (non-binary) results.

  **Arguments**

    - examples: a list (nInstances long) of lists of variable values + instance
              values

    - target: an int

    - attrs: a list of ints indicating which variables can be used in the tree

    - nPossibleVals: a list containing the number of possible values of
                   every variable.

    - depth: (optional) the current depth in the tree

    - maxDepth: (optional) the maximum depth to which the tree
               will be grown

  **Returns**

     a DecTree.DecTreeNode with the decision tree

  **NOTE:** This code cannot bootstrap (start from nothing...)
        use _ID3Boot_ (below) for that.

  """
  # Fix: use range() instead of the Python-2-only xrange(); behavior is
  # identical on Python 2 and this also works on Python 3.
  varTable = GenVarTable(examples, nPossibleVals, attrs)
  tree = DecTree.DecTreeNode(None, 'node')
  # store the total entropy... in case that is interesting
  totEntropy = CalcTotalEntropy(examples, nPossibleVals)
  tree.SetData(totEntropy)
  # the matrix of results for this target:
  tMat = GenVarTable(examples, nPossibleVals, [target])[0]
  # counts of each result code:
  counts = sum(tMat)
  nzCounts = numpy.nonzero(counts)[0]
  if len(nzCounts) == 1:
    # bottomed out because there is only one result code left
    # with any counts (i.e. there's only one type of example
    # left... this is GOOD!).
    res = nzCounts[0]
    tree.SetLabel(res)
    tree.SetName(str(res))
    tree.SetTerminal(1)
  elif len(attrs) == 0 or (maxDepth >= 0 and depth >= maxDepth):
    # Bottomed out: no variables left or max depth hit
    # We don't really know what to do here, so
    # use the heuristic of picking the most prevalent
    # result
    v = numpy.argmax(counts)
    tree.SetLabel(v)
    tree.SetName('%d?' % v)
    tree.SetTerminal(1)
  else:
    # find the variable which gives us the largest information gain
    gains = [entropy.InfoGain(x) for x in varTable]
    best = attrs[numpy.argmax(gains)]
    # remove that variable from the lists of possible variables
    nextAttrs = attrs[:]
    if not kwargs.get('recycleVars', 0):
      nextAttrs.remove(best)
    # set some info at this node
    tree.SetName('Var: %d' % best)
    tree.SetLabel(best)
    tree.SetTerminal(0)
    # loop over possible values of the new variable and
    # build a subtree for each one
    for val in range(nPossibleVals[best]):
      nextExamples = []
      for example in examples:
        if example[best] == val:
          nextExamples.append(example)
      if len(nextExamples) == 0:
        # this particular value of the variable has no examples,
        # so there's not much sense in recursing.
        # This can (and does) happen.
        v = numpy.argmax(counts)
        tree.AddChild('%d' % v, label=v, data=0.0, isTerminal=1)
      else:
        # recurse
        tree.AddChildNode(ID3(nextExamples, best, nextAttrs, nPossibleVals,
                              depth + 1, maxDepth, **kwargs))
  return tree
def ID3Boot(examples, attrs, nPossibleVals, initialVar=None, depth=0, maxDepth=-1,
            **kwargs):
  """ Bootstrapping code for the ID3 algorithm

    see ID3 for descriptions of the arguments

    If _initialVar_ is not set, the algorithm will automatically
     choose the first variable in the tree (the standard greedy
     approach).  Otherwise, _initialVar_ will be used as the first
     split.

  """
  # Fix: use range() instead of the Python-2-only xrange(); behavior is
  # identical on Python 2 and this also works on Python 3.
  totEntropy = CalcTotalEntropy(examples, nPossibleVals)
  varTable = GenVarTable(examples, nPossibleVals, attrs)
  tree = DecTree.DecTreeNode(None, 'node')
  tree._nResultCodes = nPossibleVals[-1]
  # pick the first split: greedy best-gain variable unless one was forced
  if initialVar is None:
    best = attrs[numpy.argmax([entropy.InfoGain(x) for x in varTable])]
  else:
    best = initialVar
  tree.SetName('Var: %d' % best)
  tree.SetData(totEntropy)
  tree.SetLabel(best)
  tree.SetTerminal(0)
  nextAttrs = attrs[:]
  if not kwargs.get('recycleVars', 0):
    nextAttrs.remove(best)
  # one subtree per possible value of the chosen first variable
  for val in range(nPossibleVals[best]):
    nextExamples = []
    for example in examples:
      if example[best] == val:
        nextExamples.append(example)
    tree.AddChildNode(ID3(nextExamples, best, nextAttrs, nPossibleVals, depth, maxDepth,
                          **kwargs))
  return tree
| bsd-3-clause |
hehongliang/tensorflow | tensorflow/python/autograph/converters/asserts.py | 24 | 1746 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts assert statements to their corresponding TF calls."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import templates
class AssertTransformer(converter.Base):
  """Transforms Assert nodes to Call so they can be handled as functions."""
  def visit_Assert(self, node):
    # Visit children first so nested expressions are already converted.
    self.generic_visit(node)
    # Note: The lone tf.Assert call will be wrapped with control_dependencies
    # by side_effect_guards.
    template = """
      ag__.assert_stmt(test, lambda: msg)
    """
    if node.msg is None:
      # No message on the assert: substitute a generic one.
      return templates.replace(
          template, test=node.test, msg=gast.Str('Assertion error'))
    elif isinstance(node.msg, gast.Str):
      return templates.replace(template, test=node.test, msg=node.msg)
    else:
      # Non-literal messages would need to be evaluated lazily; unsupported.
      raise NotImplementedError('can only convert string messages for now.')
def transform(node, ctx):
  """Replaces assert statements in the AST rooted at `node` with calls."""
  transformer = AssertTransformer(ctx)
  return transformer.visit(node)
| apache-2.0 |
mszewczy/odoo | addons/account_payment/account_move_line.py | 241 | 4455 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from operator import itemgetter
class account_move_line(osv.osv):
    """Extends account.move.line with payment-order aware helpers."""
    _inherit = "account.move.line"

    # delegate to parent, used for local fields.function redefinition
    def _amount_to_pay(self, cr, uid, ids, field_names, args, context=None):
        # Exposes the parent's computed 'amount_residual' under the local
        # function field name 'amount_to_pay'.
        return {
            id: value['amount_residual']
            for id, value in self._amount_residual(cr, uid, ids, field_names, args,
                                                   context=context).items()
        }

    def _to_pay_search(self, cr, uid, obj, name, args, context=None):
        # fnct_search for 'amount_to_pay': computes, in SQL, the residual
        # payable amount per move line (credit minus the amount already
        # assigned to non-cancelled payment orders) and compares it with
        # the searched value(s).
        if not args:
            return []
        line_obj = self.pool.get('account.move.line')
        query = line_obj._query_get(cr, uid, context={})
        # Each arg is (field, operator, value): the operator string is
        # interpolated into the SQL; the compared value itself is passed
        # as a query parameter below.
        where = ' and '.join(map(lambda x: '''(SELECT
            CASE WHEN l.amount_currency < 0
                THEN - l.amount_currency
                ELSE l.credit
            END - coalesce(sum(pl.amount_currency), 0)
            FROM payment_line pl
            INNER JOIN payment_order po ON (pl.order_id = po.id)
            WHERE move_line_id = l.id
            AND po.state != 'cancel'
            ) %(operator)s %%s ''' % {'operator': x[1]}, args))
        sql_args = tuple(map(itemgetter(2), args))

        cr.execute(('''SELECT id
            FROM account_move_line l
            WHERE account_id IN (select id
                FROM account_account
                WHERE type=%s AND active)
            AND reconcile_id IS null
            AND credit > 0
            AND ''' + where + ' and ' + query), ('payable',)+sql_args )

        res = cr.fetchall()
        if not res:
            return [('id', '=', '0')]
        return [('id', 'in', map(lambda x:x[0], res))]

    def line2bank(self, cr, uid, ids, payment_type=None, context=None):
        """
        Try to return for each Ledger Posting line a corresponding bank
        account according to the payment type. This work using one of
        the bank of the partner defined on the invoice eventually
        associated to the line.
        Return the first suitable bank for the corresponding partner.
        """
        payment_mode_obj = self.pool.get('payment.mode')
        line2bank = {}
        if not ids:
            return {}
        bank_type = payment_mode_obj.suitable_bank_types(cr, uid, payment_type,
                                                         context=context)
        for line in self.browse(cr, uid, ids, context=context):
            line2bank[line.id] = False
            if line.invoice and line.invoice.partner_bank_id:
                # A bank explicitly set on the invoice wins.
                line2bank[line.id] = line.invoice.partner_bank_id.id
            elif line.partner_id:
                if not line.partner_id.bank_ids:
                    line2bank[line.id] = False
                else:
                    # First partner bank whose state matches the payment type.
                    for bank in line.partner_id.bank_ids:
                        if bank.state in bank_type:
                            line2bank[line.id] = bank.id
                            break
                # Fall back to the partner's first bank if none matched.
                if not line2bank.get(line.id) and line.partner_id.bank_ids:
                    line2bank[line.id] = line.partner_id.bank_ids[0].id
            else:
                raise osv.except_osv(_('Error!'), _('There is no partner defined on the entry line.'))
        return line2bank

    _columns = {
        'amount_to_pay': fields.function(_amount_to_pay,
            type='float', string='Amount to pay', fnct_search=_to_pay_search),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
gfyoung/pandas | pandas/core/dtypes/missing.py | 2 | 18331 | """
missing types & inference
"""
from functools import partial
import numpy as np
from pandas._config import get_option
from pandas._libs import lib
import pandas._libs.missing as libmissing
from pandas._libs.tslibs import NaT, Period, iNaT
from pandas._typing import ArrayLike, DtypeObj
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
TD64NS_DTYPE,
ensure_object,
is_bool_dtype,
is_categorical_dtype,
is_complex_dtype,
is_datetimelike_v_numeric,
is_dtype_equal,
is_extension_array_dtype,
is_float_dtype,
is_integer_dtype,
is_object_dtype,
is_scalar,
is_string_dtype,
is_string_like_dtype,
needs_i8_conversion,
pandas_dtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCExtensionArray,
ABCIndex,
ABCMultiIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import is_list_like
# Re-export the C-level scalar infinity checks.
isposinf_scalar = libmissing.isposinf_scalar
isneginf_scalar = libmissing.isneginf_scalar

# Module-level hooks that _use_inf_as_na() rebinds when the
# "use_inf_as_na" option changes.
nan_checker = np.isnan
INF_AS_NA = False
def isna(obj):
    """
    Detect missing values for an array-like object.

    Missing values are ``NaN`` in numeric arrays, ``None`` or ``NaN`` in
    object arrays, and ``NaT`` in datetime-like arrays.

    Parameters
    ----------
    obj : scalar or array-like
        Object to check for null or missing values.

    Returns
    -------
    bool or array-like of bool
        A scalar boolean for scalar input; for array input, an array of
        booleans indicating whether each corresponding element is missing.

    See Also
    --------
    notna : Boolean inverse of pandas.isna.
    Series.isna : Detect missing values in a Series.
    DataFrame.isna : Detect missing values in a DataFrame.
    Index.isna : Detect missing values in an Index.

    Examples
    --------
    >>> pd.isna('dog')
    False
    >>> pd.isna(np.nan)
    True
    >>> pd.isna(np.array([[1, np.nan, 3], [4, 5, np.nan]]))
    array([[False,  True, False],
           [False, False,  True]])
    """
    # Delegates to the module-level _isna, which _use_inf_as_na() may
    # rebind with inf_as_na=True.
    return _isna(obj)
# Alias for isna (older pandas naming).
isnull = isna
def _isna(obj, inf_as_na: bool = False):
    """
    Detect missing values, treating None, NaN or NA as null. Infinite
    values will also be treated as null if inf_as_na is True.

    Parameters
    ----------
    obj: ndarray or object value
        Input array or scalar value.
    inf_as_na: bool
        Whether to treat infinity as null.

    Returns
    -------
    boolean ndarray or boolean
    """
    if is_scalar(obj):
        if inf_as_na:
            return libmissing.checknull_old(obj)
        else:
            return libmissing.checknull(obj)
    # hack (for now) because MI registers as ndarray
    elif isinstance(obj, ABCMultiIndex):
        raise NotImplementedError("isna is not defined for MultiIndex")
    elif isinstance(obj, type):
        # A class object (rather than an instance) is never NA.
        return False
    elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndex, ABCExtensionArray)):
        return _isna_ndarraylike(obj, inf_as_na=inf_as_na)
    elif isinstance(obj, ABCDataFrame):
        return obj.isna()
    elif isinstance(obj, list):
        return _isna_ndarraylike(np.asarray(obj, dtype=object), inf_as_na=inf_as_na)
    elif hasattr(obj, "__array__"):
        # Any other object that can be coerced to an ndarray.
        return _isna_ndarraylike(np.asarray(obj), inf_as_na=inf_as_na)
    else:
        # Any remaining non-array-like object is considered not-NA.
        return False
def _use_inf_as_na(key):
    """
    Option change callback for na/inf behaviour.

    Choose which replacement for numpy.isnan / -numpy.isfinite is used.

    Parameters
    ----------
    key : str
        The option key whose (boolean) value is read via get_option:
        True means treat None, NaN, INF, -INF as null (old way),
        False means None and NaN are null, but INF, -INF are not null
        (new way).

    Notes
    -----
    This approach to setting global module values is discussed and
    approved here:

    * https://stackoverflow.com/questions/4859217/
      programmatically-creating-variables-in-python/4859312#4859312
    """
    inf_as_na = get_option(key)
    # Rebind the module-level _isna so all callers pick up the new mode.
    globals()["_isna"] = partial(_isna, inf_as_na=inf_as_na)
    if inf_as_na:
        globals()["nan_checker"] = lambda x: ~np.isfinite(x)
        globals()["INF_AS_NA"] = True
    else:
        globals()["nan_checker"] = np.isnan
        globals()["INF_AS_NA"] = False
def _isna_ndarraylike(obj, inf_as_na: bool = False):
    """
    Return an array indicating which values of the input array are NaN / NA.

    Parameters
    ----------
    obj: array-like
        The input array whose elements are to be checked.
    inf_as_na: bool
        Whether or not to treat infinite values as NA.

    Returns
    -------
    array-like
        Array of boolean values denoting the NA status of each element.
    """
    values = getattr(obj, "_values", obj)
    dtype = values.dtype

    if is_extension_array_dtype(dtype):
        if inf_as_na and is_categorical_dtype(dtype):
            result = libmissing.isnaobj_old(values.to_numpy())
        else:
            result = values.isna()
    elif is_string_dtype(dtype):
        result = _isna_string_dtype(values, dtype, inf_as_na=inf_as_na)
    elif needs_i8_conversion(dtype):
        # this is the NaT pattern
        result = values.view("i8") == iNaT
    else:
        if inf_as_na:
            result = ~np.isfinite(values)
        else:
            result = np.isnan(values)

    # box: Series in, Series out — re-wrap the raw boolean mask.
    if isinstance(obj, ABCSeries):
        result = obj._constructor(result, index=obj.index, name=obj.name, copy=False)

    return result
def _isna_string_dtype(
    values: np.ndarray, dtype: np.dtype, inf_as_na: bool
) -> np.ndarray:
    """Compute the NA mask for string / flexible dtypes."""
    # Working around NumPy ticket 1542
    shape = values.shape

    if is_string_like_dtype(dtype):
        # Fixed-width string dtypes cannot hold NA values at all.
        return np.zeros(shape, dtype=bool)

    checker = libmissing.isnaobj_old if inf_as_na else libmissing.isnaobj
    flat = checker(values.ravel())

    result = np.empty(shape, dtype=bool)
    result[...] = flat.reshape(shape)
    return result
def notna(obj):
    """
    Detect non-missing values for an array-like object.

    This is the boolean inverse of :func:`isna`: valid means not missing
    (not ``NaN`` in numeric arrays, not ``None``/``NaN`` in object arrays,
    not ``NaT`` in datetime-like arrays).

    Parameters
    ----------
    obj : array-like or object value
        Object to check for *not* null or *non*-missing values.

    Returns
    -------
    bool or array-like of bool
        A scalar boolean for scalar input; for array input, an array of
        booleans indicating whether each corresponding element is valid.

    See Also
    --------
    isna : Boolean inverse of pandas.notna.
    Series.notna : Detect valid values in a Series.
    DataFrame.notna : Detect valid values in a DataFrame.
    Index.notna : Detect valid values in an Index.

    Examples
    --------
    >>> pd.notna('dog')
    True
    >>> pd.notna(np.nan)
    False
    >>> pd.notna(np.array([[1, np.nan, 3], [4, 5, np.nan]]))
    array([[ True, False,  True],
           [ True,  True, False]])
    """
    mask = isna(obj)
    if is_scalar(mask):
        return not mask
    return ~mask
# Alias for notna (older pandas naming).
notnull = notna
def isna_compat(arr, fill_value=np.nan) -> bool:
    """
    Check whether ``fill_value`` can be placed into ``arr``.

    Parameters
    ----------
    arr: a numpy array
    fill_value: fill value, default to np.nan

    Returns
    -------
    bool
        False only when the fill value is NA-like and the array's dtype
        is boolean or integer (neither of which can hold NA).
    """
    if not isna(fill_value):
        return True
    dtype = arr.dtype
    return not is_bool_dtype(dtype) and not is_integer_dtype(dtype)
def array_equivalent(
    left, right, strict_nan: bool = False, dtype_equal: bool = False
) -> bool:
    """
    True if two arrays, left and right, have equal non-NaN elements, and NaNs
    in corresponding locations. False otherwise. It is assumed that left and
    right are NumPy arrays of the same dtype. The behavior of this function
    (particularly with respect to NaNs) is not defined if the dtypes are
    different.

    Parameters
    ----------
    left, right : ndarrays
    strict_nan : bool, default False
        If True, consider NaN and None to be different.
    dtype_equal : bool, default False
        Whether `left` and `right` are known to have the same dtype
        according to `is_dtype_equal`. Some methods like `BlockManager.equals`.
        require that the dtypes match. Setting this to ``True`` can improve
        performance, but will give different results for arrays that are
        equal but different dtypes.

    Returns
    -------
    b : bool
        Returns True if the arrays are equivalent.

    Examples
    --------
    >>> array_equivalent(
    ...     np.array([1, 2, np.nan]),
    ...     np.array([1, 2, np.nan]))
    True
    >>> array_equivalent(
    ...     np.array([1, np.nan, 2]),
    ...     np.array([1, 2, np.nan]))
    False
    """
    left, right = np.asarray(left), np.asarray(right)

    # shape compat
    if left.shape != right.shape:
        return False

    if dtype_equal:
        # fastpath when we require that the dtypes match (Block.equals)
        if is_float_dtype(left.dtype) or is_complex_dtype(left.dtype):
            return _array_equivalent_float(left, right)
        elif is_datetimelike_v_numeric(left.dtype, right.dtype):
            return False
        elif needs_i8_conversion(left.dtype):
            return _array_equivalent_datetimelike(left, right)
        elif is_string_dtype(left.dtype):
            # TODO: fastpath for pandas' StringDtype
            return _array_equivalent_object(left, right, strict_nan)
        else:
            return np.array_equal(left, right)

    # Slow path when we allow comparing different dtypes.
    # Object arrays can contain None, NaN and NaT.
    # string dtypes must be come to this path for NumPy 1.7.1 compat
    if is_string_dtype(left.dtype) or is_string_dtype(right.dtype):
        return _array_equivalent_object(left, right, strict_nan)

    # NaNs can occur in float and complex arrays.
    if is_float_dtype(left.dtype) or is_complex_dtype(left.dtype):
        # Empty arrays are trivially equivalent once shapes matched above.
        if not (np.prod(left.shape) and np.prod(right.shape)):
            return True
        return ((left == right) | (isna(left) & isna(right))).all()

    elif is_datetimelike_v_numeric(left, right):
        # GH#29553 avoid numpy deprecation warning
        return False

    elif needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype):
        # datetime64, timedelta64, Period
        if not is_dtype_equal(left.dtype, right.dtype):
            return False

        # Compare the underlying i8 representation (NaT compares equal).
        left = left.view("i8")
        right = right.view("i8")

    # if we have structured dtypes, compare first
    if (
        left.dtype.type is np.void or right.dtype.type is np.void
    ) and left.dtype != right.dtype:
        return False

    return np.array_equal(left, right)
def _array_equivalent_float(left, right):
return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
def _array_equivalent_datetimelike(left, right):
return np.array_equal(left.view("i8"), right.view("i8"))
def _array_equivalent_object(left, right, strict_nan):
    """
    Element-wise equivalence for object arrays. With ``strict_nan`` the
    NA markers (NaT, pd.NA, float NaN) must match in kind, not just in
    position.
    """
    if not strict_nan:
        # isna considers NaN and None to be equivalent.
        return lib.array_equivalent_object(
            ensure_object(left.ravel()), ensure_object(right.ravel())
        )

    for left_value, right_value in zip(left, right):
        if left_value is NaT and right_value is not NaT:
            return False

        elif left_value is libmissing.NA and right_value is not libmissing.NA:
            return False

        elif isinstance(left_value, float) and np.isnan(left_value):
            if not isinstance(right_value, float) or not np.isnan(right_value):
                return False

        else:
            try:
                if np.any(np.asarray(left_value != right_value)):
                    return False
            except TypeError as err:
                # Comparison itself raised: treat known incompatibilities
                # as "not equivalent" rather than propagating.
                if "Cannot compare tz-naive" in str(err):
                    # tzawareness compat failure, see GH#28507
                    return False
                elif "boolean value of NA is ambiguous" in str(err):
                    return False
                raise
    return True
def array_equals(left: ArrayLike, right: ArrayLike) -> bool:
    """
    ExtensionArray-compatible implementation of array_equivalent.

    Dtypes must match exactly; ExtensionArrays delegate to their own
    ``equals``, everything else goes through ``array_equivalent``.
    """
    if not is_dtype_equal(left.dtype, right.dtype):
        return False

    if isinstance(left, ABCExtensionArray):
        return left.equals(right)

    return array_equivalent(left, right, dtype_equal=True)
def infer_fill_value(val):
    """
    Infer the NA fill value (nan or a dtyped NaT) matching the provided
    scalar/ndarray/list-like, so that block construction gets a properly
    typed missing element.
    """
    if not is_list_like(val):
        val = [val]
    val = np.array(val, copy=False)
    if needs_i8_conversion(val.dtype):
        # datetime64 / timedelta64: NaT in the same dtype.
        return np.array("NaT", dtype=val.dtype)
    elif is_object_dtype(val.dtype):
        # Object array: sniff the contents for datetime-like values.
        dtype = lib.infer_dtype(ensure_object(val), skipna=False)
        if dtype in ["datetime", "datetime64"]:
            return np.array("NaT", dtype=DT64NS_DTYPE)
        elif dtype in ["timedelta", "timedelta64"]:
            return np.array("NaT", dtype=TD64NS_DTYPE)
    return np.nan
def maybe_fill(arr, fill_value=np.nan):
    """
    If we have a compatible fill_value and arr dtype, then fill ``arr``
    in place; the (possibly untouched) array is returned either way.
    """
    if isna_compat(arr, fill_value):
        arr.fill(fill_value)
    return arr
def na_value_for_dtype(dtype, compat: bool = True):
    """
    Return the canonical NA value for a dtype.

    Parameters
    ----------
    dtype : string / dtype
    compat : bool, default True
        For integer and boolean dtypes (which cannot hold NaN), return a
        dtype-compatible filler (``0`` / ``False``) instead of ``np.nan``.

    Returns
    -------
    np.dtype or a pandas dtype

    Examples
    --------
    >>> na_value_for_dtype(np.dtype('int64'))
    0
    >>> na_value_for_dtype(np.dtype('int64'), compat=False)
    nan
    >>> na_value_for_dtype(np.dtype('float64'))
    nan
    >>> na_value_for_dtype(np.dtype('bool'))
    False
    >>> na_value_for_dtype(np.dtype('datetime64[ns]'))
    NaT
    """
    dtype = pandas_dtype(dtype)

    if is_extension_array_dtype(dtype):
        return dtype.na_value
    if needs_i8_conversion(dtype):
        return NaT
    if is_float_dtype(dtype):
        return np.nan
    if is_integer_dtype(dtype):
        return 0 if compat else np.nan
    if is_bool_dtype(dtype):
        return False if compat else np.nan
    return np.nan
def remove_na_arraylike(arr):
    """
    Return array-like containing only true/non-NaN values, possibly empty.
    """
    if is_extension_array_dtype(arr):
        mask = notna(arr)
    else:
        mask = notna(np.asarray(arr))
    return arr[mask]
def is_valid_nat_for_dtype(obj, dtype: DtypeObj) -> bool:
    """
    isna check that excludes NA markers incompatible with the dtype.

    Parameters
    ----------
    obj : object
    dtype : np.datetime64, np.timedelta64, DatetimeTZDtype, or PeriodDtype

    Returns
    -------
    bool
    """
    # Only scalar NA-like objects can qualify at all.
    if not (lib.is_scalar(obj) and isna(obj)):
        return False

    kind = dtype.kind
    if kind == "M":
        # datetime64: anything NA-like except a timedelta NaT.
        return not isinstance(obj, np.timedelta64)
    if kind == "m":
        # timedelta64: anything NA-like except a datetime NaT.
        return not isinstance(obj, np.datetime64)
    if kind in "iufc":
        # Numeric: NaT of any flavour is not a valid NA here.
        return obj is not NaT and not isinstance(obj, (np.datetime64, np.timedelta64))

    # must be PeriodDType
    return not isinstance(obj, (np.datetime64, np.timedelta64))
def isna_all(arr: ArrayLike) -> bool:
    """
    Optimized equivalent to isna(arr).all()
    """
    total_len = len(arr)

    # Usually it's enough to check but a small fraction of values to see if
    # a block is NOT null, chunks should help in such cases.
    # parameters 1000 and 40 were chosen arbitrarily
    chunk_len = max(total_len // 40, 1000)

    # Pick the cheapest per-chunk NA checker for the dtype.
    dtype = arr.dtype
    if dtype.kind == "f":
        checker = nan_checker

    elif dtype.kind in ["m", "M"] or dtype.type is Period:
        checker = lambda x: np.asarray(x.view("i8")) == iNaT

    else:
        checker = lambda x: _isna_ndarraylike(x, inf_as_na=INF_AS_NA)

    # all() short-circuits on the first chunk containing a non-NA value.
    return all(
        checker(arr[i : i + chunk_len]).all() for i in range(0, total_len, chunk_len)
    )
| bsd-3-clause |
atgeirr/opm-common | jenkins/convert.py | 20 | 2317 | #!/usr/bin/env python
# coding: utf-8
# originally from:
# http://www.warp1337.com/content/how-use-ctest-jenkins-xunit-or-junit-plugin
# improved by:
# Jorge Araya Navarro <elcorreo@deshackra.com>
# Veni, Sancte Spiritus.
from lxml import etree
import argparse
from os.path import expanduser
from os.path import join
import logging
# Configure logging: only errors, printed with their level prefix.
logging.basicConfig(format="%(levelname)s: %(message)s",
                    level=logging.ERROR)

desc = ("Converts ctest XML file to xUnit/JUnit XML "
        "compatible file to use with Jenkins-CI. "
        "Did you found any bug? please report it on: "
        "https://bitbucket.org/shackra/ctest-jenkins/issues")

# configure argument parser.
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("-x", "--xslt", help="the XSLT file to use", required=True)
parser.add_argument("-t", "--tag", help=("the directory where 'Testing/TAG'"
                                         "file is. Remember to call ctest with"
                                         " '-T test' option to generate it"),
                    required=True)
parsed = parser.parse_args()

# Expand the user symbol "~" in both paths.
# BUG FIX: the expanded XSLT path was previously stored in an unused
# attribute `parsed.xsl` while the code below kept reading the raw
# `parsed.xslt`, so a "~" in the -x argument was never expanded.
parsed.xslt = expanduser(parsed.xslt)
parsed.tag = expanduser(parsed.tag)

# Open the TAG file: its first line names the sub-directory that holds
# the Test.xml results for the most recent ctest run.
directory = None
try:
    with open(join(parsed.tag, "Testing", "TAG")) as tagfile:
        directory = tagfile.readline().strip()
except NotADirectoryError:
    logging.error(
        "'Testing/TAG' wasn't found on directory '{}'.".format(parsed.tag))
    exit(1)
except FileNotFoundError:
    logging.error(
        "File '{}' not found.".format(join(parsed.tag, "Testing", "TAG")))
    exit(1)

xmldoc = None
transform = None

# Parse the ctest result document.
try:
    with open(join(parsed.tag, "Testing", directory, "Test.xml"))\
            as testxmlfile:
        xmldoc = etree.parse(testxmlfile)
except FileNotFoundError:
    logging.error("File {} not found. Was it deleted or moved?".format(
        join(parsed.tag, "Testing", directory, "Test.xml")))
    exit(1)

# Load the XSLT stylesheet and build the transformer.
try:
    with open(parsed.xslt) as xsltfile:
        xslt_root = etree.XML(xsltfile.read())
        transform = etree.XSLT(xslt_root)
except FileNotFoundError:
    logging.error("File {} not found.".format(parsed.xslt))
    exit(1)

# Emit the transformed (xUnit/JUnit style) XML on stdout.
result_tree = transform(xmldoc)
print(result_tree)
| gpl-3.0 |
mibexsoftware/alfred-bamboo-workflow | workflow/src/routing.py | 1 | 2072 | # -*- coding: utf-8 -*-
from src import icons, __version__
from src.actions import HOST_URL
from src.actions.branches import BranchesWorkflowAction
from src.actions.configure import ConfigureWorkflowAction
from src.actions.help import HelpWorkflowAction
from src.actions.index import IndexWorkflowAction
from src.actions.plans import PlanWorkflowAction
from src.actions.projects import ProjectWorkflowAction
from src.actions.status import ResultsWorkflowAction
from src.actions.dashboard import DashboardWorkflowAction
from src.util import workflow, call_alfred
# Maps the keyword prefix of an Alfred query to the action class that
# handles it; unknown prefixes fall back to IndexWorkflowAction in route().
WORKFLOW_ACTIONS = {
    ':config': ConfigureWorkflowAction,
    ':dashboard': DashboardWorkflowAction,
    ':projects': ProjectWorkflowAction,
    ':plans': PlanWorkflowAction,
    ':branches': BranchesWorkflowAction,
    ':results': ResultsWorkflowAction,
    ':help': HelpWorkflowAction
}
def route(args):  # e.g., args = ":config sethost http://localhost,--exec"
    """Dispatch an Alfred query to the matching workflow action."""
    command_string = args[0]  # :config sethost http://localhost
    command = command_string.split(' ')

    # Force host configuration first: without a stored host URL (and when
    # not currently setting one), bounce the user to the sethost prompt.
    if not workflow().settings.get(HOST_URL, None) and 'sethost' not in command:
        call_alfred('bamboo:config sethost ')
        return

    handler = IndexWorkflowAction
    action = next(iter(command), None)
    if action:
        # Unknown action keywords fall back to the index action.
        handler = WORKFLOW_ACTIONS.get(action, IndexWorkflowAction)

    if '--exec' in args:
        # Execute the action, forwarding modifier-key state.
        handler().execute(command, ctrl_pressed='--cmd' in args, shift_pressed='--shift' in args)
    else:  # show menu
        handler().menu(command)
        _notify_if_upgrade_available()

    workflow().send_feedback()
def _notify_if_upgrade_available():
    """Append an Alfred result row offering the update when one exists."""
    if not workflow().update_available:
        return
    update_info = workflow().cached_data('__workflow_update_status', max_age=0)
    new_version = update_info['version']
    workflow().add_item('An update is available!',
                        'Update the workflow from version {} to {}'.format(__version__, new_version),
                        arg=':config update',
                        valid=True,
                        icon=icons.UPDATE)
| mit |
syci/OCB | addons/point_of_sale/res_config.py | 34 | 1590 | from openerp.osv import fields, osv
class pos_configuration(osv.TransientModel):
    """Point-of-sale settings wizard (res.config.settings extension)."""
    _inherit = 'res.config.settings'
    _name = 'pos.config.settings'

    _columns = {
        # Each module_* field toggles installation of the matching addon.
        'module_pos_restaurant': fields.selection([
            (0, "Point of sale for shops"),
            (1, "Restaurant: activate table management")
            ], "Restaurant",
            help='This module adds several restaurant features to the Point of Sale: \n\n- Bill Printing: Allows you to print a receipt before the order is paid \n\n- Bill Splitting: Allows you to split an order into different orders \n\n- Kitchen Order Printing: allows you to print orders updates to kitchen or bar printers'),
        'module_pos_loyalty': fields.boolean("Loyalty Program",
            help='Allows you to define a loyalty program in the point of sale, where the customers earn loyalty points and get rewards'),
        'module_pos_discount': fields.selection([
            (0, "Allow discounts on order lines only"),
            (1, "Allow global discounts")
            ], "Discount",
            help='Allows the cashier to quickly give a percentage sale discount for all the sales order to a customer'),
        'module_pos_mercury': fields.selection([
            (0, "No credit card"),
            (1, "Allows customers to pay with credit cards.")
            ], "Credit Cards",
            help='The transactions are processed by MercuryPay'),
        'module_pos_reprint': fields.selection([
            (0, "No reprint"),
            (1, "Allow cashier to reprint receipts")
            ], "Reprints"),
    }
| agpl-3.0 |
ryfeus/lambda-packs | Tensorflow/source/tensorflow/contrib/rnn/python/ops/gru_ops.py | 17 | 7762 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for the Block GRU Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.rnn.ops import gen_gru_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import resource_loader
from tensorflow.python.util.deprecation import deprecated_args
_gru_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_gru_ops.so"))
@ops.RegisterGradient("GRUBlockCell")
def _GRUBlockCellGrad(op, *grad):
  r"""Gradient for GRUBlockCell.

  Args:
    op: Op for which the gradient is defined.
    *grad: Gradients of the optimization function wrt output
      for the Op.

  Returns:
    d_x: Gradients wrt to x
    d_h: Gradients wrt to h
    d_w_ru: Gradients wrt to w_ru
    d_w_c: Gradients wrt to w_c
    d_b_ru: Gradients wrt to b_ru
    d_b_c: Gradients wrt to b_c

  Mathematics behind the Gradients below:
  ```
  d_c_bar = d_h \circ (1-u) \circ (1-c \circ c)
  d_u_bar = d_h \circ (h-c) \circ u \circ (1-u)

  d_r_bar_u_bar = [d_r_bar d_u_bar]

  [d_x_component_1 d_h_prev_component_1] = d_r_bar_u_bar * w_ru^T

  [d_x_component_2 d_h_prevr] = d_c_bar * w_c^T

  d_x = d_x_component_1 + d_x_component_2

  d_h_prev = d_h_prev_component_1 + d_h_prevr \circ r + u
  ```
  Below calculation is performed in the python wrapper for the Gradients
  (not in the gradient kernel.)
  ```
  d_w_ru = x_h_prevr^T * d_c_bar

  d_w_c = x_h_prev^T * d_r_bar_u_bar

  d_b_ru = sum of d_r_bar_u_bar along axis = 0

  d_b_c = sum of d_c_bar along axis = 0
  ```
  """
  x, h_prev, w_ru, w_c, b_ru, b_c = op.inputs
  r, u, c, _ = op.outputs
  # Only the gradient flowing into the final output h is needed.
  _, _, _, d_h = grad

  # The C++ kernel produces input/state gradients and the bar partials.
  d_x, d_h_prev, d_c_bar, d_r_bar_u_bar = gen_gru_ops.gru_block_cell_grad(
      x, h_prev, w_ru, w_c, b_ru, b_c, r, u, c, d_h)

  # Weight and bias gradients are reduced here in Python (see docstring).
  x_h_prev = array_ops.concat([x, h_prev], 1)
  d_w_ru = math_ops.matmul(x_h_prev, d_r_bar_u_bar, transpose_a=True)
  d_b_ru = nn_ops.bias_add_grad(d_r_bar_u_bar)

  x_h_prevr = array_ops.concat([x, h_prev * r], 1)
  d_w_c = math_ops.matmul(x_h_prevr, d_c_bar, transpose_a=True)
  d_b_c = nn_ops.bias_add_grad(d_c_bar)

  return d_x, d_h_prev, d_w_ru, d_w_c, d_b_ru, d_b_c
class GRUBlockCell(rnn_cell_impl.RNNCell):
  r"""Block GRU cell implementation.

  Deprecated: use GRUBlockCellV2 instead.

  The implementation is based on:  http://arxiv.org/abs/1406.1078
  Computes the GRU cell forward propagation for 1 time step.

  This kernel op implements the following mathematical equations:

  Biases are initialized with:

  * `b_ru` - constant_initializer(1.0)
  * `b_c` - constant_initializer(0.0)

  ```
  x_h_prev = [x, h_prev]

  [r_bar u_bar] = x_h_prev * w_ru + b_ru

  r = sigmoid(r_bar)
  u = sigmoid(u_bar)

  h_prevr = h_prev \circ r

  x_h_prevr = [x h_prevr]

  c_bar = x_h_prevr * w_c + b_c
  c = tanh(c_bar)

  h = (1-u) \circ c + u \circ h_prev
  ```
  """

  @deprecated_args(None, "cell_size is deprecated, use num_units instead",
                   "cell_size")
  def __init__(self, num_units=None, cell_size=None):
    """Initialize the Block GRU cell.

    Args:
      num_units: int, The number of units in the GRU cell.
      cell_size: int, The old (deprecated) name for `num_units`.

    Raises:
      ValueError: if both cell_size and num_units are not None;
        or both are None.
    """
    if (cell_size is None) == (num_units is None):
      raise ValueError("Exactly one of num_units or cell_size must be provided.")
    if num_units is None:
      num_units = cell_size
    self._cell_size = num_units

  @property
  def state_size(self):
    return self._cell_size

  @property
  def output_size(self):
    return self._cell_size

  def __call__(self, x, h_prev, scope=None):
    """GRU cell."""
    with vs.variable_scope(scope or type(self).__name__):
      input_size = x.get_shape().with_rank(2)[1]

      # Check if the input size exist.
      if input_size is None:
        raise ValueError("Expecting input_size to be set.")

      # Check cell_size == state_size from h_prev.
      cell_size = h_prev.get_shape().with_rank(2)[1]
      if cell_size != self._cell_size:
        raise ValueError("Shape of h_prev[1] incorrect: cell_size %i vs %s" %
                         (self._cell_size, cell_size))

      # NOTE(review): reachable only if comparing an unknown Dimension
      # above yields a falsy result — confirm against the tf.Dimension
      # comparison semantics of the targeted TF version.
      if cell_size is None:
        raise ValueError("cell_size from `h_prev` should not be None.")

      # Gate weights/biases; b_ru starts at 1.0 so gates begin open.
      w_ru = vs.get_variable("w_ru", [input_size + self._cell_size,
                                      self._cell_size * 2])
      b_ru = vs.get_variable(
          "b_ru", [self._cell_size * 2],
          initializer=init_ops.constant_initializer(1.0))
      w_c = vs.get_variable("w_c",
                            [input_size + self._cell_size, self._cell_size])
      b_c = vs.get_variable(
          "b_c", [self._cell_size],
          initializer=init_ops.constant_initializer(0.0))

      _gru_block_cell = gen_gru_ops.gru_block_cell  # pylint: disable=invalid-name

      # The fused kernel returns (r, u, c, h); only h is exposed.
      _, _, _, new_h = _gru_block_cell(
          x=x, h_prev=h_prev, w_ru=w_ru, w_c=w_c, b_ru=b_ru, b_c=b_c)
      return new_h, new_h
class GRUBlockCellV2(GRUBlockCell):
  """Temporary GRUBlockCell impl with a different variable naming scheme.

  Only differs from GRUBlockCell by variable names: variables live under
  "gates/{kernel,bias}" and "candidate/{kernel,bias}" sub-scopes instead
  of the flat w_ru/b_ru/w_c/b_c names.
  """

  def __call__(self, x, h_prev, scope=None):
    """GRU cell."""
    with vs.variable_scope(scope or type(self).__name__):
      input_size = x.get_shape().with_rank(2)[1]

      # Check if the input size exist.
      if input_size is None:
        raise ValueError("Expecting input_size to be set.")

      # Check cell_size == state_size from h_prev.
      cell_size = h_prev.get_shape().with_rank(2)[1]
      if cell_size != self._cell_size:
        raise ValueError("Shape of h_prev[1] incorrect: cell_size %i vs %s" %
                         (self._cell_size, cell_size))

      # NOTE(review): see the identical ordering note in GRUBlockCell.
      if cell_size is None:
        raise ValueError("cell_size from `h_prev` should not be None.")

      with vs.variable_scope("gates"):
        w_ru = vs.get_variable("kernel", [input_size + self._cell_size,
                                          self._cell_size * 2])
        b_ru = vs.get_variable(
            "bias", [self._cell_size * 2],
            initializer=init_ops.constant_initializer(1.0))
      with vs.variable_scope("candidate"):
        w_c = vs.get_variable("kernel",
                              [input_size + self._cell_size, self._cell_size])
        b_c = vs.get_variable(
            "bias", [self._cell_size],
            initializer=init_ops.constant_initializer(0.0))

      _gru_block_cell = gen_gru_ops.gru_block_cell  # pylint: disable=invalid-name

      _, _, _, new_h = _gru_block_cell(
          x=x, h_prev=h_prev, w_ru=w_ru, w_c=w_c, b_ru=b_ru, b_c=b_c)
      return new_h, new_h
| mit |
salomax/Open-Marketplace | app/sale/messages.py | 3 | 2394 | #!/usr/bin/env python
# coding: utf-8
#
# Copyright 2016, Marcos Salomão.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from protorpc import messages
from protorpc import message_types
from app.product import messages as product
from app.customer import messages as customer
__author__ = "Marcos Salomão"
__email__ = "salomao.marcos@gmail.com"
__copyright__ = "Copyright 2016, Marcos Salomão"
__license__ = "Apache 2.0"
class SaleKeyMessage(messages.Message):
    """ Get message for sale key.

    Minimal message carrying only the datastore id of a sale; used to
    reference an existing sale from other messages.
    """

    # Datastore id of the sale entity.
    id = messages.IntegerField(1)
class SaleGetMessage(messages.Message):
    """ Get message for sale.

    Full representation of a sale returned to clients, with the related
    customer and product expanded into their own Get messages.
    """

    # Datastore id of the sale.
    id = messages.IntegerField(1)
    # Expanded customer record the sale belongs to.
    customer = messages.MessageField(
        customer.CustomerGetMessage, 2, required=True)
    # Expanded product record that was sold.
    product = messages.MessageField(product.ProductGetMessage, 3, required=True)
    # Number of units sold.
    quantity = messages.IntegerField(4, required=True)
    # When the sale took place.
    sale_date = message_types.DateTimeField(5, required=True)
    # Gross amount, fare (e.g. shipping/fees) and resulting net total.
    amount = messages.FloatField(6)
    fare = messages.FloatField(7)
    net_total = messages.FloatField(8)
    # Shipment tracking code, if any.
    track_code = messages.StringField(9)
    # When the sale record was created.
    created_date = message_types.DateTimeField(10, required=True)
class SalePostMessage(messages.Message):
    """ POST message for sale.

    Payload for creating/updating a sale; customer and product are passed
    by key only (unlike SaleGetMessage, which expands them).
    """

    # Datastore id; absent on create, present on update.
    id = messages.IntegerField(1)
    # Key of the customer the sale belongs to.
    customer = messages.MessageField(
        customer.CustomerKeyMessage, 2, required=True)
    # Key of the product being sold.
    product = messages.MessageField(product.ProductKeyMessage, 3, required=True)
    # Number of units sold.
    quantity = messages.IntegerField(4, required=True)
    # When the sale took place.
    sale_date = message_types.DateTimeField(5, required=True)
    # Gross amount, fare (e.g. shipping/fees) and resulting net total.
    amount = messages.FloatField(6)
    fare = messages.FloatField(7)
    net_total = messages.FloatField(8)
    # Shipment tracking code, if any.
    track_code = messages.StringField(9)
class SaleCollectionMessage(messages.Message):
    """ Sales collection.

    Wrapper for returning a list of SaleGetMessage items.
    """

    # The sales in the collection.
    items = messages.MessageField(SaleGetMessage, 1, repeated=True)
| apache-2.0 |
snailbob/namebench | nb_third_party/jinja2/visitor.py | 1401 | 3316 | # -*- coding: utf-8 -*-
"""
jinja2.visitor
~~~~~~~~~~~~~~
This module implements a visitor for the nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from jinja2.nodes import Node
class NodeVisitor(object):
    """Walks the abstract syntax tree and calls a visitor function for
    every node found.  Whatever the visitor functions return is forwarded
    by the `visit` method.

    By default the visitor function for a node is ``'visit_'`` plus the
    node's class name, so a `TryFinally` node is handled by
    `visit_TryFinally`.  Override `get_visitor` to change that lookup.
    When no specific visitor exists (`get_visitor` returns `None`), the
    `generic_visit` fallback is used instead.
    """

    def get_visitor(self, node):
        """Return the visitor method registered for *node*'s class, or
        `None` when the generic fallback should handle this node.
        """
        return getattr(self, 'visit_%s' % node.__class__.__name__, None)

    def visit(self, node, *args, **kwargs):
        """Dispatch *node* to its visitor function and return its result."""
        handler = self.get_visitor(node)
        if handler is None:
            return self.generic_visit(node, *args, **kwargs)
        return handler(node, *args, **kwargs)

    def generic_visit(self, node, *args, **kwargs):
        """Fallback used when no explicit visitor exists: visit every
        direct child of *node*.
        """
        for child in node.iter_child_nodes():
            self.visit(child, *args, **kwargs)
class NodeTransformer(NodeVisitor):
    """Walks the abstract syntax tree and allows modifications of nodes.

    The `NodeTransformer` walks the AST and uses the return value of each
    visitor function to replace or remove the visited node.  A return
    value of `None` removes the node from its previous location;
    otherwise the node is replaced with the return value (which may be
    the original node, in which case nothing changes).
    """

    def _transform_children(self, children, args, kwargs):
        """Visit every `Node` in *children* and return the resulting list.

        Non-node entries are kept verbatim; a visitor returning `None`
        drops the child, and a non-`Node` return value is treated as an
        iterable of replacements and spliced in.
        """
        result = []
        for child in children:
            if not isinstance(child, Node):
                result.append(child)
                continue
            replacement = self.visit(child, *args, **kwargs)
            if replacement is None:
                continue
            if isinstance(replacement, Node):
                result.append(replacement)
            else:
                result.extend(replacement)
        return result

    def generic_visit(self, node, *args, **kwargs):
        for field, old_value in node.iter_fields():
            if isinstance(old_value, list):
                # Mutate the list in place so other references stay valid.
                old_value[:] = self._transform_children(old_value, args, kwargs)
            elif isinstance(old_value, Node):
                replacement = self.visit(old_value, *args, **kwargs)
                if replacement is None:
                    delattr(node, field)
                else:
                    setattr(node, field, replacement)
        return node

    def visit_list(self, node, *args, **kwargs):
        """As transformers may return lists in some places this method
        can be used to enforce a list as return value.
        """
        result = self.visit(node, *args, **kwargs)
        if isinstance(result, list):
            return result
        return [result]
| apache-2.0 |
supersonicninja/android_kernel_huawei_msm8960 | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # every captured tracepoint event; sorted by time in trace_end()
irq_dic = {}; # key is cpu, value is a stack of in-flight hard irqs
              # which may raise the NET_RX softirq
net_rx_dic = {}; # key is cpu, value holds the NET_RX softirq entry time
                 # and the list of events recorded inside that softirq
receive_hunk_list = []; # completed receive hunks (one per NET_RX softirq)
rx_skb_list = []; # received packets waiting to be matched against
                  # skb_copy_datagram_iovec
buffer_budget = 65536; # max entries kept in rx_skb_list, tx_queue_list and
                       # tx_xmit_list before the oldest are dropped
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # packets which passed through dev_queue_xmit (Qdisc)
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # packets which passed through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # packets whose skb was freed, i.e. transmit completed

# options (parsed from sys.argv in trace_begin)
show_tx = 0;
show_rx = 0;
dev = 0; # name of the device given via "dev=", or 0 meaning all devices
debug = 0;

# indices of the event_info tuples built by the perf callbacks below
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
    """Return the interval from *src* to *dst*, converted from nsec to msec."""
    elapsed_nsec = dst - src
    return elapsed_nsec / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
    """Print one transmit record: device, length, and the Qdisc-to-xmit
    and xmit-to-free latencies.  Skipped when a dev= filter is set and
    this hunk's device does not match."""
    if dev != 0 and hunk['dev'].find(dev) < 0:
        return
    print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
        (hunk['dev'], hunk['len'],
        nsecs_secs(hunk['queue_t']),
        nsecs_nsecs(hunk['queue_t'])/1000,
        diff_msec(hunk['queue_t'], hunk['xmit_t']),
        diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format templates for displaying rx packet processing.  The leading
# whitespace draws the tree-like timeline in the output.
PF_IRQ_ENTRY= "  irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY="  softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= "  napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= "         |"
PF_WJOINT= "         |            |"
PF_NET_RECV= "         |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= "         |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= "         |      skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= "         |      kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= "         |      consume_skb(+%.3fmsec)"

# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
    """Print one receive hunk: the irqs that raised NET_RX, the softirq
    entry, and every packet event processed inside it, all as offsets
    (msec) from the first irq entry time."""
    show_hunk = 0
    irq_list = hunk['irq_list']
    cpu = irq_list[0]['cpu']
    base_t = irq_list[0]['irq_ent_t']
    # check if this hunk should be showed (dev= filter)
    if dev != 0:
        for i in range(len(irq_list)):
            if irq_list[i]['name'].find(dev) >= 0:
                show_hunk = 1
                break
    else:
        show_hunk = 1
    if show_hunk == 0:
        return
    print "%d.%06dsec cpu=%d" % \
        (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
    # First the hard-irq entries (and any netif_rx seen inside them).
    for i in range(len(irq_list)):
        print PF_IRQ_ENTRY % \
            (diff_msec(base_t, irq_list[i]['irq_ent_t']),
            irq_list[i]['irq'], irq_list[i]['name'])
        print PF_JOINT
        irq_event_list = irq_list[i]['event_list']
        for j in range(len(irq_event_list)):
            irq_event = irq_event_list[j]
            if irq_event['event'] == 'netif_rx':
                print PF_NET_RX % \
                    (diff_msec(base_t, irq_event['time']),
                    irq_event['skbaddr'])
                print PF_JOINT
    # Then the NET_RX softirq entry and its event stream.
    print PF_SOFT_ENTRY % \
        diff_msec(base_t, hunk['sirq_ent_t'])
    print PF_JOINT
    event_list = hunk['event_list']
    for i in range(len(event_list)):
        event = event_list[i]
        if event['event_name'] == 'napi_poll':
            print PF_NAPI_POLL % \
                (diff_msec(base_t, event['event_t']), event['dev'])
            # Close the tree after the last event, otherwise keep drawing.
            if i == len(event_list) - 1:
                print ""
            else:
                print PF_JOINT
        else:
            print PF_NET_RECV % \
                (diff_msec(base_t, event['event_t']), event['skbaddr'],
                event['len'])
            # 'comm' is set when the skb was matched to a copy-to-user;
            # 'handle' when it was matched to kfree_skb/consume_skb.
            if 'comm' in event.keys():
                print PF_WJOINT
                print PF_CPY_DGRAM % \
                    (diff_msec(base_t, event['comm_t']),
                    event['pid'], event['comm'])
            elif 'handle' in event.keys():
                print PF_WJOINT
                if event['handle'] == "kfree_skb":
                    print PF_KFREE_SKB % \
                        (diff_msec(base_t,
                        event['comm_t']),
                        event['location'])
                elif event['handle'] == "consume_skb":
                    print PF_CONS_SKB % \
                        diff_msec(base_t,
                        event['comm_t'])
            print PF_JOINT
def trace_begin():
    """perf hook: parse script options ('tx', 'rx', 'dev=<name>', 'debug')
    from sys.argv.  When neither tx nor rx is requested, show both."""
    global show_tx
    global show_rx
    global dev
    global debug
    for i in range(len(sys.argv)):
        if i == 0:
            continue
        arg = sys.argv[i]
        if arg == 'tx':
            show_tx = 1
        elif arg =='rx':
            show_rx = 1
        elif arg.find('dev=',0, 4) >= 0:
            dev = arg[4:]
        elif arg == 'debug':
            debug = 1
    if show_tx == 0 and show_rx == 0:
        show_tx = 1
        show_rx = 1
def trace_end():
    """perf hook: sort the captured events by time, replay them through the
    handle_* state machines, then print the rx/tx reports (and buffer
    statistics when 'debug' was requested)."""
    # order all events in time
    all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
                        b[EINFO_IDX_TIME]))
    # process all events: dispatch each tuple to its handler by event name
    for i in range(len(all_event_list)):
        event_info = all_event_list[i]
        name = event_info[EINFO_IDX_NAME]
        if name == 'irq__softirq_exit':
            handle_irq_softirq_exit(event_info)
        elif name == 'irq__softirq_entry':
            handle_irq_softirq_entry(event_info)
        elif name == 'irq__softirq_raise':
            handle_irq_softirq_raise(event_info)
        elif name == 'irq__irq_handler_entry':
            handle_irq_handler_entry(event_info)
        elif name == 'irq__irq_handler_exit':
            handle_irq_handler_exit(event_info)
        elif name == 'napi__napi_poll':
            handle_napi_poll(event_info)
        elif name == 'net__netif_receive_skb':
            handle_netif_receive_skb(event_info)
        elif name == 'net__netif_rx':
            handle_netif_rx(event_info)
        elif name == 'skb__skb_copy_datagram_iovec':
            handle_skb_copy_datagram_iovec(event_info)
        elif name == 'net__net_dev_queue':
            handle_net_dev_queue(event_info)
        elif name == 'net__net_dev_xmit':
            handle_net_dev_xmit(event_info)
        elif name == 'skb__kfree_skb':
            handle_kfree_skb(event_info)
        elif name == 'skb__consume_skb':
            handle_consume_skb(event_info)
    # display receive hunks
    if show_rx:
        for i in range(len(receive_hunk_list)):
            print_receive(receive_hunk_list[i])
    # display transmit hunks
    if show_tx:
        print "   dev    len      Qdisc        " \
            "       netdevice        free"
        for i in range(len(tx_free_list)):
            print_transmit(tx_free_list[i])
    if debug:
        print "debug buffer status"
        print "----------------------------"
        print "xmit Qdisc:remain:%d overflow:%d" % \
            (len(tx_queue_list), of_count_tx_queue_list)
        print "xmit netdevice:remain:%d overflow:%d" % \
            (len(tx_xmit_list), of_count_tx_xmit_list)
        print "receive:remain:%d overflow:%d" % \
            (len(rx_skb_list), of_count_rx_skb_list)
# Called from perf when it finds a corresponding event.  Each callback just
# packs its arguments (with sec/nsec folded into one nsec timestamp) into a
# tuple and appends it to all_event_list; real processing happens later in
# trace_end() once all events can be time-sorted.
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
    # Only NET_RX softirqs are interesting to this script.
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)

def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
    # NOTE(review): the event name passed to symbol_str is
    # "irq__softirq_entry" here and in softirq_raise below — presumably the
    # vec symbol table is shared between these events; confirm upstream.
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)

def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)

def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
            irq, irq_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
            irq, irq_name)
    all_event_list.append(event_info)

def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
    all_event_list.append(event_info)

def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
            napi, dev_name)
    all_event_list.append(event_info)

def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
            skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
            skbaddr, skblen, dev_name)
    all_event_list.append(event_info)

def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
            skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
            skbaddr, skblen, dev_name)
    all_event_list.append(event_info)

def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
            skbaddr, skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
            skbaddr, skblen, dev_name)
    all_event_list.append(event_info)

def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
            skbaddr, skblen, rc, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
            skbaddr, skblen, rc ,dev_name)
    all_event_list.append(event_info)

def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
            skbaddr, protocol, location):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
            skbaddr, protocol, location)
    all_event_list.append(event_info)

def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
            skbaddr)
    all_event_list.append(event_info)

def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
    skbaddr, skblen):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
            skbaddr, skblen)
    all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
    """Push a new hard-irq record onto this cpu's irq stack."""
    (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
    if cpu not in irq_dic.keys():
        irq_dic[cpu] = []
    irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
    irq_dic[cpu].append(irq_record)

def handle_irq_handler_exit(event_info):
    """Pop the matching irq record; keep it only if the irq raised NET_RX
    (i.e. it accumulated an event_list while it was in flight)."""
    (name, context, cpu, time, pid, comm, irq, ret) = event_info
    if cpu not in irq_dic.keys():
        return
    irq_record = irq_dic[cpu].pop()
    if irq != irq_record['irq']:
        return
    irq_record.update({'irq_ext_t':time})
    # if an irq doesn't include a NET_RX softirq, drop it.
    if 'event_list' in irq_record.keys():
        irq_dic[cpu].append(irq_record)

def handle_irq_softirq_raise(event_info):
    """Record a NET_RX raise inside the currently running hard irq."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    if cpu not in irq_dic.keys() \
       or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'sirq_raise'})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)

def handle_irq_softirq_entry(event_info):
    """Open a fresh NET_RX softirq context for this cpu."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}

def handle_irq_softirq_exit(event_info):
    """Close the NET_RX softirq: combine its events with the irqs that
    raised it into one receive hunk."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    irq_list = []
    event_list = 0
    if cpu in irq_dic.keys():
        irq_list = irq_dic[cpu]
        del irq_dic[cpu]
    if cpu in net_rx_dic.keys():
        sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
        event_list = net_rx_dic[cpu]['event_list']
        del net_rx_dic[cpu]
    # Need both the raising irq(s) and a non-empty softirq context.
    if irq_list == [] or event_list == 0:
        return
    rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
            'irq_list':irq_list, 'event_list':event_list}
    # merge information related to a NET_RX softirq
    receive_hunk_list.append(rec_data)

def handle_napi_poll(event_info):
    """Record a napi_poll exit inside the current NET_RX softirq."""
    (name, context, cpu, time, pid, comm, napi, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        event_list = net_rx_dic[cpu]['event_list']
        rec_data = {'event_name':'napi_poll',
                'dev':dev_name, 'event_t':time}
        event_list.append(rec_data)

def handle_netif_rx(event_info):
    """Record a netif_rx call inside the currently running hard irq."""
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    if cpu not in irq_dic.keys() \
       or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'netif_rx',
        'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
    """Record a received skb in the current NET_RX softirq and remember it
    (newest-first) so a later copy/free event can be matched by skbaddr."""
    global of_count_rx_skb_list

    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        rec_data = {'event_name':'netif_receive_skb',
                'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
        event_list = net_rx_dic[cpu]['event_list']
        event_list.append(rec_data)
        rx_skb_list.insert(0, rec_data)
        # Enforce the buffer budget by dropping the oldest entry.
        if len(rx_skb_list) > buffer_budget:
            rx_skb_list.pop()
            of_count_rx_skb_list += 1

def handle_net_dev_queue(event_info):
    """Record a packet entering the Qdisc via dev_queue_xmit."""
    global of_count_tx_queue_list
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
    tx_queue_list.insert(0, skb)
    if len(tx_queue_list) > buffer_budget:
        tx_queue_list.pop()
        of_count_tx_queue_list += 1

def handle_net_dev_xmit(event_info):
    """On successful device transmit, move the matching packet from the
    Qdisc list to the xmit list and stamp its xmit time."""
    global of_count_tx_xmit_list
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, rc, dev_name) = event_info
    if rc == 0: # NETDEV_TX_OK
        for i in range(len(tx_queue_list)):
            skb = tx_queue_list[i]
            if skb['skbaddr'] == skbaddr:
                skb['xmit_t'] = time
                tx_xmit_list.insert(0, skb)
                del tx_queue_list[i]
                if len(tx_xmit_list) > buffer_budget:
                    tx_xmit_list.pop()
                    of_count_tx_xmit_list += 1
                return

def handle_kfree_skb(event_info):
    """Match an skb free: drop it if still queued, complete the transmit
    record if already xmitted, or annotate the matching rx record."""
    (name, context, cpu, time, pid, comm,
        skbaddr, protocol, location) = event_info
    for i in range(len(tx_queue_list)):
        skb = tx_queue_list[i]
        if skb['skbaddr'] == skbaddr:
            del tx_queue_list[i]
            return
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle':"kfree_skb",
                'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[i]
            return

def handle_consume_skb(event_info):
    """Treat consume_skb as the successful completion of a transmit."""
    (name, context, cpu, time, pid, comm, skbaddr) = event_info
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return

def handle_skb_copy_datagram_iovec(event_info):
    """Annotate the matching rx record with the process that copied the
    packet's data to user space."""
    (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if skbaddr == rec_data['skbaddr']:
            rec_data.update({'handle':"skb_copy_datagram_iovec",
                'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[i]
            return
| gpl-2.0 |
zorroblue/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 12 | 4320 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initialization strategies to make
the algorithm's convergence robust, as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of squared
distances to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates one single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum), with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
# Shared RNG for the qualitative demo in Part 2 (Part 1 seeds per run_id).
random_state = np.random.RandomState(0)

# Number of run (with randomly generated dataset) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5

# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])

# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3           # centers form a grid_size x grid_size grid
scale = 0.1             # stddev of the Gaussian noise around each center
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
    """Generate a shuffled 2D dataset of Gaussian blobs on an integer grid.

    Parameters
    ----------
    random_state : int, RandomState instance or None
        Seed or generator controlling both the noise and the final shuffle.
    n_samples_per_center : int
        Number of samples drawn around each grid point.
    grid_size : int
        The cluster centers form a ``grid_size x grid_size`` integer grid.
    scale : float
        Standard deviation of the Gaussian noise added around each center.

    Returns
    -------
    X : ndarray of shape (grid_size ** 2 * n_samples_per_center, 2)
        The shuffled samples.
    y : ndarray of the same length
        True cluster index of each sample, shuffled consistently with X.
    """
    random_state = check_random_state(random_state)
    centers = np.array([[i, j]
                        for i in range(grid_size)
                        for j in range(grid_size)])
    n_clusters_true = centers.shape[0]

    # NOTE: a single noise draw is reused for every center, so all clusters
    # are exact translates of one another.  Kept as-is to preserve the
    # figures this example produces.
    noise = random_state.normal(
        scale=scale, size=(n_samples_per_center, centers.shape[1]))

    X = np.concatenate([c + noise for c in centers])
    y = np.concatenate([[i] * n_samples_per_center
                        for i in range(n_clusters_true)])
    return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
plt.figure()
plots = []
legends = []

# (estimator class, init strategy, extra constructor kwargs)
cases = [
    (KMeans, 'k-means++', {}),
    (KMeans, 'random', {}),
    (MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
    (MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]

for factory, init, params in cases:
    print("Evaluation of %s with %s init" % (factory.__name__, init))
    # inertia[i, run] = final inertia for n_init_range[i] on dataset `run`
    inertia = np.empty((len(n_init_range), n_runs))

    for run_id in range(n_runs):
        # run_id seeds both the dataset and the estimator for reproducibility
        X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
        for i, n_init in enumerate(n_init_range):
            km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
                         n_init=n_init, **params).fit(X)
            inertia[i, run_id] = km.inertia_
    # Plot mean inertia with its std across runs as error bars.
    p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
    plots.append(p[0])
    legends.append("%s with %s init" % (factory.__name__, init))

plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)

# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
# Single random init on purpose: demonstrates a bad local optimum.
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
                     random_state=random_state).fit(X)

plt.figure()
for k in range(n_clusters):
    my_members = km.labels_ == k
    # NOTE(review): cm.spectral is deprecated in newer matplotlib
    # (use cm.nipy_spectral there) — kept for this codebase's pinned version.
    color = cm.spectral(float(k) / n_clusters, 1)
    plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
    cluster_center = km.cluster_centers_[k]
    plt.plot(cluster_center[0], cluster_center[1], 'o',
             markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
          "with MiniBatchKMeans")
plt.show()
CTSRD-SOAAP/chromium-42.0.2311.135 | tools/telemetry/telemetry/core/_bitmap.py | 12 | 8002 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Bitmap is a basic wrapper for image pixels. It includes some basic processing
tools: crop, find bounding box of a color and compute histogram of color values.
"""
import array
import base64
import cStringIO
import struct
import subprocess
from telemetry.core import util
from telemetry.core import platform
from telemetry.image_processing import histogram
from telemetry.image_processing import rgba_color
from telemetry.util import support_binaries
util.AddDirToPythonPath(util.GetTelemetryDir(), 'third_party', 'png')
import png # pylint: disable=F0401
class _BitmapTools(object):
  """Wraps a child process of bitmaptools and allows for one command.

  The wire protocol is binary: on construction the pixel-buffer geometry
  and raw pixels are written to the child's stdin; _RunCommand then writes
  one packed command, closes stdin, and reads back a length-prefixed
  response from stdout.
  """
  # Command opcodes understood by the bitmaptools binary.
  CROP_PIXELS = 0
  HISTOGRAM = 1
  BOUNDING_BOX = 2

  def __init__(self, dimensions, pixels):
    """Spawns bitmaptools and streams it the bitmap described by
    `dimensions` (a 7-tuple, see below) and the raw `pixels`."""
    binary = support_binaries.FindPath(
        'bitmaptools',
        platform.GetHostPlatform().GetArchName(),
        platform.GetHostPlatform().GetOSName())
    assert binary, 'You must build bitmaptools first!'

    self._popen = subprocess.Popen([binary],
                                   stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)

    # dimensions are: bpp, width, height, boxleft, boxtop, boxwidth, boxheight
    packed_dims = struct.pack('iiiiiii', *dimensions)
    self._popen.stdin.write(packed_dims)
    # If we got a list of ints, we need to convert it into a byte buffer.
    if type(pixels) is not bytearray:
      pixels = bytearray(pixels)
    self._popen.stdin.write(pixels)

  def _RunCommand(self, *command):
    """Sends one packed command and returns the child's raw response.

    Closing stdin signals the child to run; the response is a 4-byte
    length followed by that many bytes.  An empty read means the child
    failed, in which case its stderr is raised.
    """
    assert not self._popen.stdin.closed, (
        'Exactly one command allowed per instance of tools.')
    packed_command = struct.pack('i' * len(command), *command)
    self._popen.stdin.write(packed_command)
    self._popen.stdin.close()
    length_packed = self._popen.stdout.read(struct.calcsize('i'))
    if not length_packed:
      raise Exception(self._popen.stderr.read())
    length = struct.unpack('i', length_packed)[0]
    return self._popen.stdout.read(length)

  def CropPixels(self):
    """Returns the raw pixels of the crop box passed at construction."""
    return self._RunCommand(_BitmapTools.CROP_PIXELS)

  def Histogram(self, ignore_color, tolerance):
    """Returns a ColorHistogram; -1 encodes 'no ignore color' on the wire."""
    ignore_color_int = -1 if ignore_color is None else int(ignore_color)
    response = self._RunCommand(_BitmapTools.HISTOGRAM,
                                ignore_color_int, tolerance)
    out = array.array('i')
    out.fromstring(response)
    # 3 channels x 256 buckets each.
    assert len(out) == 768, (
        'The ColorHistogram has the wrong number of buckets: %s' % len(out))
    return histogram.ColorHistogram(out[:256], out[256:512], out[512:],
                                    ignore_color)

  def BoundingBox(self, color, tolerance):
    """Returns ((left, top, width, height), pixel_count) for `color`, or
    (None, count) when no matching pixel was found (negative box size)."""
    response = self._RunCommand(_BitmapTools.BOUNDING_BOX, int(color),
                                tolerance)
    unpacked = struct.unpack('iiiii', response)
    box, count = unpacked[:4], unpacked[-1]
    if box[2] < 0 or box[3] < 0:
      box = None
    return box, count
class Bitmap(object):
  """Utilities for parsing and inspecting a bitmap.

  Pixels are stored as a flat row-major byte buffer with `bpp` bytes per
  pixel (3 = RGB, 4 = RGBA).  Cropping is lazy: Crop() only records a crop
  box; the pixels are actually cropped on first access to `pixels`.
  """

  def __init__(self, bpp, width, height, pixels, metadata=None):
    assert bpp in [3, 4], 'Invalid bytes per pixel'
    assert width > 0, 'Invalid width'
    assert height > 0, 'Invalid height'
    assert pixels, 'Must specify pixels'
    assert bpp * width * height == len(pixels), 'Dimensions and pixels mismatch'

    self._bpp = bpp
    self._width = width
    self._height = height
    self._pixels = pixels
    self._metadata = metadata or {}
    self._crop_box = None  # (left, top, width, height) or None

  @property
  def bpp(self):
    # Bytes per pixel: 3 (RGB) or 4 (RGBA).
    return self._bpp

  @property
  def width(self):
    # Reflects a pending crop box, if any.
    return self._crop_box[2] if self._crop_box else self._width

  @property
  def height(self):
    # Reflects a pending crop box, if any.
    return self._crop_box[3] if self._crop_box else self._height

  def _PrepareTools(self):
    """Prepares an instance of _BitmapTools which allows exactly one command.
    """
    crop_box = self._crop_box or (0, 0, self._width, self._height)
    return _BitmapTools((self._bpp, self._width, self._height) + crop_box,
                        self._pixels)

  @property
  def pixels(self):
    """Raw pixel bytes; applies any pending crop before returning."""
    if self._crop_box:
      self._pixels = self._PrepareTools().CropPixels()
      # pylint: disable=unpacking-non-sequence
      _, _, self._width, self._height = self._crop_box
      self._crop_box = None
    if type(self._pixels) is not bytearray:
      self._pixels = bytearray(self._pixels)
    return self._pixels

  @property
  def metadata(self):
    # Keep the PNG-writer metadata in sync with the current dimensions.
    self._metadata['size'] = (self.width, self.height)
    self._metadata['alpha'] = self.bpp == 4
    self._metadata['bitdepth'] = 8
    return self._metadata

  def GetPixelColor(self, x, y):
    """Returns the RgbaColor at (x, y).  No bounds checking is performed."""
    pixels = self.pixels
    base = self._bpp * (y * self._width + x)
    if self._bpp == 4:
      return rgba_color.RgbaColor(pixels[base + 0], pixels[base + 1],
                                  pixels[base + 2], pixels[base + 3])
    return rgba_color.RgbaColor(pixels[base + 0], pixels[base + 1],
                                pixels[base + 2])

  @staticmethod
  def FromPng(png_data):
    """Builds a Bitmap from in-memory PNG bytes."""
    width, height, pixels, meta = png.Reader(bytes=png_data).read_flat()
    return Bitmap(4 if meta['alpha'] else 3, width, height, pixels, meta)

  @staticmethod
  def FromPngFile(path):
    """Builds a Bitmap from a PNG file on disk."""
    with open(path, "rb") as f:
      return Bitmap.FromPng(f.read())

  def WritePngFile(self, path):
    """Writes this bitmap to `path` as a PNG."""
    with open(path, "wb") as f:
      png.Writer(**self.metadata).write_array(f, self.pixels)

  def IsEqual(self, other, tolerance=0):
    """Returns True if `other` matches this bitmap within `tolerance`
    per channel.  Dimensions must match exactly."""
    # Dimensions must be equal
    if self.width != other.width or self.height != other.height:
      return False

    # Per-pixel comparison is only needed with a tolerance or mixed bpp;
    # otherwise a straight buffer compare suffices.
    if tolerance or self.bpp != other.bpp:
      for y in range(self.height):
        for x in range(self.width):
          c0 = self.GetPixelColor(x, y)
          c1 = other.GetPixelColor(x, y)
          if not c0.IsEqual(c1, tolerance):
            return False
    else:
      return self.pixels == other.pixels

    return True

  def Diff(self, other):
    """Returns a new RGB Bitmap of per-channel absolute differences.

    Areas outside either input (when sizes differ) are compared against
    transparent black."""
    # Output dimensions will be the maximum of the two input dimensions
    out_width = max(self.width, other.width)
    out_height = max(self.height, other.height)

    diff = [[0 for x in xrange(out_width * 3)] for x in xrange(out_height)]

    # Loop over each pixel and write out the difference
    for y in range(out_height):
      for x in range(out_width):
        if x < self.width and y < self.height:
          c0 = self.GetPixelColor(x, y)
        else:
          c0 = rgba_color.RgbaColor(0, 0, 0, 0)

        if x < other.width and y < other.height:
          c1 = other.GetPixelColor(x, y)
        else:
          c1 = rgba_color.RgbaColor(0, 0, 0, 0)

        offset = x * 3
        diff[y][offset] = abs(c0.r - c1.r)
        diff[y][offset+1] = abs(c0.g - c1.g)
        diff[y][offset+2] = abs(c0.b - c1.b)

    # This particular method can only save to a file, so the result will be
    # written into an in-memory buffer and read back into a Bitmap
    diff_img = png.from_array(diff, mode='RGB')
    output = cStringIO.StringIO()
    try:
      diff_img.save(output)
      diff = Bitmap.FromPng(output.getvalue())
    finally:
      output.close()

    return diff

  def GetBoundingBox(self, color, tolerance=0):
    """Returns ((left, top, width, height), count) of pixels matching
    `color` within `tolerance`, or (None, count) if none match."""
    return self._PrepareTools().BoundingBox(color, tolerance)

  def Crop(self, left, top, width, height):
    """Records a crop box (relative to any existing one) and returns self.
    The actual pixel crop happens lazily in the `pixels` property."""
    cur_box = self._crop_box or (0, 0, self._width, self._height)
    cur_left, cur_top, cur_width, cur_height = cur_box

    if (left < 0 or top < 0 or
        (left + width) > cur_width or
        (top + height) > cur_height):
      raise ValueError('Invalid dimensions')

    self._crop_box = cur_left + left, cur_top + top, width, height
    return self

  def ColorHistogram(self, ignore_color=None, tolerance=0):
    """Returns a ColorHistogram of this bitmap, optionally skipping pixels
    that match `ignore_color` within `tolerance`."""
    return self._PrepareTools().Histogram(ignore_color, tolerance)
| bsd-3-clause |
yeraydiazdiaz/nonrel-blog | django/contrib/admindocs/views.py | 77 | 15064 | import inspect
import os
import re
from django import template
from django.template import RequestContext
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.db import models
from django.shortcuts import render_to_response
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import Http404
from django.core import urlresolvers
from django.contrib.admindocs import utils
from django.contrib.sites.models import Site
from django.utils.importlib import import_module
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
# Exclude methods starting with these strings from documentation
MODEL_METHODS_EXCLUDE = ('_', 'add_', 'delete', 'save', 'set_')
class GenericSite(object):
    """Fallback stand-in used by view_index() when django.contrib.sites is
    not installed; provides the two attributes templates read from a Site.
    """
    domain = 'example.com'
    name = 'my site'
@staff_member_required
def doc_index(request):
    """Render the admindocs landing page, or the 'docutils missing' page
    when the docutils package cannot be imported."""
    if not utils.docutils_is_available:
        return missing_docutils_page(request)
    return render_to_response('admin_doc/index.html', {
        'root_path': urlresolvers.reverse('admin:index'),
    }, context_instance=RequestContext(request))
@staff_member_required
def bookmarklets(request):
    """Render the bookmarklets help page, passing the fully-qualified admin
    URL (scheme + host + admin root) so the bookmarklet links are absolute."""
    admin_root = urlresolvers.reverse('admin:index')
    scheme = request.is_secure() and 'https' or 'http'
    context = {
        'root_path': admin_root,
        'admin_url': mark_safe(
            "%s://%s%s" % (scheme, request.get_host(), admin_root)),
    }
    return render_to_response('admin_doc/bookmarklets.html', context,
                              context_instance=RequestContext(request))
@staff_member_required
def template_tag_index(request):
    """List every registered template tag (builtin and app libraries) with
    its docstring rendered from reST."""
    if not utils.docutils_is_available:
        return missing_docutils_page(request)

    load_all_installed_template_libraries()

    tags = []
    app_libs = template.libraries.items()
    # Builtin libraries have no module path; pair them with None.
    builtin_libs = [(None, lib) for lib in template.builtins]
    for module_name, library in builtin_libs + app_libs:
        for tag_name, tag_func in library.tags.items():
            title, body, metadata = utils.parse_docstring(tag_func.__doc__)
            if title:
                title = utils.parse_rst(title, 'tag', _('tag:') + tag_name)
            if body:
                body = utils.parse_rst(body, 'tag', _('tag:') + tag_name)
            for key in metadata:
                metadata[key] = utils.parse_rst(metadata[key], 'tag', _('tag:') + tag_name)
            # Builtins show no library name; otherwise use the final
            # component of the module path.
            if library in template.builtins:
                tag_library = None
            else:
                tag_library = module_name.split('.')[-1]
            tags.append({
                'name': tag_name,
                'title': title,
                'body': body,
                'meta': metadata,
                'library': tag_library,
            })
    return render_to_response('admin_doc/template_tag_index.html', {
        'root_path': urlresolvers.reverse('admin:index'),
        'tags': tags
    }, context_instance=RequestContext(request))
@staff_member_required
def template_filter_index(request):
    """List every registered template filter with its parsed documentation."""
    if not utils.docutils_is_available:
        return missing_docutils_page(request)
    load_all_installed_template_libraries()
    filters = []
    # Builtin libraries first (with no module name), then the app libraries.
    libraries = [(None, lib) for lib in template.builtins]
    libraries += template.libraries.items()
    for module_name, library in libraries:
        for filter_name, filter_func in library.filters.items():
            title, body, metadata = utils.parse_docstring(filter_func.__doc__)
            if title:
                title = utils.parse_rst(title, 'filter', _('filter:') + filter_name)
            if body:
                body = utils.parse_rst(body, 'filter', _('filter:') + filter_name)
            for key in metadata:
                metadata[key] = utils.parse_rst(metadata[key], 'filter', _('filter:') + filter_name)
            # Builtin filters have no library name to load them with.
            tag_library = None if library in template.builtins else module_name.split('.')[-1]
            filters.append({
                'name': filter_name,
                'title': title,
                'body': body,
                'meta': metadata,
                'library': tag_library,
            })
    context = {
        'root_path': urlresolvers.reverse('admin:index'),
        'filters': filters,
    }
    return render_to_response('admin_doc/template_filter_index.html', context,
                              context_instance=RequestContext(request))
@staff_member_required
def view_index(request):
    """List the views reachable from every configured ROOT_URLCONF."""
    if not utils.docutils_is_available:
        return missing_docutils_page(request)
    # ADMIN_FOR may point at several sites' settings; default to our own.
    if settings.ADMIN_FOR:
        settings_modules = [import_module(m) for m in settings.ADMIN_FOR]
    else:
        settings_modules = [settings]
    views = []
    for settings_mod in settings_modules:
        urlconf = import_module(settings_mod.ROOT_URLCONF)
        if Site._meta.installed:
            site_obj = Site.objects.get(pk=settings_mod.SITE_ID)
        else:
            site_obj = GenericSite()
        for (func, regex) in extract_views_from_urlpatterns(urlconf.urlpatterns):
            view_name = getattr(func, '__name__', func.__class__.__name__)
            views.append({
                'full_name': '%s.%s' % (func.__module__, view_name),
                'site_id': settings_mod.SITE_ID,
                'site': site_obj,
                'url': simplify_regex(regex),
            })
    context = {
        'root_path': urlresolvers.reverse('admin:index'),
        'views': views,
    }
    return render_to_response('admin_doc/view_index.html', context,
                              context_instance=RequestContext(request))
@staff_member_required
def view_detail(request, view):
    """
    Display the parsed docstring documentation for a single view.

    ``view`` is the dotted path to the view callable; paths that cannot
    be imported raise Http404.
    """
    if not utils.docutils_is_available:
        return missing_docutils_page(request)
    mod, func = urlresolvers.get_mod_func(view)
    try:
        view_func = getattr(import_module(mod), func)
    except (ImportError, AttributeError):
        raise Http404
    title, body, metadata = utils.parse_docstring(view_func.__doc__)
    if title:
        title = utils.parse_rst(title, 'view', _('view:') + view)
    if body:
        body = utils.parse_rst(body, 'view', _('view:') + view)
    for key in metadata:
        # Render metadata with the 'view' default role, consistent with how
        # title and body are rendered above (the original passed 'model'
        # here, an apparent copy-paste from model_detail).
        metadata[key] = utils.parse_rst(metadata[key], 'view', _('view:') + view)
    return render_to_response('admin_doc/view_detail.html', {
        'root_path': urlresolvers.reverse('admin:index'),
        'name': view,
        'summary': title,
        'body': body,
        'meta': metadata,
    }, context_instance=RequestContext(request))
@staff_member_required
def model_index(request):
    """List the _meta options of every installed model."""
    if not utils.docutils_is_available:
        return missing_docutils_page(request)
    model_meta_list = [model._meta for model in models.get_models()]
    context = {
        'root_path': urlresolvers.reverse('admin:index'),
        'models': model_meta_list,
    }
    return render_to_response('admin_doc/model_index.html', context,
                              context_instance=RequestContext(request))
@staff_member_required
def model_detail(request, app_label, model_name):
    """
    Display field, method and related-object documentation for one model.

    ``model_name`` is matched case-insensitively against the models of
    ``app_label``; unknown apps or models raise Http404.
    """
    if not utils.docutils_is_available:
        return missing_docutils_page(request)
    # Get the model class.
    try:
        app_mod = models.get_app(app_label)
    except ImproperlyConfigured:
        raise Http404(_("App %r not found") % app_label)
    model = None
    for m in models.get_models(app_mod):
        if m._meta.object_name.lower() == model_name:
            model = m
            break
    if model is None:
        raise Http404(_("Model %(model_name)r not found in app %(app_label)r") % {'model_name': model_name, 'app_label': app_label})
    opts = model._meta
    # Gather fields/field descriptions.
    fields = []
    for field in opts.fields:
        # ForeignKey is a special case since the field will actually be a
        # descriptor that returns the other object.
        if isinstance(field, models.ForeignKey):
            data_type = field.rel.to.__name__
            # Use a distinct name for the related app's label; the original
            # code clobbered the app_label function argument here.
            rel_app_label = field.rel.to._meta.app_label
            verbose = utils.parse_rst((_("the related `%(app_label)s.%(data_type)s` object") % {'app_label': rel_app_label, 'data_type': data_type}), 'model', _('model:') + data_type)
        else:
            data_type = get_readable_field_data_type(field)
            verbose = field.verbose_name
        fields.append({
            'name': field.name,
            'data_type': data_type,
            'verbose': verbose,
            'help_text': field.help_text,
        })
    # Gather many-to-many fields.
    for field in opts.many_to_many:
        data_type = field.rel.to.__name__
        rel_app_label = field.rel.to._meta.app_label
        verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': rel_app_label, 'object_name': data_type}
        fields.append({
            'name': "%s.all" % field.name,
            'data_type': 'List',
            'verbose': utils.parse_rst(_("all %s") % verbose, 'model', _('model:') + opts.module_name),
        })
        fields.append({
            'name': "%s.count" % field.name,
            'data_type': 'Integer',
            'verbose': utils.parse_rst(_("number of %s") % verbose, 'model', _('model:') + opts.module_name),
        })
    # Gather model methods that take no argument besides self.
    for func_name, func in model.__dict__.items():
        if inspect.isfunction(func) and len(inspect.getargspec(func)[0]) == 1:
            # str.startswith accepts a tuple of prefixes; this replaces the
            # original try/StopIteration control-flow trick.
            if func_name.startswith(MODEL_METHODS_EXCLUDE):
                continue
            verbose = func.__doc__
            if verbose:
                verbose = utils.parse_rst(utils.trim_docstring(verbose), 'model', _('model:') + opts.module_name)
            fields.append({
                'name': func_name,
                'data_type': get_return_data_type(func_name),
                'verbose': verbose,
            })
    # Gather related objects.
    for rel in opts.get_all_related_objects() + opts.get_all_related_many_to_many_objects():
        verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': rel.opts.app_label, 'object_name': rel.opts.object_name}
        accessor = rel.get_accessor_name()
        fields.append({
            'name': "%s.all" % accessor,
            'data_type': 'List',
            'verbose': utils.parse_rst(_("all %s") % verbose, 'model', _('model:') + opts.module_name),
        })
        fields.append({
            'name': "%s.count" % accessor,
            'data_type': 'Integer',
            'verbose': utils.parse_rst(_("number of %s") % verbose, 'model', _('model:') + opts.module_name),
        })
    return render_to_response('admin_doc/model_detail.html', {
        'root_path': urlresolvers.reverse('admin:index'),
        'name': '%s.%s' % (opts.app_label, opts.object_name),
        'summary': _("Fields on %s objects") % opts.object_name,
        'description': model.__doc__,
        'fields': fields,
    }, context_instance=RequestContext(request))
@staff_member_required
def template_detail(request, template):
    """
    Show, for every ADMIN_FOR site, which TEMPLATE_DIRS entry would supply
    ``template`` and whether the file exists there.
    """
    templates = []
    for site_settings_module in settings.ADMIN_FOR:
        settings_mod = import_module(site_settings_module)
        if Site._meta.installed:
            site_obj = Site.objects.get(pk=settings_mod.SITE_ID)
        else:
            site_obj = GenericSite()
        for dir in settings_mod.TEMPLATE_DIRS:
            template_file = os.path.join(dir, template)
            templates.append({
                'file': template_file,
                'exists': os.path.exists(template_file),
                # Bind the path as a default argument: a plain closure would
                # late-bind ``template_file`` so every entry's lambda would
                # read the file from the *last* directory of the loop.
                'contents': lambda path=template_file: os.path.exists(path) and open(path).read() or '',
                'site_id': settings_mod.SITE_ID,
                'site': site_obj,
                'order': list(settings_mod.TEMPLATE_DIRS).index(dir),
            })
    return render_to_response('admin_doc/template_detail.html', {
        'root_path': urlresolvers.reverse('admin:index'),
        'name': template,
        'templates': templates,
    }, context_instance=RequestContext(request))
####################
# Helper functions #
####################
def missing_docutils_page(request):
    """Render an error page for installations without the docutils package."""
    template_name = 'admin_doc/missing_docutils.html'
    return render_to_response(template_name)
def load_all_installed_template_libraries():
    """
    Import and register every template tag library found in the installed
    apps' templatetags packages.

    This is a best-effort pre-load so the tag/filter index views can
    enumerate everything; libraries that fail to load are skipped.
    """
    # Load/register all template tag libraries from installed apps.
    for module_name in template.get_templatetags_modules():
        mod = import_module(module_name)
        try:
            libraries = [
                os.path.splitext(p)[0]
                for p in os.listdir(os.path.dirname(mod.__file__))
                if p.endswith('.py') and p[0].isalpha()
            ]
        except OSError:
            libraries = []
        for library_name in libraries:
            try:
                # get_library registers the library as a side effect; the
                # return value is not needed (the original bound it to an
                # unused variable with Python-2-only ``except X, e`` syntax).
                template.get_library(library_name)
            except template.InvalidTemplateLibrary:
                # Deliberately ignored: a broken library must not break the
                # documentation index pages.
                pass
def get_return_data_type(func_name):
    """Guess a display data type from an accessor-style function name."""
    if not func_name.startswith('get_'):
        return ''
    if func_name.endswith('_list'):
        return 'List'
    if func_name.endswith('_count'):
        return 'Integer'
    return ''
def get_readable_field_data_type(field):
    """Return the field type's human-readable description.

    Descriptions may contain %-format placeholders, which are filled in
    from the field instance's own attributes (``field.__dict__``).
    """
    description_template = field.description
    return description_template % field.__dict__
def extract_views_from_urlpatterns(urlpatterns, base=''):
    """
    Return a list of views from a list of urlpatterns.

    Each object in the returned list is a two-tuple: (view_func, regex),
    where regex is the pattern prefixed with ``base``.
    """
    views = []
    for pattern in urlpatterns:
        if hasattr(pattern, 'url_patterns'):
            # An include(): recurse with the prefix extended by this regex.
            try:
                sub_patterns = pattern.url_patterns
            except ImportError:
                continue
            views.extend(extract_views_from_urlpatterns(
                sub_patterns, base + pattern.regex.pattern))
        elif hasattr(pattern, 'callback'):
            try:
                views.append((pattern.callback, base + pattern.regex.pattern))
            except ViewDoesNotExist:
                continue
        else:
            raise TypeError(_("%s does not appear to be a urlpattern object") % pattern)
    return views
named_group_matcher = re.compile(r'\(\?P(<\w+>).+?\)')
non_named_group_matcher = re.compile(r'\(.*?\)')

def simplify_regex(pattern):
    r"""
    Clean up urlpattern regexes into something somewhat readable by Mere Humans:
    turns something like "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
    into "<sport_slug>/athletes/<athlete_slug>/"
    """
    # Named groups become their <name> placeholder.
    pattern = named_group_matcher.sub(lambda match: match.group(1), pattern)
    # Remaining (unnamed) groups become a generic placeholder.
    pattern = non_named_group_matcher.sub("<var>", pattern)
    # Strip regex metacharacters that survived the group substitutions.
    for old, new in (('^', ''), ('$', ''), ('?', ''), ('//', '/'), ('\\', '')):
        pattern = pattern.replace(old, new)
    if not pattern.startswith('/'):
        pattern = '/' + pattern
    return pattern
| bsd-3-clause |
boundarydevices/skia | platform_tools/android/tests/generate_user_config_tests.py | 67 | 3599 | #!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test generate_user_config.py.
"""
import argparse
import os
import shutil
import sys
import tempfile
import test_variables
import unittest
import utils
sys.path.append(test_variables.GYP_GEN_DIR)
from generate_user_config import generate_user_config as gen_config
# Name of SkUserConfig file.
USER_CONFIG_NAME = 'SkUserConfig-h.txt'
# A filename that is not expected to exist on disk (exercised by the
# missing-file test below).
MISSING_FILENAME = 'missing-filename.xxx'
# Path to unchanging Dummy SkUserConfig file.
FULL_DUMMY_PATH = os.path.join(os.path.dirname(__file__), 'inputs',
                               USER_CONFIG_NAME)
# Hint appended to assertion failures, explaining how to rebaseline.
REBASELINE_MSG = ('If you\'ve modified generate_user_config.py, run '
                  '"generate_user_config_tests.py --rebaseline" to rebaseline')
def generate_dummy_user_config(original_sk_user_config,
                               require_sk_user_config, target_dir):
  """Run gen_config() with a fixed, arbitrary set of defines.

  Args:
    original_sk_user_config: path of the SkUserConfig file to start from.
    require_sk_user_config: whether a missing original file is an error.
    target_dir: directory the generated file is written to.
  """
  arbitrary_defines = ['SK_BUILD_FOR_ANDROID',
                       'SK_BUILD_FOR_ANDROID_FRAMEWORK',
                       'SK_SCALAR_IS_FLOAT',
                       'foo',
                       'bar']
  gen_config(original_sk_user_config=original_sk_user_config,
             require_sk_user_config=require_sk_user_config,
             target_dir=target_dir, ordered_set=arbitrary_defines)
class GenUserConfigTest(unittest.TestCase):
  """Tests for generate_user_config()."""

  def _make_temp_dir(self):
    """Return a fresh temp dir that is removed after the test.

    addCleanup runs even when an assertion fails, unlike the original
    trailing shutil.rmtree calls, which leaked the directory on failure.
    """
    tmp = tempfile.mkdtemp()
    self.addCleanup(shutil.rmtree, tmp)
    return tmp

  def test_missing_sk_user_config(self):
    tmp = self._make_temp_dir()
    original = os.path.join(tmp, MISSING_FILENAME)
    assert not os.path.exists(original)

    # With require_sk_user_config set to True, an AssertionError will be
    # thrown when original_sk_user_config is missing.
    with self.assertRaises(AssertionError):
      ordered_set = [ 'define' ]
      gen_config(original_sk_user_config=original,
                 require_sk_user_config=True,
                 target_dir=tmp, ordered_set=ordered_set)

    # With require_sk_user_config set to False, it is okay for
    # original_sk_user_config to be missing.
    generate_dummy_user_config(original_sk_user_config=original,
                               require_sk_user_config=False, target_dir=tmp)
    actual_name = os.path.join(tmp, MISSING_FILENAME)
    utils.compare_to_expectation(actual_name=actual_name,
                                 expectation_name=MISSING_FILENAME,
                                 assert_true=self.assertTrue,
                                 msg=REBASELINE_MSG)

  def test_gen_config(self):
    tmp = self._make_temp_dir()
    generate_dummy_user_config(FULL_DUMMY_PATH, True, tmp)
    actual_name = os.path.join(tmp, USER_CONFIG_NAME)
    utils.compare_to_expectation(actual_name=actual_name,
                                 expectation_name=USER_CONFIG_NAME,
                                 assert_true=self.assertTrue, msg=REBASELINE_MSG)
def main():
  """Run the unit tests and raise if any of them failed."""
  loader = unittest.TestLoader()
  suite = loader.loadTestsFromTestCase(GenUserConfigTest)
  results = unittest.TextTestRunner(verbosity=2).run(suite)
  # print(...) parses identically under Python 2 (parenthesized expression)
  # and Python 3; the original bare print statement was Python-2 only.
  print(repr(results))
  if not results.wasSuccessful():
    raise Exception('failed one or more unittests')
def rebaseline():
  """Regenerate the checked-in expectation files for both test cases."""
  for config_path, required in ((FULL_DUMMY_PATH, True),
                                (MISSING_FILENAME, False)):
    generate_dummy_user_config(config_path, required, utils.EXPECTATIONS_DIR)
if __name__ == "__main__":
  # Command-line entry point: --rebaseline regenerates the expectation
  # files; otherwise the unit tests are run.
  parser = argparse.ArgumentParser()
  parser.add_argument('-r', '--rebaseline', help='Rebaseline expectations.',
                      action='store_true')
  args = parser.parse_args()
  if args.rebaseline:
    rebaseline()
  else:
    main()
| bsd-3-clause |
pavlova-marina/QGIS | tests/src/python/test_qgspoint.py | 11 | 1790 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsPoint.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Tim Sutton'
__date__ = '20/08/2012'
__copyright__ = 'Copyright 2012, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis
from qgis.core import QgsPoint
from utilities import getQgisTestApp, TestCase, unittest
QGISAPP, CANVAS, IFACE, PARENT = getQgisTestApp()
class TestQgsPoint(TestCase):
    """Unit tests for QgsPoint accessors, string conversion and hashing."""

    def __init__(self, methodName):
        """Run once on class initialisation."""
        unittest.TestCase.__init__(self, methodName)

    def setUp(self):
        # A fresh point for every test method.
        self.mPoint = QgsPoint(10.0, 10.0)

    def test_Point(self):
        expected = 10.0
        actual = self.mPoint.x()
        message = 'Expected: %s Got: %s' % (expected, actual)
        assert expected == actual, message

    def test_pointToString(self):
        expected = '10, 10'
        actual = self.mPoint.toString()
        message = 'Expected: %s Got: %s' % (expected, actual)
        assert expected == actual, message

    def test_hash(self):
        points = [QgsPoint(2.0, 1.0),
                  QgsPoint(2.0, 2.0),
                  QgsPoint(1.0, 2.0),
                  QgsPoint(1.0, 1.0),
                  QgsPoint(2.0, 1.0)]
        # Distinct coordinates must hash differently...
        assert points[0].__hash__() != points[1].__hash__()
        # ...while equal coordinates must hash identically.
        assert points[4].__hash__() == points[0].__hash__()
        mySet = set(points)
        assert len(mySet) == 4
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| gpl-2.0 |
ptrendx/mxnet | example/reinforcement-learning/dqn/replay_memory.py | 9 | 12097 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function
import mxnet as mx
import mxnet.ndarray as nd
import numpy
import copy
from utils import get_numpy_rng
class ReplayMemory(object):
    """Fixed-size ring buffer of (state, action, reward, terminate) tuples
    for DQN-style experience replay.

    ``top`` is the next write index and ``size`` the number of valid
    entries, so all sampling uses wrap-around indexing via
    ``numpy.take(..., mode='wrap')``.  Sampled history windows that
    contain an episode boundary (a set terminate flag) are rejected.
    """
    def __init__(self, history_length, memory_size=1000000, replay_start_size=100,
                 state_dim=(), action_dim=(), state_dtype='uint8', action_dtype='uint8',
                 ctx=mx.gpu()):
        self.rng = get_numpy_rng()
        self.ctx = ctx
        assert type(action_dim) is tuple and type(state_dim) is tuple, \
            "Must set the dimensions of state and action for replay memory"
        self.state_dim = state_dim
        # A 1-element action vector is stored as a scalar per step.
        if action_dim == (1,):
            self.action_dim = ()
        else:
            self.action_dim = action_dim
        self.states = numpy.zeros((memory_size,) + state_dim, dtype=state_dtype)
        self.actions = numpy.zeros((memory_size,) + action_dim, dtype=action_dtype)
        self.rewards = numpy.zeros(memory_size, dtype='float32')
        self.terminate_flags = numpy.zeros(memory_size, dtype='bool')
        self.memory_size = memory_size
        self.replay_start_size = replay_start_size
        self.history_length = history_length
        self.top = 0
        self.size = 0
    def latest_slice(self):
        """Return the ``history_length`` most recently appended states
        (wrapping around the ring buffer)."""
        if self.size >= self.history_length:
            return self.states.take(numpy.arange(self.top - self.history_length, self.top),
                                    axis=0, mode="wrap")
        else:
            assert False, "We can only slice from the replay memory if the " \
                          "replay size is larger than the length of frames we want to take" \
                          "as the input."
    @property
    def sample_enabled(self):
        # Sampling is only allowed once the warm-up region has been filled.
        return self.size > self.replay_start_size
    def clear(self):
        """
        Clear all contents in the replay memory
        """
        self.states[:] = 0
        self.actions[:] = 0
        self.rewards[:] = 0
        self.terminate_flags[:] = 0
        self.top = 0
        self.size = 0
    def reset(self):
        """
        Reset all the flags stored in the replay memory.
        It will not clear the inner-content and is a light/quick version of clear()
        """
        self.top = 0
        self.size = 0
    def copy(self):
        """Return a copy whose arrays hold only the valid region of this
        memory (entries outside [top - size, top) stay zeroed)."""
        # TODO Test the copy function
        replay_memory = copy.copy(self)
        replay_memory.states = numpy.zeros(self.states.shape, dtype=self.states.dtype)
        replay_memory.actions = numpy.zeros(self.actions.shape, dtype=self.actions.dtype)
        replay_memory.rewards = numpy.zeros(self.rewards.shape, dtype='float32')
        replay_memory.terminate_flags = numpy.zeros(self.terminate_flags.shape, dtype='bool')
        replay_memory.states[numpy.arange(self.top-self.size, self.top), ::] = \
            self.states[numpy.arange(self.top-self.size, self.top)]
        replay_memory.actions[numpy.arange(self.top-self.size, self.top)] = \
            self.actions[numpy.arange(self.top-self.size, self.top)]
        replay_memory.rewards[numpy.arange(self.top-self.size, self.top)] = \
            self.rewards[numpy.arange(self.top-self.size, self.top)]
        replay_memory.terminate_flags[numpy.arange(self.top-self.size, self.top)] = \
            self.terminate_flags[numpy.arange(self.top-self.size, self.top)]
        return replay_memory
    def append(self, obs, action, reward, terminate_flag):
        """Store one transition at ``top`` and advance the ring pointer."""
        self.states[self.top] = obs
        self.actions[self.top] = action
        self.rewards[self.top] = reward
        self.terminate_flags[self.top] = terminate_flag
        self.top = (self.top + 1) % self.memory_size
        if self.size < self.memory_size:
            self.size += 1
    def sample_last(self, batch_size, states, offset):
        """Fill ``states[offset:offset + batch_size]`` with history windows
        taken walking backwards from the newest entry, skipping windows
        that straddle an episode boundary.  Returns the matching
        (actions, rewards, terminate_flags) arrays for the window ends.
        """
        assert self.size >= batch_size and self.replay_start_size >= self.history_length
        assert(0 <= self.size <= self.memory_size)
        assert(0 <= self.top <= self.memory_size)
        if self.size <= self.replay_start_size:
            raise ValueError("Size of the effective samples of the ReplayMemory must be "
                             "bigger than start_size! Currently, size=%d, start_size=%d"
                             %(self.size, self.replay_start_size))
        actions = numpy.empty((batch_size,) + self.action_dim, dtype=self.actions.dtype)
        rewards = numpy.empty(batch_size, dtype='float32')
        terminate_flags = numpy.empty(batch_size, dtype='bool')
        counter = 0
        first_index = self.top - self.history_length - 1
        while counter < batch_size:
            full_indices = numpy.arange(first_index, first_index + self.history_length+1)
            end_index = first_index + self.history_length
            if numpy.any(self.terminate_flags.take(full_indices[0:self.history_length], mode='wrap')):
                # Check if terminates in the middle of the sample!
                first_index -= 1
                continue
            states[counter + offset] = self.states.take(full_indices, axis=0, mode='wrap')
            actions[counter] = self.actions.take(end_index, axis=0, mode='wrap')
            rewards[counter] = self.rewards.take(end_index, mode='wrap')
            terminate_flags[counter] = self.terminate_flags.take(end_index, mode='wrap')
            counter += 1
            first_index -= 1
        return actions, rewards, terminate_flags
    def sample_mix(self, batch_size, states, offset, current_index):
        """Like ``sample_last`` but the first window is anchored at
        ``current_index`` relative to ``top``, and every subsequent window
        start is drawn uniformly from the valid region of the buffer.
        """
        assert self.size >= batch_size and self.replay_start_size >= self.history_length
        assert(0 <= self.size <= self.memory_size)
        assert(0 <= self.top <= self.memory_size)
        if self.size <= self.replay_start_size:
            raise ValueError("Size of the effective samples of the ReplayMemory must be bigger than "
                             "start_size! Currently, size=%d, start_size=%d"
                             %(self.size, self.replay_start_size))
        actions = numpy.empty((batch_size,) + self.action_dim, dtype=self.actions.dtype)
        rewards = numpy.empty(batch_size, dtype='float32')
        terminate_flags = numpy.empty(batch_size, dtype='bool')
        counter = 0
        first_index = self.top - self.history_length + current_index
        thisid = first_index
        while counter < batch_size:
            full_indices = numpy.arange(thisid, thisid + self.history_length+1)
            end_index = thisid + self.history_length
            if numpy.any(self.terminate_flags.take(full_indices[0:self.history_length], mode='wrap')):
                # Check if terminates in the middle of the sample!
                thisid -= 1
                continue
            states[counter+offset] = self.states.take(full_indices, axis=0, mode='wrap')
            actions[counter] = self.actions.take(end_index, axis=0, mode='wrap')
            rewards[counter] = self.rewards.take(end_index, mode='wrap')
            terminate_flags[counter] = self.terminate_flags.take(end_index, mode='wrap')
            counter += 1
            thisid = self.rng.randint(low=self.top - self.size, high=self.top - self.history_length-1)
        return actions, rewards, terminate_flags
    def sample_inplace(self, batch_size, states, offset):
        """Sample ``batch_size`` random history windows directly into the
        caller-provided ``states`` buffer, starting at ``offset``.
        Returns (actions, rewards, terminate_flags) for the window ends.
        """
        assert self.size >= batch_size and self.replay_start_size >= self.history_length
        assert(0 <= self.size <= self.memory_size)
        assert(0 <= self.top <= self.memory_size)
        if self.size <= self.replay_start_size:
            raise ValueError("Size of the effective samples of the ReplayMemory must be "
                             "bigger than start_size! Currently, size=%d, start_size=%d"
                             %(self.size, self.replay_start_size))
        actions = numpy.zeros((batch_size,) + self.action_dim, dtype=self.actions.dtype)
        rewards = numpy.zeros(batch_size, dtype='float32')
        terminate_flags = numpy.zeros(batch_size, dtype='bool')
        counter = 0
        while counter < batch_size:
            index = self.rng.randint(low=self.top - self.size + 1, high=self.top - self.history_length )
            transition_indices = numpy.arange(index, index + self.history_length+1)
            initial_indices = transition_indices - 1
            end_index = index + self.history_length - 1
            if numpy.any(self.terminate_flags.take(initial_indices[0:self.history_length], mode='wrap')):
                # Check if terminates in the middle of the sample!
                continue
            states[counter + offset] = self.states.take(initial_indices, axis=0, mode='wrap')
            actions[counter] = self.actions.take(end_index, axis=0, mode='wrap')
            rewards[counter] = self.rewards.take(end_index, mode='wrap')
            terminate_flags[counter] = self.terminate_flags.take(end_index, mode='wrap')
            # next_states[counter] = self.states.take(transition_indices, axis=0, mode='wrap')
            counter += 1
        return actions, rewards, terminate_flags
    def sample(self, batch_size):
        """Sample ``batch_size`` transitions and return the 5-tuple
        (states, actions, rewards, next_states, terminate_flags), where
        next_states is states shifted forward by one step.
        """
        assert self.size >= batch_size and self.replay_start_size >= self.history_length
        assert(0 <= self.size <= self.memory_size)
        assert(0 <= self.top <= self.memory_size)
        if self.size <= self.replay_start_size:
            raise ValueError("Size of the effective samples of the ReplayMemory must be bigger than "
                             "start_size! Currently, size=%d, start_size=%d"
                             %(self.size, self.replay_start_size))
        #TODO Possibly states + inds for less memory access
        states = numpy.zeros((batch_size, self.history_length) + self.state_dim,
                             dtype=self.states.dtype)
        actions = numpy.zeros((batch_size,) + self.action_dim, dtype=self.actions.dtype)
        rewards = numpy.zeros(batch_size, dtype='float32')
        terminate_flags = numpy.zeros(batch_size, dtype='bool')
        next_states = numpy.zeros((batch_size, self.history_length) + self.state_dim,
                                  dtype=self.states.dtype)
        counter = 0
        while counter < batch_size:
            index = self.rng.randint(low=self.top - self.size + 1, high=self.top - self.history_length)
            transition_indices = numpy.arange(index, index + self.history_length)
            initial_indices = transition_indices - 1
            end_index = index + self.history_length - 1
            while numpy.any(self.terminate_flags.take(initial_indices, mode='wrap')):
                # Check if terminates in the middle of the sample!
                index -= 1
                transition_indices = numpy.arange(index, index + self.history_length)
                initial_indices = transition_indices - 1
                end_index = index + self.history_length - 1
            states[counter] = self.states.take(initial_indices, axis=0, mode='wrap')
            actions[counter] = self.actions.take(end_index, axis=0, mode='wrap')
            rewards[counter] = self.rewards.take(end_index, mode='wrap')
            terminate_flags[counter] = self.terminate_flags.take(end_index, mode='wrap')
            next_states[counter] = self.states.take(transition_indices, axis=0, mode='wrap')
            counter += 1
        return states, actions, rewards, next_states, terminate_flags
| apache-2.0 |
toshywoshy/ansible | lib/ansible/plugins/httpapi/exos.py | 38 | 10996 | # Copyright (c) 2019 Extreme Networks.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
author:
- "Ujwal Komarla (@ujwalkomarla)"
httpapi: exos
short_description: Use EXOS REST APIs to communicate with EXOS platform
description:
- This plugin provides low level abstraction api's to send REST API
requests to EXOS network devices and receive JSON responses.
version_added: "2.8"
"""
import json
import re
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import ConnectionError
from ansible.module_utils.network.common.utils import to_list
from ansible.plugins.httpapi import HttpApiBase
import ansible.module_utils.six.moves.http_cookiejar as cookiejar
from ansible.module_utils.common._collections_compat import Mapping
from ansible.module_utils.network.common.config import NetworkConfig, dumps
class HttpApi(HttpApiBase):
    def __init__(self, *args, **kwargs):
        """Set up an empty device-info cache and the cookie jar that is
        passed with every request (it holds the auth cookie after login)."""
        super(HttpApi, self).__init__(*args, **kwargs)
        self._device_info = None
        self._auth_token = cookiejar.CookieJar()
    def login(self, username, password):
        """POST the credentials to /auth/token; the cookie jar passed by
        send_request() is presumably updated with the returned auth
        cookie by the connection layer -- confirm against the device."""
        auth_path = '/auth/token'
        credentials = {'username': username, 'password': password}
        self.send_request(path=auth_path, data=json.dumps(credentials), method='POST')
    def logout(self):
        """Intentionally a no-op; no logout request is sent to the device."""
        pass
    def handle_httperror(self, exc):
        # Per the HttpApiBase contract, returning False means the HTTPError
        # is not handled here and is re-raised to the caller.
        return False
def send_request(self, path, data=None, method='GET', **message_kwargs):
headers = {'Content-Type': 'application/json'}
response, response_data = self.connection.send(path, data, method=method, cookies=self._auth_token, headers=headers, **message_kwargs)
try:
if response.status == 204:
response_data = {}
else:
response_data = json.loads(to_text(response_data.getvalue()))
except ValueError:
raise ConnectionError('Response was not valid JSON, got {0}'.format(
to_text(response_data.getvalue())
))
return response_data
    def run_commands(self, commands, check_rc=True):
        """
        Execute CLI commands through the device's /jsonrpc endpoint.

        ``commands`` may be a command string, a dict with a 'command' key
        (and an optional 'output' of 'text' or 'json'), or a list of
        either.  Returns one decoded result per command.  Raises
        ConnectionError for transport/JSON-RPC errors and ValueError for
        an invalid 'output' value or a missing CLIoutput field.

        Note: check_rc is accepted for API compatibility but is not used
        in this implementation.
        """
        if commands is None:
            raise ValueError("'commands' value is required")
        headers = {'Content-Type': 'application/json'}
        responses = list()
        for cmd in to_list(commands):
            if not isinstance(cmd, Mapping):
                cmd = {'command': cmd}
            cmd['command'] = strip_run_script_cli2json(cmd['command'])
            output = cmd.pop('output', None)
            if output and output not in self.get_option_values().get('output'):
                raise ValueError("'output' value is %s is invalid. Valid values are %s" % (output, ','.join(self.get_option_values().get('output'))))
            data = request_builder(cmd['command'])
            response, response_data = self.connection.send('/jsonrpc', data, cookies=self._auth_token, headers=headers, method='POST')
            try:
                response_data = json.loads(to_text(response_data.getvalue()))
            except ValueError:
                raise ConnectionError('Response was not valid JSON, got {0}'.format(
                    to_text(response_data.getvalue())
                ))
            if response_data.get('error', None):
                raise ConnectionError("Request Error, got {0}".format(response_data['error']))
            if not response_data.get('result', None):
                raise ConnectionError("Request Error, got {0}".format(response_data))
            response_data = response_data['result']
            if output and output == 'text':
                # For 'text' output the JSON-RPC result wraps the raw CLI
                # text; unwrap it and surface device-side command errors.
                statusOut = getKeyInResponse(response_data, 'status')
                cliOut = getKeyInResponse(response_data, 'CLIoutput')
                if statusOut == "ERROR":
                    raise ConnectionError("Command error({1}) for request {0}".format(cmd['command'], cliOut))
                if cliOut is None:
                    raise ValueError("Response for request {0} doesn't have the CLIoutput field, got {1}".format(cmd['command'], response_data))
                response_data = cliOut
            responses.append(response_data)
        return responses
def get_device_info(self):
device_info = {}
device_info['network_os'] = 'exos'
reply = self.run_commands({'command': 'show switch detail', 'output': 'text'})
data = to_text(reply, errors='surrogate_or_strict').strip()
match = re.search(r'ExtremeXOS version (\S+)', data)
if match:
device_info['network_os_version'] = match.group(1)
match = re.search(r'System Type: +(\S+)', data)
if match:
device_info['network_os_model'] = match.group(1)
match = re.search(r'SysName: +(\S+)', data)
if match:
device_info['network_os_hostname'] = match.group(1)
return device_info
    def get_device_operations(self):
        """Capability flags describing which configuration operations this
        platform supports."""
        return {
            'supports_diff_replace': False,  # identify if config should be merged or replaced is supported
            'supports_commit': False,  # identify if commit is supported by device or not
            'supports_rollback': False,  # identify if rollback is supported or not
            'supports_defaults': True,  # identify if fetching running config with default is supported
            'supports_commit_comment': False,  # identify if adding comment to commit is supported of not
            'supports_onbox_diff': False,  # identify if on box diff capability is supported or not
            'supports_generate_diff': True,  # identify if diff capability is supported within plugin
            'supports_multiline_delimiter': False,  # identify if multiline demiliter is supported within config
            'supports_diff_match': True,  # identify if match is supported
            'supports_diff_ignore_lines': True,  # identify if ignore line in diff is supported
            'supports_config_replace': False,  # identify if running config replace with candidate config is supported
            'supports_admin': False,  # identify if admin configure mode is supported or not
            'supports_commit_label': False  # identify if commit label is supported or not
        }
    def get_option_values(self):
        """Valid values for the format/diff/output options accepted by
        this plugin (validated in run_commands and get_diff)."""
        return {
            'format': ['text', 'json'],
            'diff_match': ['line', 'strict', 'exact', 'none'],
            'diff_replace': ['line', 'block'],
            'output': ['text', 'json']
        }
def get_capabilities(self):
result = {}
result['rpc'] = ['get_default_flag', 'run_commands', 'get_config', 'send_request', 'get_capabilities', 'get_diff']
result['device_info'] = self.get_device_info()
result['device_operations'] = self.get_device_operations()
result.update(self.get_option_values())
result['network_api'] = 'exosapi'
return json.dumps(result)
    def get_default_flag(self):
        """Return the flag appended to show commands to include defaults."""
        # The flag to modify the command to collect configuration with defaults
        return 'detail'
def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'):
    """Compute the configuration diff between *candidate* and *running*.

    :param candidate: candidate configuration text (required).
    :param running: current running configuration text; when omitted (or
        when diff_match == 'none') the whole candidate is returned.
    :param diff_match: one of the values from get_option_values()['diff_match'].
    :param diff_ignore_lines: lines to ignore when comparing against running.
    :param path: config hierarchy path passed through to NetworkConfig.
    :param diff_replace: one of the values from get_option_values()['diff_replace'].
    :returns: dict with a single 'config_diff' key holding the diff text
        ('' when there is no difference).
    :raises ValueError: for a missing candidate or an invalid option value.
    """
    diff = {}
    device_operations = self.get_device_operations()
    option_values = self.get_option_values()
    if candidate is None and device_operations['supports_generate_diff']:
        raise ValueError("candidate configuration is required to generate diff")
    # Validate option values up front so callers get a clear error.
    # (Fixed message typo: "in invalid" -> "is invalid".)
    if diff_match not in option_values['diff_match']:
        raise ValueError("'match' value %s is invalid, valid values are %s" % (diff_match, ', '.join(option_values['diff_match'])))
    if diff_replace not in option_values['diff_replace']:
        raise ValueError("'replace' value %s is invalid, valid values are %s" % (diff_replace, ', '.join(option_values['diff_replace'])))
    # prepare candidate configuration
    candidate_obj = NetworkConfig(indent=1)
    candidate_obj.load(candidate)
    if running and diff_match != 'none' and diff_replace != 'config':
        # running configuration: diff candidate against it
        running_obj = NetworkConfig(indent=1, contents=running, ignore_lines=diff_ignore_lines)
        configdiffobjs = candidate_obj.difference(running_obj, path=path, match=diff_match, replace=diff_replace)
    else:
        # No running config to compare against: emit the full candidate.
        configdiffobjs = candidate_obj.items
    diff['config_diff'] = dumps(configdiffobjs, 'commands') if configdiffobjs else ''
    return diff
def get_config(self, source='running', format='text', flags=None):
    """Fetch the device configuration.

    :param source: 'running' or 'startup'.
    :param format: output format; must be in get_option_values()['format'].
    :param flags: extra tokens appended to the show command (e.g. 'detail').
    :returns: the first command result, or {} when no startup config exists.
    :raises ValueError: for an unknown format or unsupported source.
    """
    valid = self.get_option_values()
    if format not in valid['format']:
        raise ValueError("'format' value %s is invalid. Valid values are %s" % (format, ','.join(valid['format'])))
    commands = {'running': 'show configuration', 'startup': 'debug cfgmgr show configuration file'}
    if source not in commands:
        raise ValueError("fetching configuration from %s is not supported" % source)
    cmd = {'command': commands[source], 'output': 'text'}
    if source == 'startup':
        # The startup config lives in a named .cfg file; discover which one
        # is currently selected via 'show switch'.
        reply = self.run_commands({'command': 'show switch', 'format': 'text'})
        data = to_text(reply, errors='surrogate_or_strict').strip()
        selected = re.search(r'Config Selected: +(\S+)\.cfg', data, re.MULTILINE)
        if not selected:
            # No startup (selected) configuration on the device.
            return {}
        # NOTE(review): the filename is appended without a separator, as in
        # the original code — confirm the device accepts this form.
        cmd['command'] += selected.group(1)
    cmd['command'] += ' '.join(to_list(flags))
    cmd['command'] = cmd['command'].strip()
    return self.run_commands(cmd)[0]
def request_builder(command, reqid=""):
    """Wrap *command* in a JSON-RPC 2.0 'cli' request payload string."""
    payload = {'jsonrpc': '2.0', 'id': reqid, 'method': 'cli', 'params': to_list(command)}
    return json.dumps(payload)
def strip_run_script_cli2json(command):
    """Remove the 'run script cli2json.py' wrapper from *command*, if present."""
    prefix = 'run script cli2json.py'
    if to_text(command, errors="surrogate_then_replace").startswith(prefix):
        command = str(command).replace(prefix, '')
    return command
def getKeyInResponse(response, key):
    """Return the value of *key* from the first mapping in *response* that
    contains it, or None when no item has the key."""
    for entry in response:
        if key in entry:
            return entry[key]
    return None
| gpl-3.0 |
zeispro/Plugin.Video.America | servers/rapidtube.py | 42 | 2007 | # -*- coding: iso-8859-1 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para Rapidtube
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
# Returns an array of possible video url's from the page_url
def get_video_url( page_url , premium = False , user="" , password="" , video_password="" ):
    """Return the playable video URLs found on a rapidtube page.

    Each entry is a [label, url] pair, where the label carries the file
    extension plus the server tag expected by the caller.
    """
    logger.info("[rapidtube.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []
    # Download the page, spoofing a browser User-Agent and a referer so
    # the site serves the normal player markup.
    headers = [ ['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'],['Referer','http://www.rapidtube.com/'] ]
    data = scrapertools.cache_page(page_url , headers = headers)
    # Normalize quotes so a single pattern matches both styles.
    data = data.replace('"',"'")
    # Removed leftover debug statement that printed the whole page to stdout.
    # Extract the video file URLs from the player config.
    patronvideos = "file: '([^']+)'"
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    for match in matches:
        # Label = ".<extension> [rapidtube]"; force playback from the start.
        video_urls.append( [ "."+match.rsplit('.',1)[1]+" [rapidtube]" , match+"?start=0" ] )
    for video_url in video_urls:
        logger.info("[rapidtube.py] %s - %s" % (video_url[0],video_url[1]))
    return video_urls
# Finds this server's videos in the given text
def find_videos(data):
    """Scan *data* for rapidtube video URLs.

    Returns a list of [title, url, server] triples with duplicates skipped.
    """
    encontrados = set()
    devuelve = []
    patronvideos = r'(http://(?:\w+\.)?rapidtube.com/[a-zA-Z0-9]+)'
    logger.info("[rapidtube.py] find_videos #"+patronvideos+"#")
    for url in re.compile(patronvideos,re.DOTALL).findall(data):
        if url in encontrados:
            logger.info(" url duplicada="+url)
            continue
        logger.info(" url="+url)
        devuelve.append( [ "[rapidtube]" , url , 'rapidtube' ] )
        encontrados.add(url)
return devuelve | gpl-2.0 |
openfun/edx-platform | pavelib/utils/test/suites/bokchoy_suite.py | 16 | 6412 | """
Class used for defining and running Bok Choy acceptance test suite
"""
from time import sleep
from paver.easy import sh
from pavelib.utils.test.suites.suite import TestSuite
from pavelib.utils.envs import Env
from pavelib.utils.test import bokchoy_utils
from pavelib.utils.test import utils as test_utils
try:
from pygments.console import colorize
except ImportError:
colorize = lambda color, text: text # pylint: disable-msg=invalid-name
__test__ = False # do not collect
class BokChoyTestSuite(TestSuite):
    """
    TestSuite for running Bok Choy tests

    Properties (below is a subset):
    test_dir - parent directory for tests
    log_dir - directory for test output
    report_dir - directory for reports (e.g., coverage) related to test execution
    xunit_report - directory for xunit-style output (xml)
    fasttest - when set, skip various set-up tasks (e.g., collectstatic)
    serversonly - prepare and run the necessary servers, only stopping when interrupted with Ctrl-C
    testsonly - assume servers are running (as per above) and run tests with no setup or cleaning of environment
    test_spec - when set, specifies test files, classes, cases, etc. See platform doc.
    default_store - modulestore to use when running tests (split or draft)
    """
    def __init__(self, *args, **kwargs):
        """Read run options from **kwargs and derive the output directories."""
        super(BokChoyTestSuite, self).__init__(*args, **kwargs)
        self.test_dir = Env.BOK_CHOY_DIR / kwargs.get('test_dir', 'tests')
        self.log_dir = Env.BOK_CHOY_LOG_DIR
        self.report_dir = Env.BOK_CHOY_REPORT_DIR
        self.xunit_report = self.report_dir / "xunit.xml"
        self.cache = Env.BOK_CHOY_CACHE
        self.fasttest = kwargs.get('fasttest', False)
        self.serversonly = kwargs.get('serversonly', False)
        self.testsonly = kwargs.get('testsonly', False)
        self.test_spec = kwargs.get('test_spec', None)
        self.default_store = kwargs.get('default_store', None)
        self.verbosity = kwargs.get('verbosity', 2)
        self.extra_args = kwargs.get('extra_args', '')
        # Directory for HAR files captured during the run.
        self.har_dir = self.log_dir / 'hars'
        self.imports_dir = kwargs.get('imports_dir', None)

    def __enter__(self):
        """Prepare directories and services; optionally run servers forever."""
        super(BokChoyTestSuite, self).__enter__()

        # Ensure that we have a directory to put logs and reports
        self.log_dir.makedirs_p()
        self.har_dir.makedirs_p()
        self.report_dir.makedirs_p()
        test_utils.clean_reports_dir()

        if not (self.fasttest or self.skip_clean):
            test_utils.clean_test_files()

        msg = colorize('green', "Checking for mongo, memchache, and mysql...")
        print msg
        bokchoy_utils.check_services()

        if not self.testsonly:
            self.prepare_bokchoy_run()

        msg = colorize('green', "Confirming servers have started...")
        print msg
        bokchoy_utils.wait_for_test_servers()

        # In servers-only mode this call blocks until Ctrl-C.
        if self.serversonly:
            self.run_servers_continuously()

    def __exit__(self, exc_type, exc_value, traceback):
        """Tear down: flush the LMS database and clear Mongo test data."""
        super(BokChoyTestSuite, self).__exit__(exc_type, exc_value, traceback)

        msg = colorize('green', "Cleaning up databases...")
        print msg

        # Clean up data we created in the databases
        sh("./manage.py lms --settings bok_choy flush --traceback --noinput")
        bokchoy_utils.clear_mongo()

    def prepare_bokchoy_run(self):
        """
        Sets up and starts servers for a Bok Choy run. If --fasttest is not
        specified then static assets are collected
        """
        sh("{}/scripts/reset-test-db.sh".format(Env.REPO_ROOT))

        if not self.fasttest:
            self.generate_optimized_static_assets()

        # Clear any test data already in Mongo or MySQL and invalidate
        # the cache
        bokchoy_utils.clear_mongo()
        self.cache.flush_all()

        # Load the fixture data into the LMS database.
        sh(
            "DEFAULT_STORE={default_store}"
            " ./manage.py lms --settings bok_choy loaddata --traceback"
            " common/test/db_fixtures/*.json".format(
                default_store=self.default_store,
            )
        )

        # Optionally import course content into the CMS modulestore.
        if self.imports_dir:
            sh(
                "DEFAULT_STORE={default_store}"
                " ./manage.py cms --settings=bok_choy import {import_dir}".format(
                    default_store=self.default_store,
                    import_dir=self.imports_dir
                )
            )

        # Ensure the test servers are available
        msg = colorize('green', "Confirming servers are running...")
        print msg
        bokchoy_utils.start_servers(self.default_store)

    def run_servers_continuously(self):
        """
        Infinite loop. Servers will continue to run in the current session unless interrupted.
        """
        print 'Bok-choy servers running. Press Ctrl-C to exit...\n'
        print 'Note: pressing Ctrl-C multiple times can corrupt noseid files and system state. Just press it once.\n'

        while True:
            try:
                sleep(10000)
            except KeyboardInterrupt:
                print "Stopping bok-choy servers.\n"
                break

    @property
    def cmd(self):
        """
        This method composes the nosetests command to send to the terminal. If nosetests aren't being run,
        the command returns an empty string.
        """
        # Default to running all tests if no specific test is specified
        if not self.test_spec:
            test_spec = self.test_dir
        else:
            test_spec = self.test_dir / self.test_spec

        # Skip any additional commands (such as nosetests) if running in
        # servers only mode
        if self.serversonly:
            return ""

        # Construct the nosetests command, specifying where to save
        # screenshots and XUnit XML reports
        cmd = [
            "DEFAULT_STORE={}".format(self.default_store),
            "SCREENSHOT_DIR='{}'".format(self.log_dir),
            "BOK_CHOY_HAR_DIR='{}'".format(self.har_dir),
            "SELENIUM_DRIVER_LOG_DIR='{}'".format(self.log_dir),
            "nosetests",
            test_spec,
            "--with-xunit",
            "--xunit-file={}".format(self.xunit_report),
            "--verbosity={}".format(self.verbosity),
        ]
        if self.pdb:
            cmd.append("--pdb")
        cmd.append(self.extra_args)

        cmd = (" ").join(cmd)
        return cmd
| agpl-3.0 |
ch3ll0v3k/scikit-learn | sklearn/utils/tests/test_random.py | 230 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
    # An unknown method name must be rejected with a ValueError.
    assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
    """Run the shared edge-case/length/distribution checks for every method."""
    for method in ("auto", "tracking_selection", "reservoir_sampling", "pool"):
        # Bind the current method as a default argument so the wrapper is
        # not affected by late-binding of the loop variable.
        def sampler(n_population, n_samples, random_state=None, method=method):
            return sample_without_replacement(n_population, n_samples,
                                              method=method,
                                              random_state=random_state)

        check_edge_case_of_sample_int(sampler)
        check_sample_int(sampler)
        check_sample_int_distribution(sampler)
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples: cannot sample more than the population
    assert_raises(ValueError, sample_without_replacement, 0, 1)
    assert_raises(ValueError, sample_without_replacement, 1, 2)

    # n_population == n_samples
    assert_equal(sample_without_replacement(0, 0).shape, (0, ))
    assert_equal(sample_without_replacement(1, 1).shape, (1, ))

    # n_population > n_samples
    assert_equal(sample_without_replacement(5, 0).shape, (0, ))
    assert_equal(sample_without_replacement(5, 1).shape, (1, ))

    # n_population < 0 or n_samples < 0
    assert_raises(ValueError, sample_without_replacement, -1, 5)
    assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
    # Heavily inspired by test_random.py from python-core.
    #
    # For every 0 <= k <= N, the returned sample must have the requested
    # length and contain only unique values below the population size.
    population = 100
    for size in range(population + 1):
        drawn = sample_without_replacement(population, size)
        assert_equal(len(drawn), size)
        distinct = np.unique(drawn)
        assert_equal(np.size(distinct), size)
        assert_true(np.all(distinct < population))

    # edge case: n_population == n_samples == 0
    assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
    # This test is heavily inspired from test_random.py of python-core.
    #
    # For the entire allowable range of 0 <= k <= N, validate that
    # sample generates all possible combinations
    n_population = 10

    # a large number of trials prevents false negatives without slowing normal
    # case
    n_trials = 10000

    for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with a sampling algorithm
        # that does not provide a random permutation of the subset of integers.
        n_expected = combinations(n_population, n_samples, exact=True)

        output = {}
        for i in range(n_trials):
            output[frozenset(sample_without_replacement(n_population,
                                                        n_samples))] = None
            # Stop as soon as every combination has been observed.
            if len(output) == n_expected:
                break
        else:
            raise AssertionError(
                "number of combinations != number of expected (%s != %s)" %
                (len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
    """Sampled column frequencies must approximate the requested class
    probabilities, for explicit, implicit, degenerate and single-class cases."""
    # Explicit class probabilities
    classes = [np.array([0, 1]), np.array([0, 1, 2])]
    class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]

    got = random_choice_csc(n_samples, classes, class_probabilites,
                            random_state)
    assert_true(sp.issparse(got))

    for k in range(len(classes)):
        p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)

    # Implicit class probabilities (uniform over the observed classes)
    classes = [[0, 1], [1, 2]]  # test for array-like support
    class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]

    got = random_choice_csc(n_samples=n_samples,
                            classes=classes,
                            random_state=random_state)
    assert_true(sp.issparse(got))

    for k in range(len(classes)):
        p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)

    # Edge case probabilities 1.0 and 0.0
    classes = [np.array([0, 1]), np.array([0, 1, 2])]
    class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]

    got = random_choice_csc(n_samples, classes, class_probabilites,
                            random_state)
    assert_true(sp.issparse(got))

    for k in range(len(classes)):
        p = np.bincount(got.getcol(k).toarray().ravel(),
                        minlength=len(class_probabilites[k])) / n_samples
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)

    # One class target data
    classes = [[1], [0]]  # test for array-like support
    class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]

    got = random_choice_csc(n_samples=n_samples,
                            classes=classes,
                            random_state=random_state)
    assert_true(sp.issparse(got))

    for k in range(len(classes)):
        p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
        assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
    """Invalid classes/probabilities inputs must raise ValueError."""
    # the lengths of classes and class_probabilites entries are mismatched
    classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
    class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
    assert_raises(ValueError, random_choice_csc, 4, classes,
                  class_probabilites, 1)

    # the class dtype is not supported (strings)
    classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
    class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
    assert_raises(ValueError, random_choice_csc, 4, classes,
                  class_probabilites, 1)

    # the class dtype is not supported (non-integer floats)
    classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
    class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
    assert_raises(ValueError, random_choice_csc, 4, classes,
                  class_probabilites, 1)

    # Given probabilities don't sum to 1
    classes = [np.array([0, 1]), np.array([0, 1, 2])]
    class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
    assert_raises(ValueError, random_choice_csc, 4, classes,
                  class_probabilites, 1)
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.