repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
Witia1/olympia | sites/prod/settings_base.py | 1 | 5055 | import logging
import os
import dj_database_url
from lib.settings_base import CACHE_PREFIX, KNOWN_PROXIES, LOGGING, HOSTNAME
from .. import splitstrip
import private_base as private
# Production overrides for the addons site. Secrets and host locations come
# from the private settings module imported above.

# Mail: use a real SMTP backend and actually deliver messages.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = private.EMAIL_HOST
SEND_REAL_EMAIL = True
# Allow search engines to crawl the site.
ENGAGE_ROBOTS = True
DEBUG = False
TEMPLATE_DEBUG = DEBUG
DEBUG_PROPAGATE_EXCEPTIONS = False
# Only send the session cookie over HTTPS.
SESSION_COOKIE_SECURE = True
REDIRECT_SECRET_KEY = private.REDIRECT_SECRET_KEY
ADMINS = ()
# Databases: a default (master) and a read slave, both parsed from URLs held
# in the private module and forced onto the pooled MySQL engine.
DATABASES = {}
DATABASES['default'] = dj_database_url.parse(private.DATABASES_DEFAULT_URL)
DATABASES['default']['ENGINE'] = 'mysql_pool'
DATABASES['slave'] = dj_database_url.parse(private.DATABASES_SLAVE_URL)
DATABASES['slave']['ENGINE'] = 'mysql_pool'
# Connection-pool tuning for the mysql_pool engine.
DATABASE_POOL_ARGS = {
    'max_overflow': 10,
    'pool_size': 5,
    'recycle': 300
}
SERVICES_DATABASE = dj_database_url.parse(private.SERVICES_DATABASE_URL)
SLAVE_DATABASES = ['slave']
# Memcached; locations are a comma-separated list split by splitstrip().
CACHES = {
    'default': {
        'BACKEND': 'caching.backends.memcached.MemcachedCache',
        'LOCATION': splitstrip(private.CACHES_DEFAULT_LOCATION),
        'TIMEOUT': 500,
        'KEY_PREFIX': CACHE_PREFIX,
    },
}
# Celery
BROKER_URL = private.BROKER_URL
CELERY_IGNORE_RESULT = True
CELERY_DISABLE_RATE_LIMITS = True
BROKER_CONNECTION_TIMEOUT = 0.1
# File storage lives on shared NetApp mounts.
NETAPP_STORAGE_ROOT = private.NETAPP_STORAGE_ROOT
NETAPP_STORAGE = NETAPP_STORAGE_ROOT + '/shared_storage'
GUARDED_ADDONS_PATH = NETAPP_STORAGE_ROOT + '/guarded-addons'
MEDIA_ROOT = NETAPP_STORAGE + '/uploads'
# Must be forced in settings because name => path can't be dynamically
# computed: reviewer_attachmentS VS reviewer_attachment.
# TODO: rename folder on file system.
# (One can also just rename the setting, but this will not be consistent
# with the naming scheme.)
REVIEWER_ATTACHMENTS_PATH = MEDIA_ROOT + '/reviewer_attachment'
HERA = []
LOG_LEVEL = logging.DEBUG
# Per-logger level overrides on top of the base LOGGING configuration.
LOGGING['loggers'].update({
    'adi.updatecountsfromfile': {'level': logging.INFO},
    'amqp': {'level': logging.WARNING},
    'raven': {'level': logging.WARNING},
    'requests': {'level': logging.WARNING},
    'z.addons': {'level': logging.INFO},
    'z.task': {'level': logging.DEBUG},
    'z.hera': {'level': logging.INFO},
    'z.redis': {'level': logging.DEBUG},
    'z.pool': {'level': logging.ERROR},
})
REDIS_BACKEND = private.REDIS_BACKENDS_CACHE
REDIS_BACKENDS = {
    'cache': REDIS_BACKEND,
    'cache_slave': private.REDIS_BACKENDS_CACHE_SLAVE,
    'master': private.REDIS_BACKENDS_MASTER,
    'slave': private.REDIS_BACKENDS_SLAVE,
}
CACHE_MACHINE_USE_REDIS = True
RECAPTCHA_PUBLIC_KEY = private.RECAPTCHA_PUBLIC_KEY
RECAPTCHA_PRIVATE_KEY = private.RECAPTCHA_PRIVATE_KEY
RECAPTCHA_URL = (
    'https://www.google.com/recaptcha/api/challenge?k=%s' %
    RECAPTCHA_PUBLIC_KEY)
TMP_PATH = os.path.join(NETAPP_STORAGE, 'tmp')
PACKAGER_PATH = os.path.join(TMP_PATH, 'packager')
ADDONS_PATH = NETAPP_STORAGE_ROOT + '/files'
PERF_THRESHOLD = None
PERF_TEST_URL = (
    'http://talos-addon-master1.amotest.scl1.mozilla.com/trigger/trigger.cgi')
SPIDERMONKEY = '/usr/bin/tracemonkey'
# Remove DetectMobileMiddleware from middleware in production.
detect = 'mobility.middleware.DetectMobileMiddleware'
csp = 'csp.middleware.CSPMiddleware'
RESPONSYS_ID = private.RESPONSYS_ID
CRONJOB_LOCK_PREFIX = 'addons'
# Elasticsearch hosts come as a comma-separated list in the private module.
ES_HOSTS = splitstrip(private.ES_HOSTS)
ES_URLS = ['http://%s' % h for h in ES_HOSTS]
# ES_INDEXES doesn't change for prod.
STATSD_HOST = private.STATSD_HOST
STATSD_PORT = private.STATSD_PORT
STATSD_PREFIX = private.STATSD_PREFIX
GRAPHITE_HOST = private.GRAPHITE_HOST
GRAPHITE_PORT = private.GRAPHITE_PORT
GRAPHITE_PREFIX = private.GRAPHITE_PREFIX
CEF_PRODUCT = STATSD_PREFIX
ES_TIMEOUT = 60
# Load balancers / proxies trusted when resolving client IPs.
KNOWN_PROXIES += ['10.2.83.105',
                  '10.2.83.106',
                  '10.2.83.107',
                  '10.8.83.200',
                  '10.8.83.201',
                  '10.8.83.202',
                  '10.8.83.203',
                  '10.8.83.204',
                  '10.8.83.210',
                  '10.8.83.211',
                  '10.8.83.212',
                  '10.8.83.213',
                  '10.8.83.214',
                  '10.8.83.215',
                  '10.8.83.251',
                  '10.8.83.252',
                  '10.8.83.253',
                  ]
EMAIL_BLACKLIST = private.EMAIL_BLACKLIST
NEW_FEATURES = True
CLEANCSS_BIN = 'cleancss'
UGLIFY_BIN = 'uglifyjs'
# Serve files through the front-end web server (nginx-style accel redirect).
XSENDFILE_HEADER = 'X-Accel-Redirect'
GOOGLE_ANALYTICS_CREDENTIALS = private.GOOGLE_ANALYTICS_CREDENTIALS
GOOGLE_API_CREDENTIALS = private.GOOGLE_API_CREDENTIALS
GEOIP_URL = 'http://geo.marketplace.firefox.com'
# New Relic is only enabled on this explicit set of hosts.
NEWRELIC_WHITELIST = ['web1.addons.phx1.mozilla.com',
                      'web10.addons.phx1.mozilla.com',
                      'web20.addons.phx1.mozilla.com',
                      'web1.mktweb.services.phx1.mozilla.com',
                      'web4.mktweb.services.phx1.mozilla.com']
NEWRELIC_ENABLE = HOSTNAME in NEWRELIC_WHITELIST
AES_KEYS = private.AES_KEYS
PROD_DETAILS_DIR = os.path.join(NETAPP_STORAGE, 'product_details')
| bsd-3-clause |
adaussy/eclipse-monkey-revival | plugins/python/org.eclipse.eclipsemonkey.lang.python/Lib/test/test_codecencodings_iso2022.py | 71 | 1464 | #!/usr/bin/env python
#
# Codec encoding tests for ISO 2022 encodings.
from test import test_support
from test import test_multibytecodec_support
import unittest
# Decoding cases shared by all ISO 2022 codecs below:
# (input bytes, error handler, expected unicode result).
COMMON_CODEC_TESTS = (
    # invalid bytes
    (b'ab\xFFcd', 'replace', u'ab\uFFFDcd'),
    (b'ab\x1Bdef', 'replace', u'ab\x1Bdef'),
    (b'ab\x1B$def', 'replace', u'ab\uFFFD'),
)
class Test_ISO2022_JP(test_multibytecodec_support.TestBase, unittest.TestCase):
    """Codec tests for plain iso2022_jp."""
    encoding = 'iso2022_jp'
    tstring = test_multibytecodec_support.load_teststring('iso2022_jp')
    codectests = COMMON_CODEC_TESTS + (
        # ESC N (SS2) is not part of iso2022_jp, so it passes through intact.
        (b'ab\x1BNdef', 'replace', u'ab\x1BNdef'),
    )
class Test_ISO2022_JP2(test_multibytecodec_support.TestBase, unittest.TestCase):
    """Codec tests for iso2022_jp_2 (reuses the iso2022_jp test string)."""
    encoding = 'iso2022_jp_2'
    tstring = test_multibytecodec_support.load_teststring('iso2022_jp')
    codectests = COMMON_CODEC_TESTS + (
        # iso2022_jp_2 understands ESC N (SS2), so the escape is consumed.
        (b'ab\x1BNdef', 'replace', u'abdef'),
    )
class Test_ISO2022_KR(test_multibytecodec_support.TestBase, unittest.TestCase):
    """Codec tests for iso2022_kr."""
    encoding = 'iso2022_kr'
    tstring = test_multibytecodec_support.load_teststring('iso2022_kr')
    codectests = COMMON_CODEC_TESTS + (
        # ESC N (SS2) is not part of iso2022_kr, so it passes through intact.
        (b'ab\x1BNdef', 'replace', u'ab\x1BNdef'),
    )
    # iso2022_kr.txt cannot be used to test "chunk coding": the escape
    # sequence is only written on the first line
    def test_chunkcoding(self):
        # Intentionally disabled for this codec (see note above).
        pass
def test_main():
    """Run every codec TestCase defined in this module."""
    test_support.run_unittest(__name__)

if __name__ == "__main__":
    test_main()
| epl-1.0 |
emergebtc/muddery | evennia/evennia/scripts/tests.py | 6 | 1534 | # this is an optimized version only available in later Django versions
from django.utils.unittest import TestCase
from evennia.scripts.models import ScriptDB, ObjectDoesNotExist
from evennia.utils.create import create_script
from evennia.scripts.scripts import DoNothing
class TestScriptDB(TestCase):
    "Check the singleton/static ScriptDB object works correctly"
    def setUp(self):
        # Every test starts from a freshly created no-op script.
        self.scr = create_script(DoNothing)
    def tearDown(self):
        # The script may already have been deleted by the test body.
        try:
            self.scr.delete()
        except ObjectDoesNotExist:
            pass
        del self.scr
    def test_delete(self):
        "Check the script is removed from the database"
        self.scr.delete()
        self.assertFalse(self.scr in ScriptDB.objects.get_all_scripts())
    def test_double_delete(self):
        "What should happen? Isn't it already deleted?"
        # The first delete succeeds; the second must raise.
        with self.assertRaises(ObjectDoesNotExist):
            self.scr.delete()
            self.scr.delete()
    def test_deleted_script_fails_start(self):
        "Would it ever be necessary to start a deleted script?"
        self.scr.delete()
        with self.assertRaises(ObjectDoesNotExist):  # See issue #509
            self.scr.start()
        # Check the script is not recreated as a side-effect
        self.assertFalse(self.scr in ScriptDB.objects.get_all_scripts())
    def test_deleted_script_is_invalid(self):
        "Can deleted scripts be said to be valid?"
        self.scr.delete()
        self.assertFalse(self.scr.is_valid())  # assertRaises? See issue #509
| bsd-3-clause |
leiferikb/bitpop | build/scripts/master/factory/webrtc_factory.py | 1 | 4810 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from master.factory import chromium_factory
from master.factory import gclient_factory
from master.factory import chromium_commands
import config
class WebRTCFactory(chromium_factory.ChromiumFactory):
    """Buildbot factory for WebRTC, built on the Chromium factory machinery."""

    # Sync WebRTC into src/ so Chromium tooling paths keep working.
    CUSTOM_VARS_ROOT_DIR = ('root_dir', 'src')

    # Can't use the same Valgrind constant as in chromium_factory.py, since WebRTC
    # uses another path (use_relative_paths=True in DEPS).
    CUSTOM_DEPS_VALGRIND = ('third_party/valgrind',
        config.Master.trunk_url + '/deps/third_party/valgrind/binaries')
    CUSTOM_DEPS_TSAN_WIN = ('third_party/tsan',
        config.Master.trunk_url + '/deps/third_party/tsan')

    def __init__(self, build_dir, target_platform, nohooks_on_update=False,
                 custom_deps_list=None, target_os=None):
        """Creates a WebRTC factory.

        This factory can also be used to build stand-alone projects.

        Args:
          build_dir: Directory to perform the build relative to. Usually this is
            trunk/build for WebRTC and other projects.
          target_platform: Platform, one of 'win32', 'darwin', 'linux2'
          nohooks_on_update: If True, no hooks will be executed in the update step.
          custom_deps_list: List of tuples to override directories in the gclient
            spec.
          target_os: Used to sync down OS-specific dependencies, if specified.
        """
        chromium_factory.ChromiumFactory.__init__(
            self, build_dir, target_platform=target_platform,
            nohooks_on_update=nohooks_on_update, target_os=target_os)
        svn_url = config.Master.webrtc_url + '/trunk'
        custom_deps_list = custom_deps_list or []
        # Use root_dir=src since many Chromium scripts rely on that path.
        custom_vars_list = [self.CUSTOM_VARS_ROOT_DIR]
        # Overwrite solutions of ChromiumFactory since we sync WebRTC, not Chromium.
        self._solutions = []
        self._solutions.append(gclient_factory.GClientSolution(
            svn_url, name='src', custom_deps_list=custom_deps_list,
            custom_vars_list=custom_vars_list))
        if config.Master.webrtc_limited_url:
            self._solutions.append(gclient_factory.GClientSolution(
                config.Master.webrtc_limited_url, name='webrtc-limited',
                custom_vars_list=custom_vars_list))

    def WebRTCFactory(self, target='Debug', clobber=False, tests=None, mode=None,
                      slave_type='BuilderTester', options=None,
                      compile_timeout=1200, build_url=None, project=None,
                      factory_properties=None, gclient_deps=None):
        """Builds and returns a configured buildbot factory for WebRTC."""
        options = options or ''
        tests = tests or []
        factory_properties = factory_properties or {}
        # Copy so later mutations don't leak back into the caller's dict.
        factory_properties['gclient_env'] = \
            factory_properties.get('gclient_env', {}).copy()
        # Memory-tool bots need extra tool checkouts synced next to the source.
        if factory_properties.get('needs_valgrind'):
            self._solutions[0].custom_deps_list = [self.CUSTOM_DEPS_VALGRIND]
        elif factory_properties.get('needs_tsan_win'):
            self._solutions[0].custom_deps_list = [self.CUSTOM_DEPS_TSAN_WIN]
        elif factory_properties.get('needs_drmemory'):
            if 'drmemory.DEPS' not in [s.name for s in self._solutions]:
                self._solutions.append(gclient_factory.GClientSolution(
                    config.Master.trunk_url +
                    '/deps/third_party/drmemory/drmemory.DEPS',
                    'drmemory.DEPS'))
        # Ensure GYP errors out if files referenced in .gyp files are missing.
        self.ForceMissingFilesToBeFatal(project, factory_properties['gclient_env'])
        factory = self.BuildFactory(target, clobber, tests, mode, slave_type,
                                    options, compile_timeout, build_url, project,
                                    factory_properties, gclient_deps)
        # Get the factory command object to create new steps to the factory.
        cmds = chromium_commands.ChromiumCommands(factory, target, self._build_dir,
                                                  self._target_platform)
        # Override test runner script paths with our own that can run any test and
        # have our suppressions configured.
        valgrind_script_path = cmds.PathJoin('src', 'tools', 'valgrind-webrtc')
        cmds._posix_memory_tests_runner = cmds.PathJoin(valgrind_script_path,
                                                        'webrtc_tests.sh')
        cmds._win_memory_tests_runner = cmds.PathJoin(valgrind_script_path,
                                                      'webrtc_tests.bat')
        if (self._target_platform == 'win32' and factory_properties.get(
                'syzyasan')):
            cmds.AddWindowsSyzyASanStep()
        # Add check/start step for virtual webcams, if needed.
        if factory_properties.get('virtual_webcam'):
            cmds.AddVirtualWebcamCheck()
        cmds.AddWebRTCTests(tests, factory_properties)
        return factory
| gpl-3.0 |
ifuding/Kaggle | TCCC/Code/philly/PoolGRU.py | 1 | 10263 | import numpy as np
import pandas as pd
import sklearn
import tensorflow as tf
from sklearn import feature_extraction, ensemble, decomposition, pipeline
from sklearn.model_selection import KFold
# from textblob import TextBlob
from nfold_train import nfold_train, models_eval
import time
from time import gmtime, strftime
from tensorflow.python.keras.preprocessing.text import Tokenizer, text_to_word_sequence
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from data_helper import data_helper
import shutil
import os
from contextlib import contextmanager
# from nltk.stem import PorterStemmer
# ps = PorterStemmer()
import gensim
from CNN_Keras import CNN_Model, get_word2vec_embedding
# Spelled-out vocab used when verbalising numeric polarity scores.
zpolarity = {0:'zero',1:'one',2:'two',3:'three',4:'four',5:'five',6:'six',7:'seven',8:'eight',9:'nine',10:'ten'}
zsign = {-1:'negative', 0.: 'neutral', 1:'positive'}
# Command-line configuration (TensorFlow flags API).
flags = tf.app.flags
flags.DEFINE_string('input-training-data-path', "../../Data/", 'data dir override')
flags.DEFINE_string('output-model-path', ".", 'model dir override')
flags.DEFINE_string('model_type', "cnn", 'model type')
flags.DEFINE_integer('vocab_size', 300000, 'vocab size')
flags.DEFINE_integer('max_seq_len', 100, 'max sequence length')
flags.DEFINE_integer('nfold', 10, 'number of folds')
flags.DEFINE_integer('ensemble_nfold', 5, 'number of ensemble models')
flags.DEFINE_integer('emb_dim', 300, 'term embedding dim')
# NOTE(review): DEFINE_string with a non-string default (0) — probably meant
# DEFINE_integer; confirm how FLAGS.rnn_unit is parsed downstream.
flags.DEFINE_string('rnn_unit', 0, 'RNN Units')
flags.DEFINE_integer('epochs', 1, 'number of Epochs')
flags.DEFINE_integer('batch_size', 128, 'Batch size')
flags.DEFINE_bool("load_wv_model", True, "Whether to load word2vec model")
flags.DEFINE_string('wv_model_type', "fast_text", 'word2vec model type')
flags.DEFINE_string('wv_model_file', "wiki.en.vec.indata", 'word2vec model file')
flags.DEFINE_bool("char_split", False, "Whether to split text into character")
# NOTE(review): same DEFINE_string / integer-default mismatch as rnn_unit.
flags.DEFINE_string('filter_size', 100, 'CNN filter size')
flags.DEFINE_bool("fix_wv_model", True, "Whether to fix word2vec model")
flags.DEFINE_integer('batch_interval', 1000, 'batch print interval')
flags.DEFINE_float("emb_dropout", 0, "embedding dropout")
flags.DEFINE_string('full_connect_hn', "64, 32", 'full connect hidden units')
flags.DEFINE_float("full_connect_dropout", 0, "full connect drop out")
flags.DEFINE_string('vdcnn_filters', "64, 128, 256", 'vdcnn filters')
flags.DEFINE_integer('vdcc_top_k', 1, 'vdcc top_k')
flags.DEFINE_bool("separate_label_layer", False, "Whether to separate label layer")
flags.DEFINE_bool("stem", False, "Whether to stem")
flags.DEFINE_bool("resnet_hn", False, "Whether to concatenate hn and rcnn")
flags.DEFINE_integer('letter_num', 3, 'letter number to aggregate')
flags.DEFINE_string('kernel_size_list', "1,2,3,4,5,6,7", 'kernel size list')
flags.DEFINE_float("rnn_input_dropout", 0, "rnn input drop out")
flags.DEFINE_float("rnn_state_dropout", 0, "rnn state drop out")
flags.DEFINE_bool("stacking", False, "Whether to stacking")
flags.DEFINE_bool("uniform_init_emb", False, "Whether to uniform init the embedding")
flags.DEFINE_bool("load_stacking_data", False, "Whether to load stacking data")
FLAGS = flags.FLAGS
def load_data():
    """Load train/test CSVs (or precomputed stacking features) and build
    model-ready inputs.

    Returns:
        (train_data, train_label, test_data, coly, tid, emb_weight) where
        coly is the list of label column names and tid the test-set ids.
    """
    train = pd.read_csv(FLAGS.input_training_data_path + '/train.csv') #.iloc[:200]
    test = pd.read_csv(FLAGS.input_training_data_path + '/test.csv') #.iloc[:200]
    # sub1 = pd.read_csv(data_dir + '/submission_ensemble.csv')
    nrow = train.shape[0]
    print("Train Size: {0}".format(nrow))
    print("Test Size: {0}".format(test.shape[0]))
    # Every column except id/comment_text is a target label.
    coly = [c for c in train.columns if c not in ['id','comment_text']]
    print("Label columns: {0}".format(coly))
    y = train[coly]
    tid = test['id'].values
    if FLAGS.load_stacking_data:
        # First-level predictions and SVD features were precomputed on disk.
        data_dir = "../../Data/2fold/"
        svd_features = np.load(data_dir + 'svd.npy')
        svd_train = svd_features[:nrow]
        svd_test = svd_features[nrow:]
        kf = KFold(n_splits=2, shuffle=False)
        # Only the first split's test fold is used as extra train features.
        for train_index, test_index in kf.split(svd_train):
            svd_train_part = svd_train[test_index]
            break
        train_data = np.load(data_dir + 'stacking_train_data.npy')
        print(train_data.shape, svd_train_part.shape)
        train_data = np.c_[train_data, svd_train_part]
        train_label = np.load(data_dir + 'stacking_train_label.npy')
        # train_data = train_data[:100]
        # train_label = train_label[:100]
        test_data = np.load(data_dir + 'stacking_test_data.npy')
        emb_weight = None
    else:
        # Tokenize train and test together so vocabulary/padding agree.
        df = pd.concat([train['comment_text'], test['comment_text']], axis=0)
        df = df.fillna("unknown")
        data = df.values
        # Text to sequence
        @contextmanager
        def timer(name):
            """
            Taken from Konstantin Lopuhin https://www.kaggle.com/lopuhin
            in script named : Mercari Golf: 0.3875 CV in 75 LOC, 1900 s
            https://www.kaggle.com/lopuhin/mercari-golf-0-3875-cv-in-75-loc-1900-s
            """
            t0 = time.time()
            yield
            print('[{0}] done in {1} s'.format(name, time.time() - t0))
        with timer("Performing stemming"):
            if FLAGS.stem:
                # stem_sentence = lambda s: " ".join(ps.stem(word) for word in s.strip().split())
                data = [gensim.parsing.stem_text(comment) for comment in data]
        print('Tokenizer...')
        if not FLAGS.char_split:
            # Word-level pipeline: Keras tokenizer + padded index sequences.
            tokenizer = Tokenizer(num_words = FLAGS.vocab_size)
            tokenizer.fit_on_texts(data)
            data = tokenizer.texts_to_sequences(data)
            data = pad_sequences(data, maxlen = FLAGS.max_seq_len)
            if FLAGS.load_wv_model:
                emb_weight = get_word2vec_embedding(location = FLAGS.input_training_data_path + FLAGS.wv_model_file, \
                    tokenizer = tokenizer, nb_words = FLAGS.vocab_size, embed_size = FLAGS.emb_dim, \
                    model_type = FLAGS.wv_model_type, uniform_init_emb = FLAGS.uniform_init_emb)
            else:
                if FLAGS.uniform_init_emb:
                    emb_weight = np.random.uniform(0, 1, (FLAGS.vocab_size, FLAGS.emb_dim))
                else:
                    emb_weight = np.zeros((FLAGS.vocab_size, FLAGS.emb_dim))
        else:
            # Character (tri-letter) pipeline.
            tokenizer = None
            # NOTE(review): this rebinds the imported name ``data_helper`` to an
            # instance, shadowing the class for the rest of this scope.
            data_helper = data_helper(sequence_max_length = FLAGS.max_seq_len, \
                wv_model_path = FLAGS.input_training_data_path + FLAGS.wv_model_file, \
                letter_num = FLAGS.letter_num, emb_dim = FLAGS.emb_dim, load_wv_model = FLAGS.load_wv_model)
            data, emb_weight, FLAGS.vocab_size = data_helper.text_to_triletter_sequence(data)
        train_data, train_label = data[:nrow], y.values[:nrow]
        test_data = data[nrow:]
    return train_data, train_label, test_data, coly, tid, emb_weight
def sub(models, stacking_data = None, stacking_label = None, stacking_test_data = None, test = None, \
        scores_text = None, coly = None, tid = None, sub_re = None):
    """Write run artifacts and move them to FLAGS.output_model_path.

    In stacking mode the out-of-fold arrays are saved as .npy files;
    otherwise a submission CSV with clipped per-label probabilities is
    written. In both modes a per-epoch validation-score summary is saved.

    Fixes over the previous revision:
    * the first parameter was misspelled ``mdoels`` while the body read the
      global ``models`` — the function now actually uses its argument;
    * ``clip(0+1e12, 1-1e12)`` inverted the bounds; the intended epsilon
      clip is ``1e-12`` away from 0 and 1;
    * ``time_label`` is computed up front so the stacking branch no longer
      raises NameError when building the score-file name.
    """
    tmp_model_dir = "./model_dir/"
    if not os.path.isdir(tmp_model_dir):
        os.makedirs(tmp_model_dir, exist_ok=True)
    # Shared timestamp for every file written by this run.
    time_label = strftime('_%Y_%m_%d_%H_%M_%S', gmtime())
    if FLAGS.stacking:
        np.save(os.path.join(tmp_model_dir, "stacking_train_data.npy"), stacking_data)
        np.save(os.path.join(tmp_model_dir, "stacking_train_label.npy"), stacking_label)
        np.save(os.path.join(tmp_model_dir, "stacking_test_data.npy"), stacking_test_data)
    else:
        sub2 = pd.DataFrame(np.zeros((test.shape[0], len(coly))), columns = coly)
        if FLAGS.load_stacking_data:
            sub2[coly] = sub_re
        else:
            sub2[coly] = models_eval(models, test)
        sub2['id'] = tid
        for c in coly:
            # Keep probabilities strictly inside (0, 1) for log-loss scoring.
            sub2[c] = sub2[c].clip(0 + 1e-12, 1 - 1e-12)
        blend = sub2 #blend[sub2.columns]
        sub_name = tmp_model_dir + "sub" + time_label + ".csv"
        blend.to_csv(sub_name, index=False)
    # Persist the raw validation-score strings and print per-epoch stats.
    scores_text_frame = pd.DataFrame(scores_text, columns = ["score_text"])
    score_text_file = tmp_model_dir + "score_text" + time_label + ".csv"
    scores_text_frame.to_csv(score_text_file, index=False)
    scores = scores_text_frame["score_text"]
    for i in range(FLAGS.epochs):
        scores_epoch = scores.loc[scores.str.startswith('epoch:{0}'.format(i + 1))].map(lambda s: float(s.split()[1]))
        print ("Epoch{0} mean:{1} std:{2} min:{3} max:{4} median:{5}".format(i + 1, \
            scores_epoch.mean(), scores_epoch.std(), scores_epoch.min(), scores_epoch.max(), scores_epoch.median()))
    # Move everything into the final output directory, replacing stale files.
    if not os.path.isdir(FLAGS.output_model_path):
        os.makedirs(FLAGS.output_model_path, exist_ok=True)
    for fileName in os.listdir(tmp_model_dir):
        dst_file = os.path.join(FLAGS.output_model_path, fileName)
        if os.path.exists(dst_file):
            os.remove(dst_file)
        shutil.move(os.path.join(tmp_model_dir, fileName), FLAGS.output_model_path)
if __name__ == "__main__":
    print("Training------")
    scores_text = []
    train_data, train_label, test_data, coly, tid, emb_weight = load_data()
    sub_re = np.zeros((test_data.shape[0], len(coly)))
    if not FLAGS.load_stacking_data:
        # Single multi-label model trained over all target columns at once.
        # for i in range(train_label.shape[1]):
        models, stacking_data, stacking_label, stacking_test_data = nfold_train(train_data, train_label, flags = FLAGS, \
            model_types = [FLAGS.model_type], scores = scores_text, emb_weight = emb_weight, test_data = test_data)
        #, valide_data = train_data, valide_label = train_label)
    else:
        # Stacking mode: one second-level model per label column.
        for i in range(train_label.shape[1]):
            models, stacking_data, stacking_label, stacking_test_data = nfold_train(train_data, train_label[:, i], flags = FLAGS, \
                model_types = [FLAGS.model_type], scores = scores_text, emb_weight = emb_weight, test_data = test_data \
                # , valide_data = train_data[:100], valide_label = train_label[:100, i]
                )
            sub_re[:, i] = models_eval(models, test_data)
    sub(models, stacking_data = stacking_data, stacking_label = stacking_label, stacking_test_data = stacking_test_data, \
        test = test_data, scores_text = scores_text, coly = coly, tid = tid, sub_re = sub_re)
houzhenggang/hiwifi-openwrt-HC5661-HC5761 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/test/test_string.py | 94 | 9159 | import unittest, string
from test import test_support, string_tests
from UserList import UserList
class StringTest(
    string_tests.CommonTest,
    string_tests.MixinStrStringUserStringTest
    ):
    """Run the shared string tests against the module-level string functions."""
    type2test = str

    def checkequal(self, result, object, methodname, *args):
        """Assert string.<methodname>(object, *args) == result."""
        realresult = getattr(string, methodname)(object, *args)
        self.assertEqual(
            result,
            realresult
        )

    def checkraises(self, exc, object, methodname, *args):
        """Assert string.<methodname>(object, *args) raises exc."""
        self.assertRaises(
            exc,
            getattr(string, methodname),
            object,
            *args
        )

    def checkcall(self, object, methodname, *args):
        """Call string.<methodname>(object, *args), ignoring the result."""
        getattr(string, methodname)(object, *args)

    def test_join(self):
        # These are the same checks as in string_test.ObjectTest.test_join
        # but the argument order is different
        self.checkequal('a b c d', ['a', 'b', 'c', 'd'], 'join', ' ')
        self.checkequal('abcd', ('a', 'b', 'c', 'd'), 'join', '')
        self.checkequal('w x y z', string_tests.Sequence(), 'join', ' ')
        self.checkequal('abc', ('abc',), 'join', 'a')
        self.checkequal('z', UserList(['z']), 'join', 'a')
        if test_support.have_unicode:
            # Mixing str and unicode promotes the result to unicode.
            self.checkequal(unicode('a.b.c'), ['a', 'b', 'c'], 'join', unicode('.'))
            self.checkequal(unicode('a.b.c'), [unicode('a'), 'b', 'c'], 'join', '.')
            self.checkequal(unicode('a.b.c'), ['a', unicode('b'), 'c'], 'join', '.')
            self.checkequal(unicode('a.b.c'), ['a', 'b', unicode('c')], 'join', '.')
            self.checkraises(TypeError, ['a', unicode('b'), 3], 'join', '.')
        for i in [5, 25, 125]:
            self.checkequal(
                ((('a' * i) + '-') * i)[:-1],
                ['a' * i] * i, 'join', '-')
            self.checkequal(
                ((('a' * i) + '-') * i)[:-1],
                ('a' * i,) * i, 'join', '-')
        self.checkraises(TypeError, string_tests.BadSeq1(), 'join', ' ')
        self.checkequal('a b c', string_tests.BadSeq2(), 'join', ' ')
        try:
            def f():
                # 4 + "" raises TypeError while join consumes the iterator.
                yield 4 + ""
            self.fixtype(' ').join(f())
        except TypeError, e:
            if '+' not in str(e):
                self.fail('join() ate exception message')
        else:
            self.fail('exception not raised')
class ModuleTest(unittest.TestCase):
    """Tests for the function-style helpers in the ``string`` module."""

    def test_attrs(self):
        # Merely accessing the documented data attributes must not raise.
        string.whitespace
        string.lowercase
        string.uppercase
        string.letters
        string.digits
        string.hexdigits
        string.octdigits
        string.punctuation
        string.printable

    def test_atoi(self):
        self.assertEqual(string.atoi(" 1 "), 1)
        self.assertRaises(ValueError, string.atoi, " 1x")
        self.assertRaises(ValueError, string.atoi, " x1 ")

    def test_atol(self):
        self.assertEqual(string.atol(" 1 "), 1L)
        self.assertRaises(ValueError, string.atol, " 1x ")
        self.assertRaises(ValueError, string.atol, " x1 ")

    def test_atof(self):
        self.assertAlmostEqual(string.atof(" 1 "), 1.0)
        self.assertRaises(ValueError, string.atof, " 1x ")
        self.assertRaises(ValueError, string.atof, " x1 ")

    def test_maketrans(self):
        # Identity table except a->x, b->y, c->z.
        transtable = '\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`xyzdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377'
        self.assertEqual(string.maketrans('abc', 'xyz'), transtable)
        # Arguments of unequal length are rejected.
        self.assertRaises(ValueError, string.maketrans, 'abc', 'xyzq')

    def test_capwords(self):
        self.assertEqual(string.capwords('abc def ghi'), 'Abc Def Ghi')
        self.assertEqual(string.capwords('abc\tdef\nghi'), 'Abc Def Ghi')
        self.assertEqual(string.capwords('abc\t def \nghi'), 'Abc Def Ghi')
        self.assertEqual(string.capwords('ABC DEF GHI'), 'Abc Def Ghi')
        self.assertEqual(string.capwords('ABC-DEF-GHI', '-'), 'Abc-Def-Ghi')
        self.assertEqual(string.capwords('ABC-def DEF-ghi GHI'), 'Abc-def Def-ghi Ghi')
        self.assertEqual(string.capwords(' aBc DeF '), 'Abc Def')
        self.assertEqual(string.capwords('\taBc\tDeF\t'), 'Abc Def')
        self.assertEqual(string.capwords('\taBc\tDeF\t', '\t'), '\tAbc\tDef\t')

    def test_formatter(self):
        fmt = string.Formatter()
        self.assertEqual(fmt.format("foo"), "foo")
        self.assertEqual(fmt.format("foo{0}", "bar"), "foobar")
        self.assertEqual(fmt.format("foo{1}{0}-{1}", "bar", 6), "foo6bar-6")
        self.assertEqual(fmt.format("-{arg!r}-", arg='test'), "-'test'-")
        # override get_value ############################################
        class NamespaceFormatter(string.Formatter):
            def __init__(self, namespace={}):
                string.Formatter.__init__(self)
                self.namespace = namespace
            def get_value(self, key, args, kwds):
                if isinstance(key, str):
                    try:
                        # Check explicitly passed arguments first
                        return kwds[key]
                    except KeyError:
                        return self.namespace[key]
                else:
                    # NOTE(review): unbound call missing ``self`` (and not
                    # returned) — latent bug, never reached for str keys here.
                    string.Formatter.get_value(key, args, kwds)
        fmt = NamespaceFormatter({'greeting':'hello'})
        self.assertEqual(fmt.format("{greeting}, world!"), 'hello, world!')
        # override format_field #########################################
        class CallFormatter(string.Formatter):
            def format_field(self, value, format_spec):
                return format(value(), format_spec)
        fmt = CallFormatter()
        self.assertEqual(fmt.format('*{0}*', lambda : 'result'), '*result*')
        # override convert_field ########################################
        class XFormatter(string.Formatter):
            def convert_field(self, value, conversion):
                if conversion == 'x':
                    return None
                return super(XFormatter, self).convert_field(value, conversion)
        fmt = XFormatter()
        self.assertEqual(fmt.format("{0!r}:{0!x}", 'foo', 'foo'), "'foo':None")
        # override parse ################################################
        class BarFormatter(string.Formatter):
            # returns an iterable that contains tuples of the form:
            # (literal_text, field_name, format_spec, conversion)
            def parse(self, format_string):
                for field in format_string.split('|'):
                    if field[0] == '+':
                        # it's markup
                        field_name, _, format_spec = field[1:].partition(':')
                        yield '', field_name, format_spec, None
                    else:
                        yield field, None, None, None
        fmt = BarFormatter()
        # NOTE(review): expected value restored to the '^10s'-centered form;
        # the interior space runs had been collapsed by whitespace mangling.
        self.assertEqual(fmt.format('*|+0:^10s|*', 'foo'), '*   foo    *')
        # test all parameters used
        class CheckAllUsedFormatter(string.Formatter):
            def check_unused_args(self, used_args, args, kwargs):
                # Track which arguments actually got used
                unused_args = set(kwargs.keys())
                unused_args.update(range(0, len(args)))
                for arg in used_args:
                    unused_args.remove(arg)
                if unused_args:
                    raise ValueError("unused arguments")
        fmt = CheckAllUsedFormatter()
        self.assertEqual(fmt.format("{0}", 10), "10")
        self.assertEqual(fmt.format("{0}{i}", 10, i=100), "10100")
        self.assertEqual(fmt.format("{0}{i}{1}", 10, 20, i=100), "1010020")
        self.assertRaises(ValueError, fmt.format, "{0}{i}{1}", 10, 20, i=100, j=0)
        self.assertRaises(ValueError, fmt.format, "{0}", 10, 20)
        self.assertRaises(ValueError, fmt.format, "{0}", 10, 20, i=100)
        self.assertRaises(ValueError, fmt.format, "{i}", 10, 20, i=100)
        # Alternate formatting is not supported
        self.assertRaises(ValueError, format, '', '#')
        self.assertRaises(ValueError, format, '', '#20')
class BytesAliasTest(unittest.TestCase):
    """In Python 2, ``bytes`` is an alias of ``str`` and b''/br'' literals
    produce plain str objects."""
    def test_builtin(self):
        self.assertTrue(str is bytes)
    def test_syntax(self):
        self.assertEqual(b"spam", "spam")
        self.assertEqual(br"egg\foo", "egg\\foo")
        # Bug fix: the previous assertTrue(type(b""), str) passed ``str`` as
        # the assertion *message*, so it could never fail. assertIs performs
        # the identity check that was intended.
        self.assertIs(type(b""), str)
        self.assertIs(type(br""), str)
def test_main():
    """Run all TestCase classes defined in this module."""
    test_support.run_unittest(StringTest, ModuleTest, BytesAliasTest)

if __name__ == "__main__":
    test_main()
| gpl-2.0 |
eayunstack/fuel-web | nailgun/nailgun/api/v1/handlers/node.py | 2 | 10150 | # -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handlers dealing with nodes
"""
from datetime import datetime
import web
from nailgun.api.v1.handlers.base import BaseHandler
from nailgun.api.v1.handlers.base import CollectionHandler
from nailgun.api.v1.handlers.base import content
from nailgun.api.v1.handlers.base import SingleHandler
from nailgun.api.v1.validators.network import NetAssignmentValidator
from nailgun.api.v1.validators.node import NodeValidator
from nailgun import consts
from nailgun.errors import errors
from nailgun import objects
from nailgun.objects.serializers.node import NodeInterfacesSerializer
from nailgun.db import db
from nailgun.db.sqlalchemy.models import NetworkGroup
from nailgun.db.sqlalchemy.models import Node
from nailgun.db.sqlalchemy.models import NodeNICInterface
from nailgun.task.manager import NodeDeletionTaskManager
from nailgun.logger import logger
from nailgun import notifier
class NodeHandler(SingleHandler):
    """REST handler for a single node."""
    single = objects.Node
    validator = NodeValidator

    @content
    def DELETE(self, obj_id):
        """Deletes a node from DB and from Cobbler.

        :return: JSON-ed deletion task
        :http: * 200 (node has been successfully deleted)
               * 202 (node is successfully scheduled for deletion)
               * 400 (data validation failed)
               * 404 (node not found in db)
               * 403 (one of the controllers is in error state)
        """
        node = self.get_object_or_404(self.single, obj_id)
        task_manager = NodeDeletionTaskManager(cluster_id=node.cluster_id)
        try:
            task = task_manager.execute([node], mclient_remove=False)
        except errors.ControllerInErrorState as e:
            # Refuse to delete while a controller is broken.
            raise self.http(403, e.message)
        self.raise_task(task)
class NodeCollectionHandler(CollectionHandler):
    """Node collection handler"""
    validator = NodeValidator
    collection = objects.NodeCollection

    @content
    def GET(self):
        """May receive cluster_id parameter to filter list of nodes

        :returns: Collection of JSONized Node objects.
        :http: * 200 (OK)
        """
        cluster_id = web.input(cluster_id=None).cluster_id
        nodes = self.collection.eager_nodes_handlers(None)
        # cluster_id='' selects nodes not assigned to any cluster.
        if cluster_id == '':
            nodes = nodes.filter_by(cluster_id=None)
        elif cluster_id:
            nodes = nodes.filter_by(cluster_id=cluster_id)
        return self.collection.to_json(nodes)

    @content
    def PUT(self):
        """:returns: Collection of JSONized Node objects.
        :http: * 200 (nodes are successfully updated)
               * 400 (data validation failed)
        """
        data = self.checked_data(
            self.validator.validate_collection_update
        )
        nodes_updated = []
        for nd in data:
            node = self.collection.single.get_by_meta(nd)
            if not node:
                raise self.http(404, "Can't find node: {0}".format(nd))
            self.collection.single.update(node, nd)
            nodes_updated.append(node.id)
        # we need eagerload everything that is used in render
        nodes = self.collection.filter_by_id_list(
            self.collection.eager_nodes_handlers(None),
            nodes_updated
        )
        return self.collection.to_json(nodes)

    @content
    def DELETE(self):
        """Deletes a batch of nodes.

        Takes (JSONed) list of node ids to delete.

        :return: JSON-ed deletion task
        :http: * 200 (nodes have been successfully deleted)
               * 202 (nodes are successfully scheduled for deletion)
               * 400 (data validation failed)
               * 404 (nodes not found in db)
               * 403 (one of the controllers is in error state)
        """
        # TODO(pkaminski): web.py does not support parsing of array arguments
        # in the queryset so we specify the input as comma-separated list
        node_ids = self.checked_data(
            validate_method=self.validator.validate_collection_delete,
            data=web.input().get('ids', '')
        )
        nodes = self.get_objects_list_or_404(self.collection, node_ids)
        task_manager = NodeDeletionTaskManager(cluster_id=nodes[0].cluster_id)
        try:
            task = task_manager.execute(nodes, mclient_remove=False)
        except errors.ControllerInErrorState as e:
            # Refuse to delete while a controller is broken.
            raise self.http(403, e.message)
        self.raise_task(task)
class NodeAgentHandler(BaseHandler):
    # Endpoint used by the agent running on nodes to push updated
    # hardware/meta information about themselves.
    collection = objects.NodeCollection
    validator = NodeValidator

    @content
    def PUT(self):
        """:returns: node id.

        :http: * 200 (node is successfully updated)
               * 304 (node data not changed since last request)
               * 400 (data validation failed)
               * 404 (node not found)
        """
        nd = self.checked_data(
            self.validator.validate_update,
            data=web.data())
        # The agent identifies its node by meta information, not by id.
        node = self.collection.single.get_by_meta(nd)
        if not node:
            raise self.http(404, "Can't find node: {0}".format(nd))
        # Any agent request counts as a liveness signal.
        node.timestamp = datetime.now()
        if not node.online:
            node.online = True
            msg = u"Node '{0}' is back online".format(node.human_readable_name)
            logger.info(msg)
            notifier.notify("discover", msg, node_id=node.id)
            # Flush pending DB changes before processing the payload.
            db().flush()
        # Skip the update entirely when the agent reports the same checksum
        # as the one already stored -- the payload has not changed.
        if 'agent_checksum' in nd and (
            node.agent_checksum == nd['agent_checksum']
        ):
            return {'id': node.id, 'cached': True}
        self.collection.single.update_by_agent(node, nd)
        return {"id": node.id}
class NodeNICsHandler(BaseHandler):
    """Node network interfaces handler"""

    model = NodeNICInterface
    validator = NetAssignmentValidator
    serializer = NodeInterfacesSerializer

    @content
    def GET(self, node_id):
        """:returns: Collection of JSONized Node interfaces.

        :http: * 200 (OK)
               * 404 (node not found in db)
        """
        node = self.get_object_or_404(objects.Node, node_id)
        return map(self.render, node.interfaces)

    @content
    def PUT(self, node_id):
        """:returns: Collection of JSONized Node objects.

        :http: * 200 (nodes are successfully updated)
               * 400 (data validation failed)
        """
        interfaces_data = self.checked_data(
            self.validator.validate_structure_and_data, node_id=node_id)
        node_data = {'id': node_id, 'interfaces': interfaces_data}
        # The cluster's network manager performs the actual attribute update.
        objects.Cluster.get_network_manager()._update_attrs(node_data)
        # Re-fetch the node so the response reflects the updated interfaces.
        node = self.get_object_or_404(objects.Node, node_id)
        # Record a pending "interfaces" change so the cluster knows this
        # node's configuration was modified.
        objects.Node.add_pending_change(
            node,
            consts.CLUSTER_CHANGES.interfaces
        )
        return map(self.render, node.interfaces)
class NodeCollectionNICsHandler(BaseHandler):
    """Node collection network interfaces handler"""

    model = NetworkGroup
    validator = NetAssignmentValidator
    serializer = NodeInterfacesSerializer

    @content
    def PUT(self):
        """:returns: Collection of JSONized Node objects.

        :http: * 200 (nodes are successfully updated)
               * 400 (data validation failed)
        """
        payload = self.checked_data(
            self.validator.validate_collection_structure_and_data)
        # Apply every node's interface data; collect the affected node ids.
        changed_ids = [
            objects.Cluster.get_network_manager()._update_attrs(node_data)
            for node_data in payload
        ]
        changed_nodes = db().query(Node).filter(
            Node.id.in_(changed_ids)
        ).all()
        rendered = []
        for node in changed_nodes:
            rendered.append({
                "id": node.id,
                "interfaces": [self.render(nic) for nic in node.interfaces],
            })
        return rendered
class NodeNICsDefaultHandler(BaseHandler):
    """Node default network interfaces handler"""

    @content
    def GET(self, node_id):
        """:returns: Collection of default JSONized interfaces for node.

        :http: * 200 (OK)
               * 404 (node not found in db)
        """
        node = self.get_object_or_404(objects.Node, node_id)
        return self.get_default(node)

    def get_default(self, node):
        """Return the default interface configuration for ``node``.

        Returns ``None`` for nodes without a cluster, since the defaults
        are produced by the cluster's network manager.
        """
        if not node.cluster:
            return None
        net_manager = objects.Cluster.get_network_manager(node.cluster)
        return net_manager.get_default_interfaces_configuration(node)
class NodeCollectionNICsDefaultHandler(NodeNICsDefaultHandler):
    """Node collection default network interfaces handler"""

    validator = NetAssignmentValidator

    @content
    def GET(self):
        """May receive cluster_id parameter to filter list of nodes

        :returns: Collection of JSONized Nodes interfaces.
        :http: * 200 (OK)
        """
        cluster_id = web.input(cluster_id=None).cluster_id
        if cluster_id:
            nodes = objects.NodeCollection.filter_by(
                None, cluster_id=cluster_id)
        else:
            nodes = objects.NodeCollection.all()
        # Nodes without a cluster yield None defaults; drop them.
        defaults = (self.get_default(node) for node in nodes)
        return [config for config in defaults if config is not None]
class NodesAllocationStatsHandler(BaseHandler):
    """Node allocation stats handler"""

    @content
    def GET(self):
        """:returns: Total and unallocated nodes count.

        :http: * 200 (OK)
        """
        node_query = db().query(Node)
        # Unallocated == not assigned to any cluster.
        unallocated_count = node_query.filter_by(cluster_id=None).count()
        return {
            'total': node_query.count(),
            'unallocated': unallocated_count,
        }
| apache-2.0 |
krbaker/Diamond | src/collectors/postfix/test/testpostfix.py | 29 | 2113 | #!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from postfix import PostfixCollector
################################################################################
class TestPostfixCollector(CollectorTestCase):
    """Tests for PostfixCollector driven by canned postfix-stats fixtures."""

    def setUp(self):
        """Build a collector pointed at a local postfix-stats endpoint."""
        config = get_collector_config('PostfixCollector', {
            'host': 'localhost',
            'port': 7777,
            'interval': '1',
        })
        self.collector = PostfixCollector(config, None)

    def test_import(self):
        """The collector class must be importable."""
        self.assertTrue(PostfixCollector)

    def _collect_from_fixture(self, fixture_name):
        """Run one collect() cycle with get_json stubbed to a fixture."""
        payload = self.getFixture(fixture_name).getvalue()
        with patch.object(PostfixCollector, 'get_json',
                          Mock(return_value=payload)):
            self.collector.collect()

    @patch.object(Collector, 'publish')
    def test_should_work_with_synthetic_data(self, publish_mock):
        """Two consecutive samples should yield derivative metrics."""
        self._collect_from_fixture('postfix-stats.1.json')
        # The first sample only primes the derivative state.
        self.assertPublishedMany(publish_mock, {})

        self._collect_from_fixture('postfix-stats.2.json')
        expected = {
            'send.status.sent': 4,
            'send.resp_codes.2_0_0': 5,
            'clients.127_0_0_1': 1,
        }
        self.assertPublishedMany(publish_mock, expected)
        self.setDocExample(collector=self.collector.__class__.__name__,
                           metrics=expected,
                           defaultpath=self.collector.config['path'])
################################################################################
# Allow running this test module directly: ``python testpostfix.py``.
if __name__ == "__main__":
    unittest.main()
| mit |
ahmedbodi/AutobahnPython | examples/twisted/wamp/work/test_interfaces.py | 16 | 6543 | ###############################################################################
##
## Copyright (C) 2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
#from twisted.trial import unittest
import unittest
from zope.interface import implementer
from autobahn.wamp.interfaces import *
from autobahn.wamp.types import *
from autobahn.wamp.exception import ApplicationError, ProtocolError
from twisted.internet.defer import Deferred, inlineCallbacks
import random
def newid():
   """Return a random non-negative integer ID in [0, 2**53].

   NOTE(review): the bound presumably ensures IDs fit exactly into an
   IEEE-754 double (as WAMP requires) -- confirm against the spec.
   """
   return random.randrange(2 ** 53 + 1)
@implementer(ISubscriber)
@implementer(IPublisher)
@implementer(ICallee)
@implementer(ICaller)
class MockSession:
   """In-memory fake of a WAMP session implementing the publisher,
   subscriber, caller and callee interfaces for tests.

   All methods return already-fired Deferreds, mimicking the
   asynchronous API without any real networking.

   Fix: the original used ``dict.has_key()`` (removed in Python 3);
   membership tests now use the ``in`` operator, which behaves
   identically on Python 2.
   """

   def __init__(self):
      self._subscriptions = {}   # topic URI -> Subscription
      self._registrations = {}   # procedure URI -> Registration

   def subscribe(self, topic, options = None):
      """Subscribe to a topic; reuses an existing Subscription if any."""
      assert(type(topic) == str)
      assert(options is None or isinstance(options, SubscribeOptions))
      if topic not in self._subscriptions:
         self._subscriptions[topic] = Subscription(newid(), topic)
      d = Deferred()
      d.callback(self._subscriptions[topic])
      return d

   def unsubscribe(self, subscription):
      """Deactivate and forget an active Subscription."""
      assert(isinstance(subscription, Subscription))
      assert(subscription._isActive)
      assert(subscription._topic in self._subscriptions)
      subscription._isActive = False
      del self._subscriptions[subscription._topic]
      d = Deferred()
      d.callback(None)
      return d

   def publish(self, topic, payload = None, options = None):
      """Publish to a topic; only "com.myapp.mytopic1" is authorized."""
      assert(type(topic) == str)
      assert(options is None or isinstance(options, PublishOptions))
      d = Deferred()
      if topic not in ["com.myapp.mytopic1"]:
         d.errback(ApplicationError(ApplicationError.NOT_AUTHORIZED))
      else:
         id = newid()
         if topic in self._subscriptions:
            event = Event(topic, payload, id)
            self._subscriptions[topic].notify(event)
         d.callback(id)
      return d

   def register(self, procedure, endpoint, options = None):
      """Register an endpoint; reuses an existing Registration if any."""
      assert(type(procedure) == str)
      assert(options is None or isinstance(options, RegisterOptions))
      if procedure not in self._registrations:
         self._registrations[procedure] = Registration(newid(), procedure, endpoint)
      d = Deferred()
      d.callback(self._registrations[procedure])
      return d

   def unregister(self, registration):
      """Deactivate and forget an active Registration."""
      assert(isinstance(registration, Registration))
      assert(registration._isActive)
      assert(registration._procedure in self._registrations)
      registration._isActive = False
      del self._registrations[registration._procedure]
      d = Deferred()
      d.callback(None)
      return d

   def call(self, procedure, *args, **kwargs):
      """Call a built-in test procedure or a registered endpoint."""
      assert(type(procedure) == str)
      if 'options' in kwargs:
         options = kwargs['options']
         del kwargs['options']
         assert(isinstance(options, CallOptions))
      d = Deferred()
      if procedure == "com.myapp.echo":
         # Built-in: echoes a single string positional argument.
         if len(args) != 1 or len(kwargs) != 0 or type(args[0]) != str:
            d.errback(ApplicationError(ApplicationError.INVALID_ARGUMENT, "procedure takes exactly 1 positional argument of type string"))
         else:
            d.callback(args[0])
      elif procedure == "com.myapp.complex":
         # Built-in: returns a fixed complex (multi-value) result.
         d.callback(CallResult(23, 7, foo = "bar"))
      elif procedure in self._registrations:
         try:
            res = self._registrations[procedure]._endpoint(*args, **kwargs)
         except TypeError as err:
            # Endpoint signature mismatch -> invalid argument error.
            d.errback(ApplicationError(ApplicationError.INVALID_ARGUMENT, str(err)))
         else:
            d.callback(res)
      else:
         d.errback(ApplicationError(ApplicationError.NO_SUCH_PROCEDURE, "no procedure with URI {}".format(procedure)))
      return d
@inlineCallbacks
def test_rpc(session):
   """Drive register/call/unregister through the given session object."""
   def hello(msg):
      return "You said {}. I say hello!".format(msg)
   try:
      reg1 = yield session.register("com.myapp.hello", hello)
      print(reg1)
   except ApplicationError as err:
      print(err)
   else:
      res = yield session.call("com.myapp.hello", "foooo")
      print (res)
      yield session.unregister(reg1)
      # After unregister this call is expected to fail inside the session
      # (MockSession errbacks with NO_SUCH_PROCEDURE).
      res = yield session.call("com.myapp.hello", "baaar")
      print (res)
   try:
      # res = yield session.call("com.myapp.echo", "Hello, world!", 23)
      # res = yield session.call("com.myapp.complex", "Hello, world!", 23)
      res = yield session.call("com.myapp.complex", "Hello, world!", 23, options = CallOptions(timeout = 2))
      print(res.results)
      print(res.kwresults)
   except ApplicationError as err:
      print(err)
@inlineCallbacks
def test_pubsub(session):
   """Drive subscribe/publish/unwatch through the given session object."""
   try:
      sub1 = yield session.subscribe("com.myapp.mytopic1", SubscribeOptions(match = 'prefix'))
      print(sub1)
   except ApplicationError as err:
      print(err)
   else:
      def watcher1(event):
         print("watcher1: publication {} on topic {} with payload {}".format(event.publication, event.topic, event.payload))
      def watcher2(event):
         # Fix: the original printed "watcher1:" here too (copy-paste bug),
         # making the two watchers indistinguishable in the output.
         print("watcher2: publication {} on topic {} with payload {}".format(event.publication, event.topic, event.payload))
      sub1.watch(watcher1)
      sub1.watch(watcher2)
      session.publish("com.myapp.mytopic1", "Hello, world!")
      sub1.unwatch(watcher1)
      publicationId = yield session.publish("com.myapp.mytopic1", "Hello, world!")
      print(publicationId)
      # Unauthorized topic: MockSession errbacks this publish.
      session.publish("com.myapp.mytopic2", "Hello, world!")
class Publisher(unittest.TestCase):
   """Smoke-tests procedure registration against MockSession."""

   def setUp(self):
      """Create a fresh mock WAMP session for each test."""
      self.session = MockSession()

   def tearDown(self):
      pass

   @inlineCallbacks
   def test_register(self):
      """Registering a procedure on the mock session must not errback."""
      def greet(msg):
         return "You said {}. I say hello!".format(msg)
      try:
         yield self.session.register("com.myapp.hello", greet)
      except ApplicationError as err:
         print(err)
# Run the interface tests when this example is executed directly.
if __name__ == '__main__':
   unittest.main()
| apache-2.0 |
works-mobile/Diamond | src/collectors/cpu/cpu.py | 16 | 9037 | # coding=utf-8
"""
The CPUCollector collects CPU utilization metric using /proc/stat.
#### Dependencies
* /proc/stat
"""
import diamond.collector
import os
import time
from diamond.collector import str_to_bool
try:
import psutil
except ImportError:
psutil = None
class CPUCollector(diamond.collector.Collector):
    """Collects CPU time counters from /proc/stat (falling back to psutil)
    and publishes them as per-core and total derivatives.
    """

    PROC = '/proc/stat'
    # Seconds between the two samples used by 'simple' mode.
    INTERVAL = 1

    # All fields are monotonically increasing jiffy counters; MAX_COUNTER
    # lets derivative() handle counter wrap-around.
    MAX_VALUES = {
        'user': diamond.collector.MAX_COUNTER,
        'nice': diamond.collector.MAX_COUNTER,
        'system': diamond.collector.MAX_COUNTER,
        'idle': diamond.collector.MAX_COUNTER,
        'iowait': diamond.collector.MAX_COUNTER,
        'irq': diamond.collector.MAX_COUNTER,
        'softirq': diamond.collector.MAX_COUNTER,
        'steal': diamond.collector.MAX_COUNTER,
        'guest': diamond.collector.MAX_COUNTER,
        'guest_nice': diamond.collector.MAX_COUNTER,
    }

    # /proc/stat columns in order, starting at index 1 of a "cpuN" line.
    STAT_FIELDS = ('user', 'nice', 'system', 'idle', 'iowait', 'irq',
                   'softirq', 'steal', 'guest', 'guest_nice')

    def get_default_config_help(self):
        """Describe the collector-specific configuration options."""
        config_help = super(CPUCollector, self).get_default_config_help()
        config_help.update({
            'percore': 'Collect metrics per cpu core or just total',
            'simple': 'only return aggregate CPU% metric',
            'normalize': 'for cpu totals, divide by the number of CPUs',
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(CPUCollector, self).get_default_config()
        config.update({
            'path': 'cpu',
            'percore': 'True',
            'xenfix': None,
            'simple': 'False',
            'normalize': 'False',
        })
        return config

    def collect(self):
        """
        Collector cpu stats.

        Returns True on success, None when no metrics could be gathered.
        """
        def cpu_time_list():
            """Return [user, nice, system, idle] jiffies from /proc/stat."""
            # The first line is "cpu  user nice system idle ..." with TWO
            # spaces after "cpu", so split(" ")[2:6] selects user..idle.
            with open(self.PROC, "r") as stat_file:
                time_list = stat_file.readline().split(" ")[2:6]
            return [int(t) for t in time_list]

        def cpu_delta_time(interval):
            """Sample twice, ``interval`` seconds apart; return the deltas."""
            pre_check = cpu_time_list()
            time.sleep(interval)
            post_check = cpu_time_list()
            return [post - pre for pre, post in zip(pre_check, post_check)]

        if os.access(self.PROC, os.R_OK):
            # If simple only return aggregate CPU% metric
            if str_to_bool(self.config['simple']):
                dt = cpu_delta_time(self.INTERVAL)
                # idle is the last sampled field; busy% = 100 - idle share.
                cpu_pct = 100 - (dt[-1] * 100.00 / sum(dt))
                self.publish('percent', str('%.4f' % cpu_pct))
                return True

            results = {}
            ncpus = -1  # dont want to count the 'cpu'(total) cpu.
            # 'with' guarantees the handle is closed even if parsing raises
            # (the original open()/close() pair leaked it on error).
            with open(self.PROC) as stat_file:
                for line in stat_file:
                    if not line.startswith('cpu'):
                        continue
                    ncpus += 1
                    elements = line.split()
                    cpu = elements[0]
                    if cpu == 'cpu':
                        cpu = 'total'
                    elif not str_to_bool(self.config['percore']):
                        continue
                    # Keep whatever subset of columns this kernel exposes.
                    results[cpu] = {}
                    for idx, field in enumerate(self.STAT_FIELDS, start=1):
                        if len(elements) > idx:
                            results[cpu][field] = elements[idx]

            metrics = {}
            for cpu, stats in results.items():
                for s, value in stats.items():
                    metric_name = '.'.join([cpu, s])
                    # long() keeps full counter precision on Python 2.
                    derived = self.derivative(
                        metric_name,
                        long(value),
                        self.MAX_VALUES[s])
                    if (str_to_bool(self.config['normalize'])
                            and cpu == 'total' and ncpus > 0):
                        derived /= ncpus
                    metrics[metric_name] = derived

            # Check for a bug in xen where the idle time is doubled for guest
            # See https://bugzilla.redhat.com/show_bug.cgi?id=624756
            if self.config['xenfix'] is None or self.config['xenfix'] is True:
                if os.path.isdir('/proc/xen'):
                    total = 0
                    for metric_name in metrics.keys():
                        if 'cpu0.' in metric_name:
                            total += int(metrics[metric_name])
                    if total > 110:
                        self.config['xenfix'] = True
                        for mname in metrics.keys():
                            if '.idle' in mname:
                                metrics[mname] = float(metrics[mname]) / 2
                    elif total > 0:
                        self.config['xenfix'] = False
                else:
                    self.config['xenfix'] = False

            # Publish Metric Derivative
            for metric_name, value in metrics.items():
                self.publish(metric_name, value)
            return True

        # /proc/stat unreadable: fall back to psutil, if available.
        if not psutil:
            self.log.error('Unable to import psutil')
            self.log.error('No cpu metrics retrieved')
            return None

        def publish_times(prefix, times, divisor=1):
            """Publish user/nice/system/idle derivatives for one entry."""
            for field in ('user', 'nice', 'system', 'idle'):
                # Some platforms' psutil does not expose 'nice'.
                if not hasattr(times, field):
                    continue
                name = prefix + '.' + field
                self.publish(name,
                             self.derivative(name,
                                             getattr(times, field),
                                             self.MAX_VALUES[field]) / divisor)

        per_cpu_times = psutil.cpu_times(True)
        cpu_count = len(per_cpu_times)
        total_time = psutil.cpu_times()
        for i, cpu_times in enumerate(per_cpu_times):
            publish_times('cpu' + str(i), cpu_times)
        # Totals are divided by the CPU count, keeping them comparable to
        # a single core's range.
        publish_times('total', total_time, cpu_count)
        return True
| mit |
tvibliani/odoo | addons/report/models/report_paperformat.py | 311 | 7814 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from functools import partial
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
class report_paperformat(osv.Model):
    """Paper format used when rendering reports: page size (a predefined
    name or custom width/height), margins, orientation and output DPI.
    """
    _name = "report.paperformat"
    _description = "Allows customization of a report."

    _columns = {
        'name': fields.char('Name', required=True),
        'default': fields.boolean('Default paper format ?'),
        # NOTE(review): the numbers embedded in the labels look like the
        # wkhtmltopdf/QPrinter page-size enum values -- confirm.
        'format': fields.selection(
            [('A0', 'A0 5 841 x 1189 mm'),
             ('A1', 'A1 6 594 x 841 mm'),
             ('A2', 'A2 7 420 x 594 mm'),
             ('A3', 'A3 8 297 x 420 mm'),
             ('A4', 'A4 0 210 x 297 mm, 8.26 x 11.69 inches'),
             ('A5', 'A5 9 148 x 210 mm'),
             ('A6', 'A6 10 105 x 148 mm'),
             ('A7', 'A7 11 74 x 105 mm'),
             ('A8', 'A8 12 52 x 74 mm'),
             ('A9', 'A9 13 37 x 52 mm'),
             ('B0', 'B0 14 1000 x 1414 mm'),
             ('B1', 'B1 15 707 x 1000 mm'),
             ('B2', 'B2 17 500 x 707 mm'),
             ('B3', 'B3 18 353 x 500 mm'),
             ('B4', 'B4 19 250 x 353 mm'),
             ('B5', 'B5 1 176 x 250 mm, 6.93 x 9.84 inches'),
             ('B6', 'B6 20 125 x 176 mm'),
             ('B7', 'B7 21 88 x 125 mm'),
             ('B8', 'B8 22 62 x 88 mm'),
             ('B9', 'B9 23 33 x 62 mm'),
             ('B10', ':B10 16 31 x 44 mm'),
             ('C5E', 'C5E 24 163 x 229 mm'),
             ('Comm10E', 'Comm10E 25 105 x 241 mm, U.S. '
                         'Common 10 Envelope'),
             ('DLE', 'DLE 26 110 x 220 mm'),
             ('Executive', 'Executive 4 7.5 x 10 inches, '
                           '190.5 x 254 mm'),
             ('Folio', 'Folio 27 210 x 330 mm'),
             ('Ledger', 'Ledger 28 431.8 x 279.4 mm'),
             ('Legal', 'Legal 3 8.5 x 14 inches, '
                       '215.9 x 355.6 mm'),
             ('Letter', 'Letter 2 8.5 x 11 inches, '
                        '215.9 x 279.4 mm'),
             ('Tabloid', 'Tabloid 29 279.4 x 431.8 mm'),
             ('custom', 'Custom')],
            'Paper size',
            help="Select Proper Paper size"),
        'margin_top': fields.integer('Top Margin (mm)'),
        'margin_bottom': fields.integer('Bottom Margin (mm)'),
        'margin_left': fields.integer('Left Margin (mm)'),
        'margin_right': fields.integer('Right Margin (mm)'),
        # page_height/page_width are only meaningful when format == 'custom'
        # (enforced by _check_format_or_page below).
        'page_height': fields.integer('Page height (mm)'),
        'page_width': fields.integer('Page width (mm)'),
        'orientation': fields.selection([('Landscape', 'Landscape'),
                                         ('Portrait', 'Portrait')],
                                        'Orientation'),
        'header_line': fields.boolean('Display a header line'),
        'header_spacing': fields.integer('Header spacing'),
        'dpi': fields.integer('Output DPI', required=True),
        'report_ids': fields.one2many('ir.actions.report.xml',
                                      'paperformat_id',
                                      'Associated reports',
                                      help="Explicitly associated reports"),
    }

    def _check_format_or_page(self, cr, uid, ids, context=None):
        """Constraint: a predefined format excludes explicit dimensions."""
        for paperformat in self.browse(cr, uid, ids, context=context):
            if paperformat.format != 'custom' and (paperformat.page_width or
                                                   paperformat.page_height):
                return False
        return True

    _constraints = [
        # Typo fixed in the user-facing message: "speficic" -> "specific".
        (_check_format_or_page, 'Error ! You cannot select a format AND '
                                'specific page width/height.', ['format']),
    ]

    _defaults = {
        'format': 'A4',
        'margin_top': 40,
        'margin_bottom': 20,
        'margin_left': 7,
        'margin_right': 7,
        'page_height': False,
        'page_width': False,
        'orientation': 'Landscape',
        'header_line': False,
        'header_spacing': 35,
        'dpi': 90,
    }
class res_company(osv.Model):
    _inherit = 'res.company'

    _columns = {
        'paperformat_id': fields.many2one('report.paperformat',
                                          'Paper format'),
    }

    def init(self, cr):
        """Backfill a default paper format (derived from the legacy RML
        paper format) on companies that do not have one yet."""
        ref = partial(self.pool['ir.model.data'].xmlid_to_res_id,
                      cr, SUPERUSER_ID)
        company_ids = self.search(
            cr, SUPERUSER_ID, [('paperformat_id', '=', False)])
        for company in self.browse(cr, SUPERUSER_ID, company_ids):
            rml_to_paperformat = {
                'a4': ref('report.paperformat_euro'),
                'us_letter': ref('report.paperformat_us'),
            }
            paperformat_id = rml_to_paperformat.get(company.rml_paper_format)
            if not paperformat_id:
                # Unknown/unset RML format: fall back to the euro (A4) one.
                paperformat_id = ref('report.paperformat_euro')
            if paperformat_id:
                company.write({'paperformat_id': paperformat_id})
        parent = super(res_company, self)
        if hasattr(parent, 'init'):
            parent.init(cr)
class ir_actions_report(osv.Model):
    _inherit = 'ir.actions.report.xml'

    def associated_view(self, cr, uid, ids, context):
        """Used in the ir.actions.report.xml form view in order to search
        naively after the view(s) used in the rendering.

        Returns an ``ir.actions.act_window`` dict restricted to the qweb
        views whose name matches the report, or ``False`` when the lookup
        fails for any reason (best-effort helper for the UI).
        """
        try:
            report_name = self.browse(cr, uid, ids[0], context).report_name
            act_window_obj = self.pool.get('ir.actions.act_window')
            view_action = act_window_obj.for_xml_id(
                cr, uid, 'base', 'action_ui_view', context=context)
            view_action['domain'] = [
                ('name', 'ilike', report_name.split('.')[1]),
                ('type', '=', 'qweb'),
            ]
            return view_action
        except Exception:
            # Was a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt. Keep the best-effort contract (False on
            # any lookup error) without trapping exit signals.
            return False

    _columns = {'paperformat_id': fields.many2one('report.paperformat',
                                                  'Paper format')}
| agpl-3.0 |
aiguofer/bokeh | bokeh/plotting/figure.py | 3 | 18251 | from __future__ import absolute_import, print_function
import logging
logger = logging.getLogger(__name__)
from six import string_types
from ..core.properties import Auto, Either, Enum, Float, Int, Seq, Instance, String, Tuple
from ..core.enums import HorizontalLocation, VerticalLocation
from ..models import Plot
from ..models.annotations import Title
from ..models.ranges import Range
from ..models.tools import Tool
from ..models import glyphs, markers
from ..util.options import Options
from ..util._plot_arg_helpers import _convert_responsive
from .helpers import _get_range, _process_axis_and_grid, _process_tools_arg, _glyph_function, _process_active_tools
DEFAULT_TOOLS = "pan,wheel_zoom,box_zoom,save,reset,help"
class FigureOptions(Options):
    ''' Initialization options accepted by :class:`Figure` in addition to
    the standard plot properties.

    Fix: the ``y_range`` help text previously said "x-range".
    '''

    tools = Either(String, Seq(Either(String, Instance(Tool))), default=DEFAULT_TOOLS, help="""
    Tools the plot should start with.
    """)

    x_range = Either(Tuple(Float, Float), Seq(String), Instance(Range), help="""
    Customize the x-range of the plot.
    """)

    y_range = Either(Tuple(Float, Float), Seq(String), Instance(Range), help="""
    Customize the y-range of the plot.
    """)

    x_minor_ticks = Either(Auto, Int, default="auto", help="""
    Number of minor ticks between adjacent x-axis major ticks.
    """)

    y_minor_ticks = Either(Auto, Int, default="auto", help="""
    Number of minor ticks between adjacent y-axis major ticks.
    """)

    x_axis_location = Enum(VerticalLocation, default="below", help="""
    Where the x-axis should be located.
    """)

    y_axis_location = Enum(HorizontalLocation, default="left", help="""
    Where the y-axis should be located.
    """)

    x_axis_label = String(default="", help="""
    A label for the x-axis.
    """)

    y_axis_label = String(default="", help="""
    A label for the y-axis.
    """)

    active_drag = Either(Auto, String, Instance(Tool), default="auto", help="""
    Which drag tool should initially be active.
    """)

    active_scroll = Either(Auto, String, Instance(Tool), default="auto", help="""
    Which scroll tool should initially be active.
    """)

    active_tap = Either(Auto, String, Instance(Tool), default="auto", help="""
    Which tap tool should initially be active.
    """)

    x_axis_type = Either(Auto, Enum("linear", "log", "datetime"), default="auto", help="""
    The type of the x-axis.
    """)

    y_axis_type = Either(Auto, Enum("linear", "log", "datetime"), default="auto", help="""
    The type of the y-axis.
    """)
class Figure(Plot):
''' A subclass of :class:`~bokeh.models.plots.Plot` that simplifies plot
creation with default axes, grids, tools, etc.
In addition to all the Bokeh model property attributes documented below,
the ``Figure`` initializer also accepts the following options, which can
help simplify configuration:
.. bokeh-options:: FigureOptions
:module: bokeh.plotting.figure
'''
__subtype__ = "Figure"
__view_model__ = "Plot"
def __init__(self, *arg, **kw):
opts = FigureOptions(kw)
title = kw.get("title", None)
if isinstance(title, string_types):
kw['title'] = Title(text=title)
super(Figure, self).__init__(*arg, **kw)
self.x_range = _get_range(opts.x_range)
self.y_range = _get_range(opts.y_range)
_process_axis_and_grid(self, opts.x_axis_type, opts.x_axis_location, opts.x_minor_ticks, opts.x_axis_label, self.x_range, 0)
_process_axis_and_grid(self, opts.y_axis_type, opts.y_axis_location, opts.y_minor_ticks, opts.y_axis_label, self.y_range, 1)
tool_objs, tool_map = _process_tools_arg(self, opts.tools)
self.add_tools(*tool_objs)
_process_active_tools(self.toolbar, tool_map, opts.active_drag, opts.active_scroll, opts.active_tap)
annular_wedge = _glyph_function(glyphs.AnnularWedge)
annulus = _glyph_function(glyphs.Annulus, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.annulus(x=[1, 2, 3], y=[1, 2, 3], color="#7FC97F",
inner_radius=0.2, outer_radius=0.5)
show(plot)
""")
arc = _glyph_function(glyphs.Arc)
asterisk = _glyph_function(markers.Asterisk, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.asterisk(x=[1,2,3], y=[1,2,3], size=20, color="#F0027F")
show(plot)
""")
bezier = _glyph_function(glyphs.Bezier)
circle = _glyph_function(markers.Circle, """
.. note::
Only one of ``size`` or ``radius`` should be provided. Note that ``radius``
defaults to data units.
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.circle(x=[1, 2, 3], y=[1, 2, 3], size=20)
show(plot)
""")
circle_cross = _glyph_function(markers.CircleCross, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.circle_cross(x=[1,2,3], y=[4,5,6], size=20,
color="#FB8072", fill_alpha=0.2, line_width=2)
show(plot)
""")
circle_x = _glyph_function(markers.CircleX, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.circle_x(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#DD1C77", fill_alpha=0.2)
show(plot)
""")
cross = _glyph_function(markers.Cross, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.cross(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#E6550D", line_width=2)
show(plot)
""")
diamond = _glyph_function(markers.Diamond, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.diamond(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#1C9099", line_width=2)
show(plot)
""")
diamond_cross = _glyph_function(markers.DiamondCross, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.diamond_cross(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#386CB0", fill_color=None, line_width=2)
show(plot)
""")
hbar = _glyph_function(glyphs.HBar, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.hbar(y=[1, 2, 3], height=0.5, left=0, right=[1,2,3], color="#CAB2D6")
show(plot)
""")
ellipse = _glyph_function(glyphs.Ellipse, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.ellipse(x=[1, 2, 3], y=[1, 2, 3], width=30, height=20,
color="#386CB0", fill_color=None, line_width=2)
show(plot)
""")
image = _glyph_function(glyphs.Image, """
.. note::
If both ``palette`` and ``color_mapper`` are passed, a ``ValueError``
exception will be raised. If neither is passed, then the ``Greys9``
palette will be used as a default.
""")
# Glyph-method factories.  Each assignment below manufactures a plotting
# method from a glyph/marker model class via _glyph_function; the optional
# string argument is appended to the generated method's docstring (Sphinx
# notes and .. bokeh-plot:: examples).  NOTE(review): indentation of this
# region was lost upstream; these appear to be class-level attributes of a
# plotting figure class whose header is outside this chunk — confirm.
image_rgba = _glyph_function(glyphs.ImageRGBA, """
.. note::
The ``image_rgba`` method accepts images as a two-dimensional array of RGBA
values (encoded as 32-bit integers).
""")
image_url = _glyph_function(glyphs.ImageURL)
inverted_triangle = _glyph_function(markers.InvertedTriangle, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.inverted_triangle(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#DE2D26")
show(plot)
""")
line = _glyph_function(glyphs.Line, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
p = figure(title="line", plot_width=300, plot_height=300)
p.line(x=[1, 2, 3, 4, 5], y=[6, 7, 2, 4, 5])
show(p)
""")
multi_line = _glyph_function(glyphs.MultiLine, """
.. note::
For this glyph, the data is not simply an array of scalars, it is an
"array of arrays".
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
p = figure(plot_width=300, plot_height=300)
p.multi_line(xs=[[1, 2, 3], [2, 3, 4]], ys=[[6, 7, 2], [4, 5, 7]],
color=['red','green'])
show(p)
""")
oval = _glyph_function(glyphs.Oval, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.oval(x=[1, 2, 3], y=[1, 2, 3], width=0.2, height=0.4,
angle=-0.7, color="#1D91C0")
show(plot)
""")
patch = _glyph_function(glyphs.Patch, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
p = figure(plot_width=300, plot_height=300)
p.patch(x=[1, 2, 3, 2], y=[6, 7, 2, 2], color="#99d8c9")
show(p)
""")
patches = _glyph_function(glyphs.Patches, """
.. note::
For this glyph, the data is not simply an array of scalars, it is an
"array of arrays".
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
p = figure(plot_width=300, plot_height=300)
p.patches(xs=[[1,2,3],[4,5,6,5]], ys=[[1,2,1],[4,5,5,4]],
color=["#43a2ca", "#a8ddb5"])
show(p)
""")
quad = _glyph_function(glyphs.Quad, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.quad(top=[2, 3, 4], bottom=[1, 2, 3], left=[1, 2, 3],
right=[1.2, 2.5, 3.7], color="#B3DE69")
show(plot)
""")
quadratic = _glyph_function(glyphs.Quadratic)
ray = _glyph_function(glyphs.Ray, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.ray(x=[1, 2, 3], y=[1, 2, 3], length=45, angle=-0.7, color="#FB8072",
line_width=2)
show(plot)
""")
rect = _glyph_function(glyphs.Rect, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.rect(x=[1, 2, 3], y=[1, 2, 3], width=10, height=20, color="#CAB2D6",
width_units="screen", height_units="screen")
show(plot)
""")
segment = _glyph_function(glyphs.Segment, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.segment(x0=[1, 2, 3], y0=[1, 2, 3], x1=[1, 2, 3],
y1=[1.2, 2.5, 3.7], color="#F4A582",
line_width=3)
show(plot)
""")
square = _glyph_function(markers.Square, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.square(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,30], color="#74ADD1")
show(plot)
""")
square_cross = _glyph_function(markers.SquareCross, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.square_cross(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#7FC97F",fill_color=None, line_width=2)
show(plot)
""")
square_x = _glyph_function(markers.SquareX, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.square_x(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#FDAE6B",fill_color=None, line_width=2)
show(plot)
""")
text = _glyph_function(glyphs.Text, """
.. note::
The location and angle of the text relative to the ``x``, ``y`` coordinates
is indicated by the alignment and baseline text properties.
Returns:
GlyphRenderer
""")
triangle = _glyph_function(markers.Triangle, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.triangle(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#99D594", line_width=2)
show(plot)
""")
vbar = _glyph_function(glyphs.VBar, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.vbar(x=[1, 2, 3], width=0.5, bottom=0, top=[1,2,3], color="#CAB2D6")
show(plot)
""")
wedge = _glyph_function(glyphs.Wedge, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.wedge(x=[1, 2, 3], y=[1, 2, 3], radius=15, start_angle=0.6,
end_angle=4.1, radius_units="screen", color="#2b8cbe")
show(plot)
""")
x = _glyph_function(markers.X, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.x(x=[1, 2, 3], y=[1, 2, 3], size=[10, 20, 25], color="#fa9fb5")
show(plot)
""")
def scatter(self, *args, **kwargs):
    """Create a scatter plot of the given x and y items.

    Args:
        x (str or seq[float]) : values or field names of center x coordinates
        y (str or seq[float]) : values or field names of center y coordinates
        size (str or list[float]) : values or field names of sizes in screen units
        marker (str, optional): a valid marker_type, defaults to "circle"
        color (color value, optional): shorthand to set both fill and line color
        source (:class:`~bokeh.models.sources.ColumnDataSource`) : a user-supplied
            data source; converted to a ``ColumnDataSource`` if needed, or created
            automatically when omitted.
        **kwargs: :ref:`userguide_styling_line_properties` and
            :ref:`userguide_styling_fill_properties`

    Examples:

        >>> p.scatter([1,2,3],[4,5,6], fill_color="red")
        >>> p.scatter("data1", "data2", source=data_source, ...)

    """
    marker = kwargs.pop("marker", "circle")
    if marker not in _marker_types:
        raise ValueError("Invalid marker type '%s'. Use markers() to see a list of valid marker types." % marker)
    # Legacy single-character shorthands accepted for backward compatibility.
    # TODO (bev) make better when plotting.scatter is removed
    shorthands = {
        "*": "asterisk",
        "+": "cross",
        "o": "circle",
        "ox": "circle_x",
        "o+": "circle_cross",
    }
    marker = shorthands.get(marker, marker)
    # Dispatch to the concrete marker method (e.g. self.circle(...)).
    return getattr(self, marker)(*args, **kwargs)
def figure(**kwargs):
    ''' Create a new :class:`~bokeh.plotting.figure.Figure` for plotting.

    In addition to the standard :class:`~bokeh.plotting.figure.Figure`
    property values (e.g. ``plot_width`` or ``sizing_mode``) the following
    additional options can be passed as well:

    .. bokeh-options:: FigureOptions
        :module: bokeh.plotting.figure

    Returns:
        Figure

    '''
    # Reject ambiguous alias pairs before any normalization.
    if 'plot_width' in kwargs and 'width' in kwargs:
        raise ValueError("figure() called with both 'plot_width' and 'width' supplied, supply only one")
    if 'plot_height' in kwargs and 'height' in kwargs:
        raise ValueError("figure() called with both 'plot_height' and 'height' supplied, supply only one")

    # Normalize the short aliases onto the canonical property names.
    for alias, canonical in (('height', 'plot_height'), ('width', 'plot_width')):
        if alias in kwargs:
            kwargs[canonical] = kwargs.pop(alias)

    if 'responsive' in kwargs and 'sizing_mode' in kwargs:
        raise ValueError("figure() called with both 'responsive' and 'sizing_mode' supplied, supply only one")
    if 'responsive' in kwargs:
        # Deprecated 'responsive' flag is translated to a sizing mode.
        kwargs['sizing_mode'] = _convert_responsive(kwargs.pop('responsive'))

    return Figure(**kwargs)
# Names accepted by scatter()'s ``marker`` keyword.  The trailing
# single/double-character entries are legacy shorthands that scatter()
# remaps onto the full names before dispatching.
_marker_types = [
    "asterisk",
    "circle",
    "circle_cross",
    "circle_x",
    "cross",
    "diamond",
    "diamond_cross",
    "inverted_triangle",
    "square",
    "square_x",
    "square_cross",
    "triangle",
    "x",
    "*",
    "+",
    "o",
    "ox",
    "o+",
]
def markers():
    """ Prints a list of valid marker types for scatter()

    Returns:
        None
    """
    listing = "\n - ".join(_marker_types)
    print("Available markers: \n - " + listing)
_color_fields = set(["color", "fill_color", "line_color"])
_alpha_fields = set(["alpha", "fill_alpha", "line_alpha"])
| bsd-3-clause |
4doemaster/enigma2 | lib/python/Plugins/SystemPlugins/SoftwareManager/BackupRestore.py | 2 | 14679 | from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Screens.Console import Console
from Components.ActionMap import ActionMap, NumberActionMap
from Components.Pixmap import Pixmap
from Components.Label import Label
from Components.Sources.StaticText import StaticText
from Components.MenuList import MenuList
from Components.config import getConfigListEntry, configfile, ConfigSelection, ConfigSubsection, ConfigText, ConfigLocations
from Components.config import config
from Components.ConfigList import ConfigList,ConfigListScreen
from Components.FileList import MultiFileSelectList
from Plugins.Plugin import PluginDescriptor
from enigma import eTimer, eEnv
from Tools.Directories import *
from os import popen, path, makedirs, listdir, access, stat, rename, remove, W_OK, R_OK
from time import gmtime, strftime, localtime
from datetime import date
# Persistent settings for the configuration-backup plugin.
config.plugins.configurationbackup = ConfigSubsection()
# Directory under which a 'backup' subfolder will hold the archives.
config.plugins.configurationbackup.backuplocation = ConfigText(default = '/media/hdd/', visible_width = 50, fixed_size = False)
# Files and directories included in the backup archive by default.
config.plugins.configurationbackup.backupdirs = ConfigLocations(default=[eEnv.resolve('${sysconfdir}/enigma2/'), '/etc/network/interfaces', '/etc/wpa_supplicant.conf', '/etc/wpa_supplicant.ath0.conf', '/etc/wpa_supplicant.wlan0.conf', '/etc/resolv.conf', '/etc/default_gw', '/etc/hostname', '/etc/CCcam.cfg', '/usr/keys/mg_cfg'])
def getBackupPath():
    """Return the 'backup' directory below the configured backup location."""
    location = config.plugins.configurationbackup.backuplocation.value
    if not location.endswith('/'):
        location += '/'
    return location + 'backup'
def getBackupFilename():
    """Return the fixed archive name used for settings backups."""
    return "enigma2settingsbackup.tar.gz"
class BackupScreen(Screen, ConfigListScreen):
    """Screen that archives the configured backup locations into a tar.gz
    under the backup directory, optionally starting as soon as it is shown.
    """
    skin = """
<screen position="135,144" size="350,310" title="Backup is running" >
<widget name="config" position="10,10" size="330,250" transparent="1" scrollbarMode="showOnDemand" />
</screen>"""

    def __init__(self, session, runBackup = False):
        # runBackup: when True the backup starts as soon as the screen shows.
        Screen.__init__(self, session)
        self.session = session
        self.runBackup = runBackup
        self["actions"] = ActionMap(["WizardActions", "DirectionActions"],
        {
            "ok": self.close,
            "back": self.close,
            "cancel": self.close,
        }, -1)
        self.finished_cb = None
        self.backuppath = getBackupPath()
        self.backupfile = getBackupFilename()
        self.fullbackupfilename = self.backuppath + "/" + self.backupfile
        self.list = []
        ConfigListScreen.__init__(self, self.list)
        self.onLayoutFinish.append(self.layoutFinished)
        if self.runBackup:
            self.onShown.append(self.doBackup)

    def layoutFinished(self):
        self.setWindowTitle()

    def setWindowTitle(self):
        self.setTitle(_("Backup is running..."))

    # iq [
    def addSkinToBackupDirs(self):
        # Append the active (non-default) skin's files to self.backupdirs so
        # the skin survives a restore.  Relies on shelling out to grep/opkg.
        from os import system as os_system, path as os_path
        # if not default skin
        if os_system("grep config.skin.primary_skin /etc/enigma2/settings 1>/dev/null") == 0 and config.skin.primary_skin.value != "skin.xml":
            skinName = config.skin.primary_skin.value[:-9]
            skinDir = "/usr/share/enigma2/%s" % skinName
            skinFiles = skinDir + "/.skin_files"
            if os_path.exists(skinFiles):
                print "maybe skin from backup, not from opkg"
            else:
                if os_system("grep %s /usr/lib/opkg/info/*.list 1>/dev/null" % skinName) == 0:
                    # Record the opkg-owned file list so later backups know it.
                    os_system("opkg files `opkg search %s | awk '{print $1}'` > %s" % ("/usr/share/enigma2/"+config.skin.primary_skin.value, skinFiles))
                    self.backupdirs = self.backupdirs + " " + skinFiles
                else:
                    print "skin is not from backup and not from opkg, can not know skin files"
                    return
            self.backupdirs = self.backupdirs + " " + skinDir
            for line in open(skinFiles).readlines():
                # Include skin files installed outside the skin directory.
                if os_path.exists(line.split("\n")[0]) and not line.startswith(skinDir):
                    self.backupdirs = self.backupdirs + " " + line.split("\n")[0]
    # ]

    def doBackup(self):
        # Save settings, rotate a previous archive (date-prefixed), then run
        # tar via a Console screen; on unwritable destination show a message.
        configfile.save()
        try:
            if (path.exists(self.backuppath) == False):
                makedirs(self.backuppath)
            # iq [
            self.backupdirs = ' '.join( config.plugins.configurationbackup.backupdirs.value )
            # self.backupdirs = ""
            # from os import path as os_path
            # for backupdir in config.plugins.configurationbackup.backupdirs.value:
            # if os_path.exists(backupdir):
            # self.backupdirs = self.backupdirs + " " + backupdir
            self.addSkinToBackupDirs()
            # ]
            if path.exists(self.fullbackupfilename):
                # Keep the old archive under a creation-date prefix.
                dt = str(date.fromtimestamp(stat(self.fullbackupfilename).st_ctime))
                self.newfilename = self.backuppath + "/" + dt + '-' + self.backupfile
                if path.exists(self.newfilename):
                    remove(self.newfilename)
                rename(self.fullbackupfilename,self.newfilename)
            if self.finished_cb:
                self.session.openWithCallback(self.finished_cb, Console, title = _("Backup is running..."), cmdlist = ["tar -czvf /tmp/" + self.backupfile + " " + self.backupdirs, "mv /tmp/" + self.backupfile + " " + self.fullbackupfilename, "sync"], finishedCallback = self.backupFinishedCB,closeOnSuccess = True)
            else:
                self.session.open(Console, title = _("Backup is running..."), cmdlist = ["tar -czvf /tmp/" + self.backupfile + " " + self.backupdirs, "mv /tmp/" + self.backupfile + " " + self.fullbackupfilename, "sync"], finishedCallback = self.backupFinishedCB, closeOnSuccess = True)
        except OSError:
            if self.finished_cb:
                self.session.openWithCallback(self.finished_cb, MessageBox, _("Sorry your backup destination is not writeable.\nPlease choose an other one."), MessageBox.TYPE_INFO, timeout = 10 )
            else:
                self.session.openWithCallback(self.backupErrorCB,MessageBox, _("Sorry your backup destination is not writeable.\nPlease choose an other one."), MessageBox.TYPE_INFO, timeout = 10 )

    def backupFinishedCB(self,retval = None):
        self.close(True)

    def backupErrorCB(self,retval = None):
        self.close(False)

    def runAsync(self, finished_cb):
        # Entry point for callers that want a completion callback.
        self.finished_cb = finished_cb
        self.doBackup()
class BackupSelection(Screen):
    """File-browser screen for selecting the files/folders to back up;
    the selection is persisted into config.plugins.configurationbackup.backupdirs.
    """
    skin = """
<screen name="BackupSelection" position="center,center" size="560,400" title="Select files/folders to backup">
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget name="checkList" position="5,50" size="550,250" transparent="1" scrollbarMode="showOnDemand" />
</screen>"""

    def __init__(self, session):
        Screen.__init__(self, session)
        self["key_red"] = StaticText(_("Cancel"))
        self["key_green"] = StaticText(_("Save"))
        self["key_yellow"] = StaticText()
        # Start from the currently configured selection.
        self.selectedFiles = config.plugins.configurationbackup.backupdirs.value
        defaultDir = '/'
        # System directories that must never be offered for backup.
        inhibitDirs = ["/bin", "/boot", "/dev", "/autofs", "/lib", "/proc", "/sbin", "/sys", "/hdd", "/tmp", "/mnt", "/media"]
        self.filelist = MultiFileSelectList(self.selectedFiles, defaultDir, inhibitDirs = inhibitDirs )
        self["checkList"] = self.filelist
        self["actions"] = ActionMap(["DirectionActions", "OkCancelActions", "ShortcutActions"],
        {
            "cancel": self.exit,
            "red": self.exit,
            "yellow": self.changeSelectionState,
            "green": self.saveSelection,
            "ok": self.okClicked,
            "left": self.left,
            "right": self.right,
            "down": self.down,
            "up": self.up
        }, -1)
        if not self.selectionChanged in self["checkList"].onSelectionChanged:
            self["checkList"].onSelectionChanged.append(self.selectionChanged)
        self.onLayoutFinish.append(self.layoutFinished)

    def layoutFinished(self):
        idx = 0
        self["checkList"].moveToIndex(idx)
        self.setWindowTitle()
        self.selectionChanged()

    def setWindowTitle(self):
        self.setTitle(_("Select files/folders to backup"))

    def selectionChanged(self):
        # Toggle the yellow-button label to match the highlighted entry.
        current = self["checkList"].getCurrent()[0]
        if current[2] is True:
            self["key_yellow"].setText(_("Deselect"))
        else:
            self["key_yellow"].setText(_("Select"))

    def up(self):
        self["checkList"].up()

    def down(self):
        self["checkList"].down()

    def left(self):
        self["checkList"].pageUp()

    def right(self):
        self["checkList"].pageDown()

    def changeSelectionState(self):
        self["checkList"].changeSelectionState()
        self.selectedFiles = self["checkList"].getSelectedList()

    def saveSelection(self):
        # Persist the chosen paths into the plugin configuration.
        self.selectedFiles = self["checkList"].getSelectedList()
        config.plugins.configurationbackup.backupdirs.value = self.selectedFiles
        config.plugins.configurationbackup.backupdirs.save()
        config.plugins.configurationbackup.save()
        config.save()
        self.close(None)

    def exit(self):
        self.close(None)

    def okClicked(self):
        # OK descends into the highlighted directory (if it is one).
        if self.filelist.canDescent():
            self.filelist.descent()
class RestoreMenu(Screen):
    """Screen listing existing *.tar.gz backup archives; green restores
    the highlighted archive (and restarts enigma2), yellow deletes it.
    """
    skin = """
<screen name="RestoreMenu" position="center,center" size="560,400" title="Restore backups" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget name="filelist" position="5,50" size="550,230" scrollbarMode="showOnDemand" />
</screen>"""

    def __init__(self, session, plugin_path):
        Screen.__init__(self, session)
        self.skin_path = plugin_path
        self["key_red"] = StaticText(_("Cancel"))
        self["key_green"] = StaticText(_("Restore"))
        self["key_yellow"] = StaticText(_("Delete"))
        self.sel = []
        self.val = []
        self.entry = False      # True once at least one archive was listed
        self.exe = False        # guards against re-entry while a job runs
        self.path = ""
        self["actions"] = NumberActionMap(["SetupActions"],
        {
            "ok": self.KeyOk,
            "cancel": self.keyCancel
        }, -1)
        self["shortcuts"] = ActionMap(["ShortcutActions"],
        {
            "red": self.keyCancel,
            "green": self.KeyOk,
            "yellow": self.deleteFile,
        })
        self.flist = []
        self["filelist"] = MenuList(self.flist)
        self.fill_list()
        self.onLayoutFinish.append(self.layoutFinished)

    def layoutFinished(self):
        self.setWindowTitle()

    def setWindowTitle(self):
        self.setTitle(_("Restore backups"))

    def fill_list(self):
        # Rebuild the menu from the *.tar.gz files in the backup directory,
        # creating the directory on first use.
        self.flist = []
        self.path = getBackupPath()
        if (path.exists(self.path) == False):
            makedirs(self.path)
        for file in listdir(self.path):
            if (file.endswith(".tar.gz")):
                self.flist.append((file))
                self.entry = True
        self["filelist"].l.setList(self.flist)

    def KeyOk(self):
        # Confirm, then restore the highlighted archive.
        if (self.exe == False) and (self.entry == True):
            self.sel = self["filelist"].getCurrent()
            if self.sel:
                self.val = self.path + "/" + self.sel
                self.session.openWithCallback(self.startRestore, MessageBox, _("Are you sure you want to restore\nfollowing backup:\n") + self.sel + _("\nSystem will restart after the restore!"))

    def keyCancel(self):
        self.close()

    def startRestore(self, ret = False):
        # Unpack over / and kill enigma2 so it restarts with the restored files.
        if (ret == True):
            self.exe = True
            # self.session.open(Console, title = _("Restore running"), cmdlist = ["tar -xzvf " + self.path + "/" + self.sel + " -C /", "killall -9 enigma2"])
            self.session.open(Console, title = _("Restore running"), cmdlist = ["cp " + self.path + "/" + self.sel + " /tmp/", "tar -xzvf /tmp/" + self.sel + " -C /", "killall -9 enigma2"])

    def deleteFile(self):
        # Confirm, then delete the highlighted archive.
        if (self.exe == False) and (self.entry == True):
            self.sel = self["filelist"].getCurrent()
            if self.sel:
                self.val = self.path + "/" + self.sel
                self.session.openWithCallback(self.startDelete, MessageBox, _("Are you sure you want to delete\nfollowing backup:\n") + self.sel)

    def startDelete(self, ret = False):
        if (ret == True):
            self.exe = True
            print "removing:",self.val
            if (path.exists(self.val) == True):
                remove(self.val)
        self.exe = False
        self.fill_list()
class RestoreScreen(Screen, ConfigListScreen):
    """Screen that restores the default backup archive over / and then
    kills enigma2 so it restarts with the restored configuration.
    """
    skin = """
<screen position="135,144" size="350,310" title="Restore is running..." >
<widget name="config" position="10,10" size="330,250" transparent="1" scrollbarMode="showOnDemand" />
</screen>"""

    def __init__(self, session, runRestore = False):
        # runRestore: when True the restore starts as soon as the screen shows.
        Screen.__init__(self, session)
        self.session = session
        self.runRestore = runRestore
        self["actions"] = ActionMap(["WizardActions", "DirectionActions"],
        {
            "ok": self.close,
            "back": self.close,
            "cancel": self.close,
        }, -1)
        self.finished_cb = None
        self.backuppath = getBackupPath()
        self.backupfile = getBackupFilename()
        self.fullbackupfilename = self.backuppath + "/" + self.backupfile
        self.list = []
        ConfigListScreen.__init__(self, self.list)
        self.onLayoutFinish.append(self.layoutFinished)
        if self.runRestore:
            self.onShown.append(self.doRestore)

    def layoutFinished(self):
        self.setWindowTitle()

    def setWindowTitle(self):
        self.setTitle(_("Restore is running..."))

    def doRestore(self):
        # On hardware exposing /proc/stb/vmpeg video-window controls, reset
        # them before restarting; otherwise just unpack and restart.
        if path.exists("/proc/stb/vmpeg/0/dst_width"):
            restorecmdlist = ["cp " + self.fullbackupfilename + " /tmp/", "tar -xzvf /tmp/" + self.backupfile + " -C /", "echo 0 > /proc/stb/vmpeg/0/dst_height", "echo 0 > /proc/stb/vmpeg/0/dst_left", "echo 0 > /proc/stb/vmpeg/0/dst_top", "echo 0 > /proc/stb/vmpeg/0/dst_width", "killall -9 enigma2"]
        else:
            restorecmdlist = ["cp " + self.fullbackupfilename + " /tmp/", "tar -xzvf /tmp/" + self.backupfile + " -C /", "killall -9 enigma2"]
        if self.finished_cb:
            self.session.openWithCallback(self.finished_cb, Console, title = _("Restore is running..."), cmdlist = restorecmdlist)
        else:
            self.session.open(Console, title = _("Restore is running..."), cmdlist = restorecmdlist)

    def backupFinishedCB(self,retval = None):
        self.close(True)

    def backupErrorCB(self,retval = None):
        self.close(False)

    def runAsync(self, finished_cb):
        # Entry point for callers that want a completion callback.
        self.finished_cb = finished_cb
        self.doRestore()
| gpl-2.0 |
cl4rke/scikit-learn | sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
    """Load a DocumentClassification MLComp dataset via ``load_files``.

    When ``set_`` is given (e.g. 'train'/'test'), load that subfolder of
    the dataset; otherwise load the dataset folder itself.
    """
    target = dataset_path if set_ is None else os.path.join(dataset_path, set_)
    return load_files(target, metadata.get('description'), **kwargs)
# Maps an MLComp dataset "format" metadata value to its loader function.
LOADERS = {
    'DocumentClassification': _load_document_classification,
    # TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
    """Load a datasets as downloaded from http://mlcomp.org

    Parameters
    ----------
    name_or_id : the integer id or the string name metadata of the MLComp
        dataset to load

    set_ : select the portion to load: 'train', 'test' or 'raw'

    mlcomp_root : the filesystem path to the root folder where MLComp datasets
        are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
        environment variable is looked up instead.

    **kwargs : domain specific kwargs to be passed to the dataset loader.

    Read more in the :ref:`User Guide <datasets>`.

    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        'filenames', the files holding the raw to learn, 'target', the
        classification labels (integer index), 'target_names',
        the meaning of the labels, and 'DESCR', the full description of the
        dataset.

    Raises ValueError when the root folder, the dataset, or a loader for the
    dataset's format cannot be found.

    Note on the lookup process: depending on the type of name_or_id,
    will choose between integer id lookup or metadata name lookup by
    looking at the unzipped archives and metadata file.

    TODO: implement zip dataset loading too
    """
    if mlcomp_root is None:
        try:
            mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
        except KeyError:
            raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")

    # Canonicalize the root path before any filesystem lookups.
    mlcomp_root = os.path.normpath(
        os.path.abspath(os.path.expanduser(mlcomp_root)))

    if not os.path.exists(mlcomp_root):
        raise ValueError("Could not find folder: " + mlcomp_root)

    # dataset lookup
    if isinstance(name_or_id, numbers.Integral):
        # id lookup: datasets are stored in folders named by their id
        dataset_path = os.path.join(mlcomp_root, str(name_or_id))
    else:
        # name based lookup: scan each dataset's metadata for a "name:" line
        dataset_path = None
        expected_name_line = "name: " + name_or_id
        for dataset in os.listdir(mlcomp_root):
            metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
            if not os.path.exists(metadata_file):
                continue
            with open(metadata_file) as f:
                for line in f:
                    if line.strip() == expected_name_line:
                        dataset_path = os.path.join(mlcomp_root, dataset)
                        break
        if dataset_path is None:
            raise ValueError("Could not find dataset with metadata line: " +
                             expected_name_line)

    # loading the dataset metadata ("key: value" lines)
    metadata = dict()
    metadata_file = os.path.join(dataset_path, 'metadata')
    if not os.path.exists(metadata_file):
        raise ValueError(dataset_path + ' is not a valid MLComp dataset')
    with open(metadata_file) as f:
        for line in f:
            if ":" in line:
                key, value = line.split(":", 1)
                metadata[key.strip()] = value.strip()

    # Fixed typo: fallback format label was 'unknow', which leaked into the
    # "No loader implemented" error message below.
    format = metadata.get('format', 'unknown')
    loader = LOADERS.get(format)
    if loader is None:
        raise ValueError("No loader implemented for format: " + format)
    return loader(dataset_path, metadata, set_=set_, **kwargs)
| bsd-3-clause |
mapbox/gyp | pylib/gyp/generator/gypd.py | 18 | 3382 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypd output module
This module produces gyp input as its output. Output files are given the
.gypd extension to avoid overwriting the .gyp files that they are generated
from. Internal references to .gyp files (such as those found in
"dependencies" sections) are not adjusted to point to .gypd files instead;
unlike other paths, which are relative to the .gyp or .gypd file, such paths
are relative to the directory from which gyp was run to create the .gypd file.
This generator module is intended to be a sample and a debugging aid, hence
the "d" for "debug" in .gypd. It is useful to inspect the results of the
various merges, expansions, and conditional evaluations performed by gyp
and to see a representation of what would be fed to a generator module.
It's not advisable to rename .gypd files produced by this module to .gyp,
because they will have all merges, expansions, and evaluations already
performed and the relevant constructs not present in the output; paths to
dependencies may be wrong; and various sections that do not belong in .gyp
files such as such as "included_files" and "*_excluded" will be present.
Output will also be stripped of comments. This is not intended to be a
general-purpose gyp pretty-printer; for that, you probably just want to
run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip
comments but won't do all of the other things done to this module's output.
The specific formatting of the output generated by this module is subject
to change.
"""
import gyp.common
import errno
import os
import pprint
# These variables should just be spit back out as variable references.
# These variables should just be spit back out as variable references,
# i.e. the gypd output keeps them unexpanded as '<(NAME)'.
_generator_identity_variables = [
    'CONFIGURATION_NAME',
    'EXECUTABLE_PREFIX',
    'EXECUTABLE_SUFFIX',
    'INTERMEDIATE_DIR',
    'LIB_DIR',
    'PRODUCT_DIR',
    'RULE_INPUT_ROOT',
    'RULE_INPUT_DIRNAME',
    'RULE_INPUT_EXT',
    'RULE_INPUT_NAME',
    'RULE_INPUT_PATH',
    'SHARED_INTERMEDIATE_DIR',
    'SHARED_LIB_DIR',
]
# gypd doesn't define a default value for OS like many other generator
# modules. Specify "-D OS=whatever" on the command line to provide a value.
generator_default_variables = {
}

# gypd supports multiple toolsets
generator_supports_multiple_toolsets = True

# TODO(mark): This always uses <, which isn't right. The input module should
# notify the generator to tell it which phase it is operating in, and this
# module should use < for the early phase and then switch to > for the late
# phase. Bonus points for carrying @ back into the output too.
for v in _generator_identity_variables:
  generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
  """Write one pretty-printed .gypd file per input .gyp file.

  For each qualified target, the owning .gyp file's fully processed data
  dict is dumped to '<stem><suffix>.gypd'.  Returns None.
  """
  # Map each output filename to the input .gyp it was derived from,
  # de-duplicating targets that share an input file.
  output_files = {}
  for qualified_target in target_list:
    [input_file, target] = \
        gyp.common.ParseQualifiedTarget(qualified_target)[0:2]

    if input_file[-4:] != '.gyp':
      continue
    input_file_stem = input_file[:-4]
    output_file = input_file_stem + params['options'].suffix + '.gypd'

    if output_file not in output_files:
      output_files[output_file] = input_file

  for output_file, input_file in output_files.items():
    # 'with' guarantees the file handle is closed even if pprint raises
    # (the original leaked the handle on error); items() also works on
    # both Python 2 and 3, unlike iteritems().
    with open(output_file, 'w') as output:
      pprint.pprint(data[input_file], output)
jalavik/invenio | invenio/ext/sqlalchemy/types/legacytinyinteger.py | 17 | 1812 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012, 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 52 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Platform-independent TinyInteger type."""
from sqlalchemy.types import Integer, TypeDecorator
from sqlalchemy.dialects.mysql import TINYINT
class LegacyTinyInteger(TypeDecorator):
    """Platform-independent TinyInteger type.

    On MySQL this is rendered as
    :class:`~sqlalchemy.dialects.mysql.TINYINT` (honoring display width and
    unsignedness); on every other dialect it falls back to the generic
    :class:`~sqlalchemy.types.Integer`.
    """

    impl = Integer

    def __init__(self, display_width=2, unsigned=False, **kwargs):
        """Keep the MySQL-only options; pass everything else through."""
        self.display_width = display_width
        self.unsigned = unsigned
        super(LegacyTinyInteger, self).__init__(**kwargs)

    def load_dialect_impl(self, dialect):
        """Return the dialect-specific type implementation."""
        if dialect.name != 'mysql':
            return dialect.type_descriptor(Integer)
        return dialect.type_descriptor(
            TINYINT(self.display_width, unsigned=self.unsigned))
Dandandan/wikiprogramming | jsrepl/extern/python/unclosured/lib/python2.7/ntpath.py | 81 | 18082 | # Module 'ntpath' -- common operations on WinNT/Win95 pathnames
"""Common pathname manipulations, WindowsNT/95 version.
Instead of importing this module directly, import os and refer to this
module as os.path.
"""
import os
import sys
import stat
import genericpath
import warnings
from genericpath import *
# Public API re-exported when callers do "from ntpath import *".
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
           "basename","dirname","commonprefix","getsize","getmtime",
           "getatime","getctime", "islink","exists","lexists","isdir","isfile",
           "ismount","walk","expanduser","expandvars","normpath","abspath",
           "splitunc","curdir","pardir","sep","pathsep","defpath","altsep",
           "extsep","devnull","realpath","supports_unicode_filenames","relpath"]

# strings representing various path-related bits and pieces
curdir = '.'
pardir = '..'
extsep = '.'
sep = '\\'
pathsep = ';'
altsep = '/'
defpath = '.;C:\\bin'
if 'ce' in sys.builtin_module_names:
    # Windows CE build: system binaries live under \Windows.
    defpath = '\\Windows'
elif 'os2' in sys.builtin_module_names:
    # OS/2 w/ VACPP
    altsep = '/'
devnull = 'nul'
def normcase(s):
    """Normalize case of pathname.

    Makes all characters lowercase and all slashes into backslashes."""
    # Order is irrelevant: lower() never touches slashes and the slash
    # replacement never touches letter case.
    return s.lower().replace("/", "\\")
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
# For DOS it is absolute if it starts with a slash or backslash (current
# volume), or if a pathname after the volume letter and colon / UNC resource
# starts with a slash or backslash.
def isabs(s):
    """Test whether a path is absolute"""
    # Absolute means the part after any drive letter starts with a slash.
    # Tuple membership makes the empty-string case False without an
    # explicit guard ('' would compare "in" the string "/\\").
    rest = splitdrive(s)[1]
    return rest[:1] in ('/', '\\')
# Join two (or more) paths.
def join(a, *p):
"""Join two or more pathname components, inserting "\\" as needed.
If any component is an absolute path, all previous path components
will be discarded."""
path = a
for b in p:
b_wins = 0 # set to 1 iff b makes path irrelevant
if path == "":
b_wins = 1
elif isabs(b):
# This probably wipes out path so far. However, it's more
# complicated if path begins with a drive letter:
# 1. join('c:', '/a') == 'c:/a'
# 2. join('c:/', '/a') == 'c:/a'
# But
# 3. join('c:/a', '/b') == '/b'
# 4. join('c:', 'd:/') = 'd:/'
# 5. join('c:/', 'd:/') = 'd:/'
if path[1:2] != ":" or b[1:2] == ":":
# Path doesn't start with a drive letter, or cases 4 and 5.
b_wins = 1
# Else path has a drive letter, and b doesn't but is absolute.
elif len(path) > 3 or (len(path) == 3 and
path[-1] not in "/\\"):
# case 3
b_wins = 1
if b_wins:
path = b
else:
# Join, and ensure there's a separator.
assert len(path) > 0
if path[-1] in "/\\":
if b and b[0] in "/\\":
path += b[1:]
else:
path += b
elif path[-1] == ":":
path += b
elif b:
if b[0] in "/\\":
path += b
else:
path += "\\" + b
else:
# path is not empty and does not end with a backslash,
# but b is empty; since, e.g., split('a/') produces
# ('a', ''), it's best if join() adds a backslash in
# this case.
path += '\\'
return path
# Split a path in a drive specification (a drive letter followed by a
# colon) and the path specification.
# It is always true that drivespec + pathspec == p
def splitdrive(p):
"""Split a pathname into drive and path specifiers. Returns a 2-tuple
"(drive,path)"; either part may be empty"""
if p[1:2] == ':':
return p[0:2], p[2:]
return '', p
# Parse UNC paths
def splitunc(p):
    """Split a pathname into UNC mount point and relative path specifiers.
    Return a 2-tuple (unc, rest); either part may be empty.
    If unc is not empty, it has the form '//host/mount' (or similar
    using backslashes). unc+rest is always the input path.
    Paths containing drive letters never have an UNC part.
    """
    if p[1:2] == ':':
        return '', p # Drive letter present
    firstTwo = p[0:2]
    if firstTwo == '//' or firstTwo == '\\\\':
        # is a UNC path:
        # vvvvvvvvvvvvvvvvvvvv equivalent to drive letter
        # \\machine\mountpoint\directories...
        # directory ^^^^^^^^^^^^^^^
        # Normalize so only backslashes need to be searched for.
        normp = normcase(p)
        index = normp.find('\\', 2)
        if index == -1:
            # No separator after the host part: not a well-formed UNC path.
            ##raise RuntimeError, 'illegal UNC path: "' + p + '"'
            return ("", p)
        # Advance past the share (mount point) name.
        index = normp.find('\\', index + 1)
        if index == -1:
            index = len(p)
        return p[:index], p[index:]
    return '', p
# Split a path in head (everything up to the last '/') and tail (the
# rest). After the trailing '/' is stripped, the invariant
# join(head, tail) == p holds.
# The resulting head won't end in '/' unless it is the root.
def split(p):
    """Split a pathname.
    Return tuple (head, tail) where tail is everything after the final slash.
    Either part may be empty."""
    drive, rest = splitdrive(p)
    # Walk backwards to find the position just past the last separator.
    cut = len(rest)
    while cut and rest[cut - 1] not in '/\\':
        cut = cut - 1
    head, tail = rest[:cut], rest[cut:]
    # Drop trailing separators from head, unless head is nothing but
    # separators (that denotes the root and must be kept).
    trimmed = head.rstrip('/\\')
    if trimmed:
        head = trimmed
    return drive + head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
    # Delegate to the shared implementation in genericpath, passing this
    # module's separator constants so extension detection honors both
    # kinds of slash.
    return genericpath._splitext(p, sep, altsep, extsep)
# Reuse the generic implementation's docstring for this wrapper.
splitext.__doc__ = genericpath._splitext.__doc__
# Return the tail (basename) part of a path.
def basename(p):
    """Returns the final component of a pathname"""
    # The tail half of split() is exactly the basename.
    head, tail = split(p)
    return tail
# Return the head (dirname) part of a path.
def dirname(p):
    """Returns the directory component of a pathname"""
    # The head half of split() is exactly the dirname.
    head, tail = split(p)
    return head
# Is a path a symbolic link?
# This will always return false on systems where posix.lstat doesn't exist.
def islink(path):
    """Test for symbolic link.
    On WindowsNT/95 and OS/2 always returns false
    """
    # Symbolic links are not supported here, so never report one.
    return False
# alias exists to lexists
# Without symlink support a lexists() that does not follow links is
# indistinguishable from plain exists().
lexists = exists
# Is a path a mount point? Either a root (with or without drive letter)
# or an UNC path with at most a / or \ after the mount point.
def ismount(path):
    """Test whether a path is a mount point (defined as root of drive)"""
    unc, rest = splitunc(path)
    if unc:
        # UNC share root: nothing, or at most one separator, may follow.
        return rest in ('', '/', '\\')
    tail = splitdrive(path)[1]
    # Otherwise the path (minus any drive) must be a single separator.
    return len(tail) == 1 and tail in '/\\'
# Directory tree walk.
# For each directory under top (including top itself, but excluding
# '.' and '..'), func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# of files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg):
    """Directory tree walk with callback function.
    For each directory in the directory tree rooted at top (including top
    itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
    dirname is the name of the directory, and fnames a list of the names of
    the files and subdirectories in dirname (excluding '.' and '..'). func
    may modify the fnames list in-place (e.g. via del or slice assignment),
    and walk will only recurse into the subdirectories whose names remain in
    fnames; this can be used to implement a filter, or to impose a specific
    order of visiting. No semantics are defined for, or required of, arg,
    beyond that arg is always passed to func. It can be used, e.g., to pass
    a filename pattern, or a mutable object designed to accumulate
    statistics. Passing None for arg is common."""
    # Python 2 deprecation notice: os.walk replaces this in 3.x.
    warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.",
                      stacklevel=2)
    try:
        names = os.listdir(top)
    except os.error:
        # Silently skip directories that cannot be listed.
        return
    func(arg, top, names)
    for name in names:
        name = join(top, name)
        if isdir(name):
            # Recurse only into entries still present in names after func
            # had a chance to prune the list.
            walk(name, func, arg)
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
    """Expand ~ and ~user constructs.
    If user or $HOME is unknown, do nothing."""
    if path[:1] != '~':
        return path
    # Find the end of the user name: the first path separator after '~'.
    i, n = 1, len(path)
    while i < n and path[i] not in '/\\':
        i = i + 1
    # Resolve the current user's home directory, trying HOME first,
    # then USERPROFILE, then HOMEDRIVE + HOMEPATH.
    if 'HOME' in os.environ:
        userhome = os.environ['HOME']
    elif 'USERPROFILE' in os.environ:
        userhome = os.environ['USERPROFILE']
    elif not 'HOMEPATH' in os.environ:
        # No usable home information: leave the path unexpanded.
        return path
    else:
        try:
            drive = os.environ['HOMEDRIVE']
        except KeyError:
            drive = ''
        userhome = join(drive, os.environ['HOMEPATH'])
    if i != 1: #~user
        # '~user': substitute the requested user name for the last
        # component of the current user's home directory.
        userhome = join(dirname(userhome), path[1:i])
    return userhome + path[i:]
# Expand paths containing shell variable substitutions.
# The following rules apply:
# - no expansion within single quotes
# - '$$' is translated into '$'
# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2%
# - ${varname} is accepted.
# - $varname is accepted.
# - %varname% is accepted.
# - varnames can be made out of letters, digits and the characters '_-'
# (though is not verified in the ${varname} and %varname% cases)
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.
def expandvars(path):
    """Expand shell variables of the forms $var, ${var} and %var%.
    Unknown variables are left unchanged."""
    # Fast path: nothing that could start a substitution.
    if '$' not in path and '%' not in path:
        return path
    import string
    varchars = string.ascii_letters + string.digits + '_-'
    res = ''
    index = 0
    pathlen = len(path)
    while index < pathlen:
        c = path[index]
        if c == '\'': # no expansion within single quotes
            # Copy everything through the closing quote verbatim; with no
            # closing quote the remainder of the path is copied as-is.
            path = path[index + 1:]
            pathlen = len(path)
            try:
                index = path.index('\'')
                res = res + '\'' + path[:index + 1]
            except ValueError:
                res = res + path
                index = pathlen - 1
        elif c == '%': # variable or '%'
            if path[index + 1:index + 2] == '%':
                # '%%' collapses to a single literal '%'.
                res = res + c
                index = index + 1
            else:
                path = path[index+1:]
                pathlen = len(path)
                try:
                    index = path.index('%')
                except ValueError:
                    # Unterminated %var: keep the text literally.
                    res = res + '%' + path
                    index = pathlen - 1
                else:
                    var = path[:index]
                    if var in os.environ:
                        res = res + os.environ[var]
                    else:
                        # Unknown variables are reproduced unchanged.
                        res = res + '%' + var + '%'
        elif c == '$': # variable or '$$'
            if path[index + 1:index + 2] == '$':
                # '$$' collapses to a single literal '$'.
                res = res + c
                index = index + 1
            elif path[index + 1:index + 2] == '{':
                path = path[index+2:]
                pathlen = len(path)
                try:
                    index = path.index('}')
                    var = path[:index]
                    if var in os.environ:
                        res = res + os.environ[var]
                    else:
                        res = res + '${' + var + '}'
                except ValueError:
                    # Unterminated ${var: keep the text literally.
                    res = res + '${' + path
                    index = pathlen - 1
            else:
                # Bare $var: consume the longest run of varchars.
                var = ''
                index = index + 1
                c = path[index:index + 1]
                while c != '' and c in varchars:
                    var = var + c
                    index = index + 1
                    c = path[index:index + 1]
                if var in os.environ:
                    res = res + os.environ[var]
                else:
                    res = res + '$' + var
                if c != '':
                    # Step back so the terminator is processed next pass.
                    index = index - 1
        else:
            res = res + c
        index = index + 1
    return res
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B.
# Previously, this function also truncated pathnames to 8+3 format,
# but as this module is called "ntpath", that's obviously wrong!
def normpath(path):
    """Normalize path, eliminating double slashes, etc."""
    # Preserve unicode (if path is unicode)
    backslash, dot = (u'\\', u'.') if isinstance(path, unicode) else ('\\', '.')
    if path.startswith(('\\\\.\\', '\\\\?\\')):
        # in the case of paths with these prefixes:
        # \\.\ -> device names
        # \\?\ -> literal paths
        # do not do any normalization, but return the path unchanged
        return path
    path = path.replace("/", "\\")
    prefix, path = splitdrive(path)
    # We need to be careful here. If the prefix is empty, and the path starts
    # with a backslash, it could either be an absolute path on the current
    # drive (\dir1\dir2\file) or a UNC filename (\\server\mount\dir1\file). It
    # is therefore imperative NOT to collapse multiple backslashes blindly in
    # that case.
    # The code below preserves multiple backslashes when there is no drive
    # letter. This means that the invalid filename \\\a\b is preserved
    # unchanged, where a\\\b is normalised to a\b. It's not clear that there
    # is any better behaviour for such edge cases.
    if prefix == '':
        # No drive letter - preserve initial backslashes
        while path[:1] == "\\":
            prefix = prefix + backslash
            path = path[1:]
    else:
        # We have a drive letter - collapse initial backslashes
        if path.startswith("\\"):
            prefix = prefix + backslash
            path = path.lstrip("\\")
    comps = path.split("\\")
    i = 0
    while i < len(comps):
        if comps[i] in ('.', ''):
            # Drop '.' entries and empties left by doubled separators.
            del comps[i]
        elif comps[i] == '..':
            if i > 0 and comps[i-1] != '..':
                # '..' cancels the preceding (non-'..') component.
                del comps[i-1:i+1]
                i -= 1
            elif i == 0 and prefix.endswith("\\"):
                # '..' at an absolute root climbs nowhere; discard it.
                del comps[i]
            else:
                i += 1
        else:
            i += 1
    # If the path is now empty, substitute '.'
    if not prefix and not comps:
        comps.append(dot)
    return prefix + backslash.join(comps)
# Return an absolute path.
# Pick the implementation at import time: the native nt helper when it is
# available, otherwise a portable fallback built on join/normpath.
try:
    from nt import _getfullpathname
except ImportError: # not running on Windows - mock up something sensible
    def abspath(path):
        """Return the absolute version of a path."""
        if not isabs(path):
            if isinstance(path, unicode):
                # Keep the result unicode when the input is (Python 2).
                cwd = os.getcwdu()
            else:
                cwd = os.getcwd()
            path = join(cwd, path)
        return normpath(path)
else: # use native Windows method on Windows
    def abspath(path):
        """Return the absolute version of a path."""
        if path: # Empty path must return current working directory.
            try:
                path = _getfullpathname(path)
            except WindowsError:
                pass # Bad path - return unchanged.
        elif isinstance(path, unicode):
            path = os.getcwdu()
        else:
            path = os.getcwd()
        return normpath(path)
# realpath is a no-op on systems without islink support
realpath = abspath
# Win9x family and earlier have no Unicode filename support.
# getwindowsversion()[3] is the platform field; values >= 2 appear to mean
# NT-based Windows — TODO confirm against the sys.getwindowsversion docs.
supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and
                              sys.getwindowsversion()[3] >= 2)
def _abspath_split(path):
    """Normalize *path* to absolute form and break it into pieces.

    Returns a (is_unc, prefix, components) triple where prefix is the UNC
    share or drive specifier and components holds the non-empty path
    segments."""
    normalized = abspath(normpath(path))
    prefix, rest = splitunc(normalized)
    is_unc = bool(prefix)
    if not is_unc:
        prefix, rest = splitdrive(normalized)
    pieces = [piece for piece in rest.split(sep) if piece]
    return is_unc, prefix, pieces
def relpath(path, start=curdir):
    """Return a relative version of a path"""
    if not path:
        raise ValueError("no path specified")
    start_is_unc, start_prefix, start_list = _abspath_split(start)
    path_is_unc, path_prefix, path_list = _abspath_split(path)
    # A relative path can only be computed within a single drive or share.
    if path_is_unc ^ start_is_unc:
        raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
                         % (path, start))
    if path_prefix.lower() != start_prefix.lower():
        if path_is_unc:
            raise ValueError("path is on UNC root %s, start on UNC root %s"
                             % (path_prefix, start_prefix))
        else:
            raise ValueError("path is on drive %s, start on drive %s"
                             % (path_prefix, start_prefix))
    # Work out how much of the filepath is shared by start and path.
    # Comparison is case-insensitive, like Windows filenames themselves.
    i = 0
    for e1, e2 in zip(start_list, path_list):
        if e1.lower() != e2.lower():
            break
        i += 1
    # Climb out of the unshared part of start, then descend into path.
    rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
    if not rel_list:
        return curdir
    return join(*rel_list)
| mit |
bluedazzle/django-simple-serializer | src/dss/Serializer.py | 1 | 6060 | # coding: utf-8
from __future__ import unicode_literals
import sys
PY2 = True
# NOTE(review): on interpreters older than Python 3 this branch runs and
# sets PY2 to False, the opposite of what the name suggests — confirm
# whether the flag actually means "py3-style str/int are in effect".
# The string comparison sys.version < '3' is also fragile; verify whether
# sys.version_info should be used instead.
if sys.version < '3':
    from future.builtins import str, int
    PY2 = False
import datetime
import json
from decimal import Decimal
from .TimeFormatFactory import TimeFormatFactory
try:
from django.db import models
from django.db.models import manager
from django.core.paginator import Page
from django.db.models.query import QuerySet
from django.db.models.fields.files import ImageFieldFile, FileField
except ImportError:
raise RuntimeError('django is required in django simple serializer')
class Serializer(object):
    """Convert Django ORM objects (models, querysets, managers, pages) and
    plain Python values into serializable dicts/lists, optionally rendered
    as JSON.

    Attribute filtering is controlled by include_attr/exclude_attr;
    relation traversal by the foreign/many/through flags.
    """
    # Whitelist of attribute names to emit (empty means "no restriction").
    include_attr = []
    # Blacklist of attribute names to skip.
    exclude_attr = []
    # Result of the last data_format() run.
    objects = []
    # The raw input handed to the constructor.
    origin_data = None
    # One of 'raw', 'dict' or 'json' (see get_values).
    output_type = 'raw'
    # Name of the datetime rendering strategy (resolved via TimeFormatFactory).
    datetime_format = 'timestamp'
    # Whether to follow foreign-key fields.
    foreign = False
    # Whether to follow many-to-many fields.
    many = False
    # Whether to serialize extra fields on explicit m2m "through" models.
    through = True
    def __init__(self, data, datetime_format='timestamp', output_type='raw', include_attr=None, exclude_attr=None,
                 foreign=False, many=False, through=True, *args, **kwargs):
        """Store the data and options; resolve the datetime formatter.

        The optional keyword-only flag dict_check disables attribute
        filtering inside plain dicts.
        """
        if include_attr:
            self.include_attr = include_attr
        if exclude_attr:
            self.exclude_attr = exclude_attr
        self.origin_data = data
        self.output_type = output_type
        self.foreign = foreign
        self.many = many
        self.through = through
        self.through_fields = []
        self.source_field = None
        self.datetime_format = datetime_format
        self.time_func = TimeFormatFactory.get_time_func(datetime_format)
        self._dict_check = kwargs.get('dict_check', False)
    def check_attr(self, attr):
        """Return True if *attr* passes the exclude/include filters."""
        if self.exclude_attr and attr in self.exclude_attr:
            return False
        if self.include_attr and attr not in self.include_attr:
            return False
        return True
    def data_inspect(self, data, extra=None):
        """Recursively convert *data* to plain Python structures.

        *extra* is a manager for an m2m "through" model; when given, the
        through row matching each related object is looked up and its
        fields merged into that object's dict.
        """
        if isinstance(data, (QuerySet, Page, list)):
            convert_data = []
            if extra:
                for i, obj in enumerate(data):
                    # Fetch the through-table row linking obj back to the
                    # source instance so its extra fields can be merged in.
                    convert_data.append(self.data_inspect(obj, extra.get(
                        **{self.through_fields[0]: obj, self.through_fields[1]: self.source_field})))
            else:
                for obj in data:
                    convert_data.append(self.data_inspect(obj))
            return convert_data
        elif isinstance(data, models.Model):
            obj_dict = {}
            concrete_model = data._meta.concrete_model
            # NOTE(review): field.rel is the legacy relation accessor —
            # removed in newer Django; confirm the supported Django range.
            for field in concrete_model._meta.local_fields:
                if field.rel is None:
                    if self.check_attr(field.name) and hasattr(data, field.name):
                        obj_dict[field.name] = self.data_inspect(getattr(data, field.name))
                else:
                    # Relational field: only follow when foreign=True.
                    if self.check_attr(field.name) and self.foreign:
                        obj_dict[field.name] = self.data_inspect(getattr(data, field.name))
            for field in concrete_model._meta.many_to_many:
                if self.check_attr(field.name) and self.many:
                    obj_dict[field.name] = self.data_inspect(getattr(data, field.name))
            # Pick up non-field instance attributes set on the object.
            for k, v in data.__dict__.items():
                if not str(k).startswith('_') and k not in obj_dict.keys() and self.check_attr(k):
                    obj_dict[k] = self.data_inspect(v)
            if extra:
                # Merge the extra fields carried by the through-model row.
                for field in extra._meta.concrete_model._meta.local_fields:
                    if field.name not in obj_dict.keys() and field.name not in self.through_fields:
                        if field.rel is None:
                            if self.check_attr(field.name) and hasattr(extra, field.name):
                                obj_dict[field.name] = self.data_inspect(getattr(extra, field.name))
                        else:
                            if self.check_attr(field.name) and self.foreign:
                                obj_dict[field.name] = self.data_inspect(getattr(extra, field.name))
            return obj_dict
        elif isinstance(data, manager.Manager):
            through_list = data.through._meta.concrete_model._meta.local_fields
            through_data = data.through._default_manager
            self.through_fields = [data.target_field.name, data.source_field.name]
            self.source_field = data.instance
            # More than the three implicit columns (pk + both FKs) means the
            # through model carries extra data worth serializing.
            if len(through_list) > 3 and self.through:
                return self.data_inspect(data.all(), through_data)
            else:
                return self.data_inspect(data.all())
        elif isinstance(data, (datetime.datetime, datetime.date, datetime.time)):
            return self.time_func(data)
        elif isinstance(data, (ImageFieldFile, FileField)):
            # NOTE(review): .url raises when no file is attached — confirm
            # callers never pass empty file fields.
            return data.url if data.url else data.path
        elif isinstance(data, Decimal):
            return float(data)
        elif isinstance(data, dict):
            obj_dict = {}
            if self._dict_check:
                for k, v in data.items():
                    obj_dict[k] = self.data_inspect(v)
            else:
                for k, v in data.items():
                    if self.check_attr(k):
                        obj_dict[k] = self.data_inspect(v)
            return obj_dict
        elif isinstance(data, (str, bool, float, int)):
            return data
        else:
            # Anything unrecognized is dropped.
            return None
    def data_format(self):
        """Run the conversion and cache the result on self.objects."""
        self.objects = self.data_inspect(self.origin_data)
    def get_values(self):
        """Render self.objects per output_type ('json' dumps; others raw)."""
        output_switch = {'dict': self.objects,
                         'raw': self.objects,
                         'json': json.dumps(self.objects, indent=4)}
        return output_switch.get(self.output_type, self.objects)
    def __call__(self):
        # Convenience: serialize and return in one step.
        self.data_format()
        return self.get_values()
def serializer(data, datetime_format='timestamp', output_type='raw', include_attr=None, exclude_attr=None,
               foreign=False, many=False, through=True, *args, **kwargs):
    """Functional shortcut: build a Serializer for *data* with the given
    options and immediately return its serialized output."""
    instance = Serializer(data, datetime_format, output_type, include_attr,
                          exclude_attr, foreign, many, through, *args, **kwargs)
    return instance()
| bsd-2-clause |
amenonsen/ansible | lib/ansible/modules/storage/purestorage/purefa_dns.py | 17 | 3761 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Simon Dodsley (simon@purestorage.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: purefa_dns
version_added: '2.8'
short_description: Configure FlashArray DNS settings
description:
- Set or erase configuration for the DNS settings.
- Nameservers provided will overwrite any existing nameservers.
author:
- Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
options:
state:
description:
- Set or delete directory service configuration
default: present
type: str
choices: [ absent, present ]
domain:
description:
      - Domain suffix to be appended when performing DNS lookups.
type: str
nameservers:
description:
      - List of up to 3 unique DNS server IP addresses. These can be
        IPv4 or IPv6 - no validation of the addresses is performed.
type: list
extends_documentation_fragment:
- purestorage.fa
'''
EXAMPLES = r'''
- name: Delete existing DNS settings
purefa_dns:
state: absent
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: Set DNS settings
purefa_dns:
domain: purestorage.com
nameservers:
- 8.8.8.8
- 8.8.4.4
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_system, purefa_argument_spec
def remove(duplicate):
    """Return a copy of *duplicate* with repeated items dropped.

    First-seen order is preserved. Used to sanitize the user-supplied
    nameserver list before it is compared with and applied to the array.
    """
    seen = set()
    final_list = []
    for num in duplicate:
        # Set membership is O(1) versus the original O(n) list scan,
        # making the whole pass linear instead of quadratic.
        if num not in seen:
            seen.add(num)
            final_list.append(num)
    return final_list
def delete_dns(module, array):
    """Delete DNS settings.

    If the array already has no domain and no nameservers configured this
    is a no-op; otherwise both are cleared. Always terminates the module
    run via exit_json (or fail_json on API failure).
    """
    changed = False
    current_dns = array.get_dns()
    # An unconfigured array reports an empty domain and [''] nameservers.
    if current_dns['domain'] == '' and current_dns['nameservers'] == ['']:
        module.exit_json(changed=changed)
    else:
        try:
            array.set_dns(domain='', nameservers=[])
            changed = True
        except Exception:
            # Fixed typo in the user-facing error message ("settigs").
            module.fail_json(msg='Delete DNS settings failed')
    module.exit_json(changed=changed)
def create_dns(module, array):
    """Set DNS settings.

    Applies the requested domain/nameservers only when they differ from
    the array's current configuration; always exits via exit_json (or
    fail_json on API failure).
    """
    changed = False
    current = array.get_dns()
    wanted_domain = module.params['domain']
    wanted_ns = module.params['nameservers']
    # Nameserver order is irrelevant for the comparison, hence sorted().
    if current['domain'] != wanted_domain or \
            sorted(wanted_ns) != sorted(current['nameservers']):
        try:
            # The array accepts at most three nameservers.
            array.set_dns(domain=wanted_domain, nameservers=wanted_ns[0:3])
            changed = True
        except Exception:
            module.fail_json(msg='Set DNS settings failed: Check configuration')
    module.exit_json(changed=changed)
def main():
    """Module entry point: parse arguments and apply or delete DNS config."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(dict(
        state=dict(type='str', default='present', choices=['absent', 'present']),
        domain=dict(type='str'),
        nameservers=dict(type='list'),
    ))
    # domain and nameservers are only mandatory when configuring (present).
    required_if = [('state', 'present', ['domain', 'nameservers'])]
    module = AnsibleModule(argument_spec,
                           required_if=required_if,
                           supports_check_mode=False)
    state = module.params['state']
    array = get_system(module)
    if state == 'absent':
        delete_dns(module, array)
    elif state == 'present':
        # De-duplicate user input before comparing with the current config.
        module.params['nameservers'] = remove(module.params['nameservers'])
        create_dns(module, array)
    else:
        # Unreachable given the choices constraint, but exit cleanly anyway.
        module.exit_json(changed=False)
if __name__ == '__main__':
    main()
| gpl-3.0 |
alhashash/odoomrp-wip | quality_control_samples/__openerp__.py | 13 | 1465 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Avanzosc - Advanced Open Source Consulting
# Copyright (C) 2011 - 2013 Avanzosc <http://www.avanzosc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
    # Human-readable addon name shown in the apps list.
    "name": "Quality control - Samples in inspections",
    "version": "1.0",
    # This addon extends the base quality_control module.
    "depends": [
        "quality_control",
    ],
    "author": "OdooMRP team,"
              "AvanzOSC,"
              "Serv. Tecnol. Avanzados - Pedro M. Baeza",
    "category": "Quality control",
    # Data files loaded on install/upgrade: access rules first, then views.
    'data': [
        'security/ir.model.access.csv',
        'views/qc_sample_view.xml',
        'views/qc_test_view.xml',
        'views/qc_inspection_view.xml',
    ],
    'installable': True,
}
| agpl-3.0 |
keithroe/vtkoptix | ThirdParty/Twisted/twisted/test/test_tcp.py | 34 | 66293 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorTCP}.
"""
from __future__ import division, absolute_import
import socket, random, errno
from functools import wraps
from zope.interface import implementer
from twisted.trial import unittest
from twisted.python.log import msg
from twisted.internet import protocol, reactor, defer, interfaces
from twisted.internet import error
from twisted.internet.address import IPv4Address
from twisted.internet.interfaces import IHalfCloseableProtocol, IPullProducer
from twisted.protocols import policies
from twisted.test.proto_helpers import AccumulatingProtocol
def loopUntil(predicate, interval=0):
    """
    Poor excuse for an event notification helper. This polls a condition and
    calls back a Deferred when it is seen to be true.
    Do not use this function.

    @param predicate: no-argument callable polled every C{interval} seconds.
    @param interval: polling period passed to L{task.LoopingCall.start}.
    @return: a L{defer.Deferred} firing with the first truthy predicate
        result.
    """
    from twisted.internet import task
    d = defer.Deferred()
    def check():
        res = predicate()
        if res:
            # Fire with the truthy predicate result itself.
            d.callback(res)
    call = task.LoopingCall(check)
    def stop(result):
        # Stop polling once the Deferred fires; pass the result through.
        call.stop()
        return result
    d.addCallback(stop)
    d2 = call.start(interval)
    # Any polling error is forwarded to the caller's Deferred.
    d2.addErrback(d.errback)
    return d
class ClosingProtocol(protocol.Protocol):
    """
    Protocol which drops its transport as soon as the connection is made.
    """
    def connectionMade(self):
        msg("ClosingProtocol.connectionMade")
        self.transport.loseConnection()
    def connectionLost(self, reason):
        msg("ClosingProtocol.connectionLost")
        # A clean shutdown is the only acceptable way for this connection
        # to end; anything else propagates as a failure.
        reason.trap(error.ConnectionDone)
class ClosingFactory(protocol.ServerFactory):
    """
    Factory that closes port immediately.
    """
    # Deferred returned by stopListening, or None before any connection.
    _cleanerUpper = None
    def buildProtocol(self, conn):
        # Stop listening as soon as the first connection arrives, keeping
        # the Deferred so cleanUp can wait for it.
        self._cleanerUpper = self.port.stopListening()
        return ClosingProtocol()
    def cleanUp(self):
        """
        Clean-up for tests to wait for the port to stop listening.
        """
        if self._cleanerUpper is None:
            return self.port.stopListening()
        return self._cleanerUpper
class MyProtocolFactoryMixin(object):
    """
    Mixin for factories which create L{AccumulatingProtocol} instances.
    @type protocolFactory: no-argument callable
    @ivar protocolFactory: Factory for protocols - takes the place of the
        typical C{protocol} attribute of factories (but that name is used by
        this class for something else).
    @type protocolConnectionMade: L{NoneType} or L{defer.Deferred}
    @ivar protocolConnectionMade: When an instance of L{AccumulatingProtocol}
        is connected, if this is not C{None}, the L{Deferred} will be called
        back with the protocol instance and the attribute set to C{None}.
    @type protocolConnectionLost: L{NoneType} or L{defer.Deferred}
    @ivar protocolConnectionLost: When an instance of L{AccumulatingProtocol}
        is created, this will be set as its C{closedDeferred} attribute and
        then this attribute will be set to C{None} so the L{defer.Deferred} is
        not used by more than one protocol.
    @ivar protocol: The most recently created L{AccumulatingProtocol} instance
        which was returned from C{buildProtocol}.
    @type called: C{int}
    @ivar called: A counter which is incremented each time C{buildProtocol}
        is called.
    @ivar peerAddresses: A C{list} of the addresses passed to C{buildProtocol}.
    """
    protocolFactory = AccumulatingProtocol
    protocolConnectionMade = None
    protocolConnectionLost = None
    protocol = None
    called = 0
    def __init__(self):
        self.peerAddresses = []
    def buildProtocol(self, addr):
        """
        Create a L{AccumulatingProtocol} and set it up to be able to perform
        callbacks.
        """
        self.peerAddresses.append(addr)
        self.called += 1
        p = self.protocolFactory()
        p.factory = self
        # Hand the one-shot closedDeferred to this protocol only, then
        # clear it so no later protocol reuses it.
        p.closedDeferred = self.protocolConnectionLost
        self.protocolConnectionLost = None
        self.protocol = p
        return p
class MyServerFactory(MyProtocolFactoryMixin, protocol.ServerFactory):
    """
    Server factory which creates L{AccumulatingProtocol} instances.
    All behavior comes from L{MyProtocolFactoryMixin}.
    """
class MyClientFactory(MyProtocolFactoryMixin, protocol.ClientFactory):
    """
    Client factory which creates L{AccumulatingProtocol} instances.
    """
    # Set to 1 once clientConnectionFailed has run.
    failed = 0
    # Set to 1 once the factory has been stopped.
    stopped = 0
    def __init__(self):
        MyProtocolFactoryMixin.__init__(self)
        # Fires when an established connection is later lost.
        self.deferred = defer.Deferred()
        # Fires when the connection attempt fails outright.
        self.failDeferred = defer.Deferred()
    def clientConnectionFailed(self, connector, reason):
        self.failed = 1
        self.reason = reason
        self.failDeferred.callback(None)
    def clientConnectionLost(self, connector, reason):
        self.lostReason = reason
        self.deferred.callback(None)
    def stopFactory(self):
        self.stopped = 1
class ListeningTestCase(unittest.TestCase):
    """
    Tests for the listening ports created by L{IReactorTCP.listenTCP}.
    """
    def test_listen(self):
        """
        L{IReactorTCP.listenTCP} returns an object which provides
        L{IListeningPort}.
        """
        f = MyServerFactory()
        p1 = reactor.listenTCP(0, f, interface="127.0.0.1")
        self.addCleanup(p1.stopListening)
        self.failUnless(interfaces.IListeningPort.providedBy(p1))
    def testStopListening(self):
        """
        The L{IListeningPort} returned by L{IReactorTCP.listenTCP} can be
        stopped with its C{stopListening} method. After the L{Deferred} it
        (optionally) returns has been called back, the port number can be bound
        to a new server.
        """
        f = MyServerFactory()
        port = reactor.listenTCP(0, f, interface="127.0.0.1")
        n = port.getHost().port
        def cbStopListening(ignored):
            # Make sure we can rebind the port right away
            port = reactor.listenTCP(n, f, interface="127.0.0.1")
            return port.stopListening()
        d = defer.maybeDeferred(port.stopListening)
        d.addCallback(cbStopListening)
        return d
    def testNumberedInterface(self):
        """
        A port can be bound to a numeric (dotted-quad) interface address.
        """
        f = MyServerFactory()
        # listen only on the loopback interface
        p1 = reactor.listenTCP(0, f, interface='127.0.0.1')
        return p1.stopListening()
    def testPortRepr(self):
        """
        The port's repr includes the bound port number while listening and
        no longer does once listening has stopped.
        """
        f = MyServerFactory()
        p = reactor.listenTCP(0, f)
        portNo = str(p.getHost().port)
        self.failIf(repr(p).find(portNo) == -1)
        def stoppedListening(ign):
            self.failIf(repr(p).find(portNo) != -1)
        d = defer.maybeDeferred(p.stopListening)
        return d.addCallback(stoppedListening)
    def test_serverRepr(self):
        """
        Check that the repr string of the server transport get the good port
        number if the server listens on 0.
        """
        server = MyServerFactory()
        serverConnMade = server.protocolConnectionMade = defer.Deferred()
        port = reactor.listenTCP(0, server)
        self.addCleanup(port.stopListening)
        client = MyClientFactory()
        clientConnMade = client.protocolConnectionMade = defer.Deferred()
        connector = reactor.connectTCP("127.0.0.1",
                                       port.getHost().port, client)
        self.addCleanup(connector.disconnect)
        def check(result):
            serverProto, clientProto = result
            portNumber = port.getHost().port
            self.assertEqual(
                repr(serverProto.transport),
                "<AccumulatingProtocol #0 on %s>" % (portNumber,))
            serverProto.transport.loseConnection()
            clientProto.transport.loseConnection()
        return defer.gatherResults([serverConnMade, clientConnMade]
                                   ).addCallback(check)
    def test_restartListening(self):
        """
        Stop and then try to restart a L{tcp.Port}: after a restart, the
        server should be able to handle client connections.
        """
        serverFactory = MyServerFactory()
        port = reactor.listenTCP(0, serverFactory, interface="127.0.0.1")
        self.addCleanup(port.stopListening)
        def cbStopListening(ignored):
            # Restart the very same port object and connect to it again.
            port.startListening()
            client = MyClientFactory()
            serverFactory.protocolConnectionMade = defer.Deferred()
            client.protocolConnectionMade = defer.Deferred()
            connector = reactor.connectTCP("127.0.0.1",
                                           port.getHost().port, client)
            self.addCleanup(connector.disconnect)
            return defer.gatherResults([serverFactory.protocolConnectionMade,
                                        client.protocolConnectionMade]
                                       ).addCallback(close)
        def close(result):
            serverProto, clientProto = result
            clientProto.transport.loseConnection()
            serverProto.transport.loseConnection()
        d = defer.maybeDeferred(port.stopListening)
        d.addCallback(cbStopListening)
        return d
    def test_exceptInStop(self):
        """
        If the server factory raises an exception in C{stopFactory}, the
        deferred returned by L{tcp.Port.stopListening} should fail with the
        corresponding error.
        """
        serverFactory = MyServerFactory()
        def raiseException():
            raise RuntimeError("An error")
        serverFactory.stopFactory = raiseException
        port = reactor.listenTCP(0, serverFactory, interface="127.0.0.1")
        return self.assertFailure(port.stopListening(), RuntimeError)
    def test_restartAfterExcept(self):
        """
        Even if the server factory raise an exception in C{stopFactory}, the
        corresponding C{tcp.Port} instance should be in a sane state and can
        be restarted.
        """
        serverFactory = MyServerFactory()
        def raiseException():
            raise RuntimeError("An error")
        serverFactory.stopFactory = raiseException
        port = reactor.listenTCP(0, serverFactory, interface="127.0.0.1")
        self.addCleanup(port.stopListening)
        def cbStopListening(ignored):
            # Remove the broken stopFactory and prove the port still works.
            del serverFactory.stopFactory
            port.startListening()
            client = MyClientFactory()
            serverFactory.protocolConnectionMade = defer.Deferred()
            client.protocolConnectionMade = defer.Deferred()
            connector = reactor.connectTCP("127.0.0.1",
                                           port.getHost().port, client)
            self.addCleanup(connector.disconnect)
            return defer.gatherResults([serverFactory.protocolConnectionMade,
                                        client.protocolConnectionMade]
                                       ).addCallback(close)
        def close(result):
            serverProto, clientProto = result
            clientProto.transport.loseConnection()
            serverProto.transport.loseConnection()
        return self.assertFailure(port.stopListening(), RuntimeError
                                  ).addCallback(cbStopListening)
    def test_directConnectionLostCall(self):
        """
        If C{connectionLost} is called directly on a port object, it succeeds
        (and doesn't expect the presence of a C{deferred} attribute).
        C{connectionLost} is called by L{reactor.disconnectAll} at shutdown.
        """
        serverFactory = MyServerFactory()
        port = reactor.listenTCP(0, serverFactory, interface="127.0.0.1")
        portNumber = port.getHost().port
        port.connectionLost(None)
        client = MyClientFactory()
        serverFactory.protocolConnectionMade = defer.Deferred()
        client.protocolConnectionMade = defer.Deferred()
        reactor.connectTCP("127.0.0.1", portNumber, client)
        def check(ign):
            # The port is gone, so the connection attempt must be refused.
            client.reason.trap(error.ConnectionRefusedError)
        return client.failDeferred.addCallback(check)
    def test_exceptInConnectionLostCall(self):
        """
        If C{connectionLost} is called directory on a port object and that the
        server factory raises an exception in C{stopFactory}, the exception is
        passed through to the caller.
        C{connectionLost} is called by L{reactor.disconnectAll} at shutdown.
        """
        serverFactory = MyServerFactory()
        def raiseException():
            raise RuntimeError("An error")
        serverFactory.stopFactory = raiseException
        port = reactor.listenTCP(0, serverFactory, interface="127.0.0.1")
        self.assertRaises(RuntimeError, port.connectionLost, None)
def callWithSpew(f):
    """
    Invoke C{f} with Twisted's line-numbering "spewer" installed as the
    trace hook, so each executed line is logged; the hook is always
    removed afterwards.
    """
    from twisted.python.util import spewerWithLinenums as spewer
    import sys
    sys.settrace(spewer)
    try:
        f()
    finally:
        # Always restore the default (no) trace function.
        sys.settrace(None)
class LoopbackTestCase(unittest.TestCase):
"""
Test loopback connections.
"""
    def test_closePortInProtocolFactory(self):
        """
        A port created with L{IReactorTCP.listenTCP} can be connected to with
        L{IReactorTCP.connectTCP}.
        """
        f = ClosingFactory()
        port = reactor.listenTCP(0, f, interface="127.0.0.1")
        f.port = port
        self.addCleanup(f.cleanUp)
        portNumber = port.getHost().port
        clientF = MyClientFactory()
        reactor.connectTCP("127.0.0.1", portNumber, clientF)
        def check(x):
            self.assertTrue(clientF.protocol.made)
            self.assertTrue(port.disconnected)
            # The server side closed the connection, so the client must
            # see a clean shutdown.
            clientF.lostReason.trap(error.ConnectionDone)
        return clientF.deferred.addCallback(check)
    def _trapCnxDone(self, obj):
        """
        Swallow clean-close failures; accept any non-Failure result as-is.
        """
        # Failure objects expose trap(); for anything else fall back to a
        # no-op callable so plain results pass through silently.
        getattr(obj, 'trap', lambda x: None)(error.ConnectionDone)
def _connectedClientAndServerTest(self, callback):
"""
Invoke the given callback with a client protocol and a server protocol
which have been connected to each other.
"""
serverFactory = MyServerFactory()
serverConnMade = defer.Deferred()
serverFactory.protocolConnectionMade = serverConnMade
port = reactor.listenTCP(0, serverFactory, interface="127.0.0.1")
self.addCleanup(port.stopListening)
portNumber = port.getHost().port
clientF = MyClientFactory()
clientConnMade = defer.Deferred()
clientF.protocolConnectionMade = clientConnMade
reactor.connectTCP("127.0.0.1", portNumber, clientF)
connsMade = defer.gatherResults([serverConnMade, clientConnMade])
def connected(result):
serverProtocol, clientProtocol = result
callback(serverProtocol, clientProtocol)
serverProtocol.transport.loseConnection()
clientProtocol.transport.loseConnection()
connsMade.addCallback(connected)
return connsMade
def test_tcpNoDelay(self):
"""
The transport of a protocol connected with L{IReactorTCP.connectTCP} or
L{IReactor.TCP.listenTCP} can have its I{TCP_NODELAY} state inspected
and manipulated with L{ITCPTransport.getTcpNoDelay} and
L{ITCPTransport.setTcpNoDelay}.
"""
def check(serverProtocol, clientProtocol):
for p in [serverProtocol, clientProtocol]:
transport = p.transport
self.assertEqual(transport.getTcpNoDelay(), 0)
transport.setTcpNoDelay(1)
self.assertEqual(transport.getTcpNoDelay(), 1)
transport.setTcpNoDelay(0)
self.assertEqual(transport.getTcpNoDelay(), 0)
return self._connectedClientAndServerTest(check)
def test_tcpKeepAlive(self):
"""
The transport of a protocol connected with L{IReactorTCP.connectTCP} or
L{IReactor.TCP.listenTCP} can have its I{SO_KEEPALIVE} state inspected
and manipulated with L{ITCPTransport.getTcpKeepAlive} and
L{ITCPTransport.setTcpKeepAlive}.
"""
def check(serverProtocol, clientProtocol):
for p in [serverProtocol, clientProtocol]:
transport = p.transport
self.assertEqual(transport.getTcpKeepAlive(), 0)
transport.setTcpKeepAlive(1)
self.assertEqual(transport.getTcpKeepAlive(), 1)
transport.setTcpKeepAlive(0)
self.assertEqual(transport.getTcpKeepAlive(), 0)
return self._connectedClientAndServerTest(check)
def testFailing(self):
clientF = MyClientFactory()
# XXX we assume no one is listening on TCP port 69
reactor.connectTCP("127.0.0.1", 69, clientF, timeout=5)
def check(ignored):
clientF.reason.trap(error.ConnectionRefusedError)
return clientF.failDeferred.addCallback(check)
def test_connectionRefusedErrorNumber(self):
"""
Assert that the error number of the ConnectionRefusedError is
ECONNREFUSED, and not some other socket related error.
"""
# Bind a number of ports in the operating system. We will attempt
# to connect to these in turn immediately after closing them, in the
# hopes that no one else has bound them in the mean time. Any
# connection which succeeds is ignored and causes us to move on to
# the next port. As soon as a connection attempt fails, we move on
# to making an assertion about how it failed. If they all succeed,
# the test will fail.
# It would be nice to have a simpler, reliable way to cause a
# connection failure from the platform.
#
# On Linux (2.6.15), connecting to port 0 always fails. FreeBSD
# (5.4) rejects the connection attempt with EADDRNOTAVAIL.
#
# On FreeBSD (5.4), listening on a port and then repeatedly
# connecting to it without ever accepting any connections eventually
# leads to an ECONNREFUSED. On Linux (2.6.15), a seemingly
# unbounded number of connections succeed.
serverSockets = []
for i in range(10):
serverSocket = socket.socket()
serverSocket.bind(('127.0.0.1', 0))
serverSocket.listen(1)
serverSockets.append(serverSocket)
random.shuffle(serverSockets)
clientCreator = protocol.ClientCreator(reactor, protocol.Protocol)
def tryConnectFailure():
def connected(proto):
"""
Darn. Kill it and try again, if there are any tries left.
"""
proto.transport.loseConnection()
if serverSockets:
return tryConnectFailure()
self.fail("Could not fail to connect - could not test errno for that case.")
serverSocket = serverSockets.pop()
serverHost, serverPort = serverSocket.getsockname()
serverSocket.close()
connectDeferred = clientCreator.connectTCP(serverHost, serverPort)
connectDeferred.addCallback(connected)
return connectDeferred
refusedDeferred = tryConnectFailure()
self.assertFailure(refusedDeferred, error.ConnectionRefusedError)
def connRefused(exc):
self.assertEqual(exc.osError, errno.ECONNREFUSED)
refusedDeferred.addCallback(connRefused)
def cleanup(passthrough):
while serverSockets:
serverSockets.pop().close()
return passthrough
refusedDeferred.addBoth(cleanup)
return refusedDeferred
def test_connectByServiceFail(self):
"""
Connecting to a named service which does not exist raises
L{error.ServiceNameUnknownError}.
"""
self.assertRaises(
error.ServiceNameUnknownError,
reactor.connectTCP,
"127.0.0.1", "thisbetternotexist", MyClientFactory())
def test_connectByService(self):
"""
L{IReactorTCP.connectTCP} accepts the name of a service instead of a
port number and connects to the port number associated with that
service, as defined by L{socket.getservbyname}.
"""
serverFactory = MyServerFactory()
serverConnMade = defer.Deferred()
serverFactory.protocolConnectionMade = serverConnMade
port = reactor.listenTCP(0, serverFactory, interface="127.0.0.1")
self.addCleanup(port.stopListening)
portNumber = port.getHost().port
clientFactory = MyClientFactory()
clientConnMade = defer.Deferred()
clientFactory.protocolConnectionMade = clientConnMade
def fakeGetServicePortByName(serviceName, protocolName):
if serviceName == 'http' and protocolName == 'tcp':
return portNumber
return 10
self.patch(socket, 'getservbyname', fakeGetServicePortByName)
reactor.connectTCP('127.0.0.1', 'http', clientFactory)
connMade = defer.gatherResults([serverConnMade, clientConnMade])
def connected(result):
serverProtocol, clientProtocol = result
self.assertTrue(
serverFactory.called,
"Server factory was not called upon to build a protocol.")
serverProtocol.transport.loseConnection()
clientProtocol.transport.loseConnection()
connMade.addCallback(connected)
return connMade
class StartStopFactory(protocol.Factory):
    """
    Server factory which records its start/stop transitions and raises
    L{RuntimeError} if either happens more than once or out of order.
    """
    started = 0
    stopped = 0
    def startFactory(self):
        # Starting twice, or starting after having been stopped, is an error.
        if self.stopped or self.started:
            raise RuntimeError
        self.started = 1
    def stopFactory(self):
        # Stopping before a start, or stopping twice, is an error.
        if self.stopped or not self.started:
            raise RuntimeError
        self.stopped = 1
class ClientStartStopFactory(MyClientFactory):
    """
    Client factory which records its start/stop transitions, raising
    L{RuntimeError} on any repeated or out-of-order transition, and fires
    C{whenStopped} with C{True} once stopped.
    """
    started = 0
    stopped = 0
    def __init__(self, *a, **kw):
        MyClientFactory.__init__(self, *a, **kw)
        self.whenStopped = defer.Deferred()
    def startFactory(self):
        # Starting twice, or starting after having been stopped, is an error.
        if self.stopped or self.started:
            raise RuntimeError
        self.started = 1
    def stopFactory(self):
        # Stopping before a start, or stopping twice, is an error.
        if self.stopped or not self.started:
            raise RuntimeError
        self.stopped = 1
        self.whenStopped.callback(True)
class FactoryTestCase(unittest.TestCase):
    """Tests for factories."""
    def test_serverStartStop(self):
        """
        The factory passed to L{IReactorTCP.listenTCP} should be started only
        when it transitions from being used on no ports to being used on one
        port and should be stopped only when it transitions from being used on
        one port to being used on no ports.
        """
        # Note - this test doesn't need to use listenTCP. It is exercising
        # logic implemented in Factory.doStart and Factory.doStop, so it could
        # just call that directly. Some other test can make sure that
        # listenTCP and stopListening correctly call doStart and
        # doStop. -exarkun
        f = StartStopFactory()
        # listen on port
        p1 = reactor.listenTCP(0, f, interface='127.0.0.1')
        self.addCleanup(p1.stopListening)
        self.assertEqual((f.started, f.stopped), (1, 0))
        # listen on two more ports; the factory must not be started again.
        p2 = reactor.listenTCP(0, f, interface='127.0.0.1')
        p3 = reactor.listenTCP(0, f, interface='127.0.0.1')
        self.assertEqual((f.started, f.stopped), (1, 0))
        # close two ports; one port remains, so the factory stays started.
        d1 = defer.maybeDeferred(p1.stopListening)
        d2 = defer.maybeDeferred(p2.stopListening)
        closedDeferred = defer.gatherResults([d1, d2])
        def cbClosed(ignored):
            self.assertEqual((f.started, f.stopped), (1, 0))
            # Close the last port
            return p3.stopListening()
        closedDeferred.addCallback(cbClosed)
        def cbClosedAll(ignored):
            # With no ports left, the factory must now have been stopped.
            self.assertEqual((f.started, f.stopped), (1, 1))
        closedDeferred.addCallback(cbClosedAll)
        return closedDeferred
    def test_clientStartStop(self):
        """
        The factory passed to L{IReactorTCP.connectTCP} should be started when
        the connection attempt starts and stopped when it is over.
        """
        f = ClosingFactory()
        p = reactor.listenTCP(0, f, interface="127.0.0.1")
        f.port = p
        self.addCleanup(f.cleanUp)
        portNumber = p.getHost().port
        factory = ClientStartStopFactory()
        reactor.connectTCP("127.0.0.1", portNumber, factory)
        # connectTCP starts the factory synchronously.
        self.assertTrue(factory.started)
        # The server side closes the connection, which should stop the
        # client factory; poll until that happens.
        return loopUntil(lambda: factory.stopped)
class CannotBindTestCase(unittest.TestCase):
    """
    Tests for correct behavior when a reactor cannot bind to the required TCP
    port.
    """
    def test_cannotBind(self):
        """
        L{IReactorTCP.listenTCP} raises L{error.CannotListenError} if the
        address to listen on is already in use.
        """
        f = MyServerFactory()
        p1 = reactor.listenTCP(0, f, interface='127.0.0.1')
        self.addCleanup(p1.stopListening)
        n = p1.getHost().port
        dest = p1.getHost()
        self.assertEqual(dest.type, "TCP")
        self.assertEqual(dest.host, "127.0.0.1")
        self.assertEqual(dest.port, n)
        # make sure new listen raises error
        self.assertRaises(error.CannotListenError,
                          reactor.listenTCP, n, f, interface='127.0.0.1')
    def _fireWhenDoneFunc(self, d, f):
        """Returns closure that when called calls f and then callbacks d.
        """
        @wraps(f)
        def newf(*args, **kw):
            rtn = f(*args, **kw)
            d.callback('')
            return rtn
        return newf
    def test_clientBind(self):
        """
        L{IReactorTCP.connectTCP} calls C{Factory.clientConnectionFailed} with
        L{error.ConnectBindError} if the bind address specified is already in
        use.
        """
        theDeferred = defer.Deferred()
        sf = MyServerFactory()
        # Fire theDeferred as soon as the server factory is started.
        sf.startFactory = self._fireWhenDoneFunc(theDeferred, sf.startFactory)
        p = reactor.listenTCP(0, sf, interface="127.0.0.1")
        self.addCleanup(p.stopListening)
        # The following helpers form a callback chain:
        # _connect1 -> _conmade -> _check1connect2 -> (_check2failed,
        # _check2stopped) -> _stop -> _check1cleanup.
        def _connect1(results):
            # First client: connect with an OS-assigned local bind port.
            d = defer.Deferred()
            cf1 = MyClientFactory()
            cf1.buildProtocol = self._fireWhenDoneFunc(d, cf1.buildProtocol)
            reactor.connectTCP("127.0.0.1", p.getHost().port, cf1,
                               bindAddress=("127.0.0.1", 0))
            d.addCallback(_conmade, cf1)
            return d
        def _conmade(results, cf1):
            # Wait for the first client's connectionMade.
            d = defer.Deferred()
            cf1.protocol.connectionMade = self._fireWhenDoneFunc(
                d, cf1.protocol.connectionMade)
            d.addCallback(_check1connect2, cf1)
            return d
        def _check1connect2(results, cf1):
            self.assertEqual(cf1.protocol.made, 1)
            d1 = defer.Deferred()
            d2 = defer.Deferred()
            # Second client: deliberately bind to the local port the first
            # client is already using, which must fail.
            port = cf1.protocol.transport.getHost().port
            cf2 = MyClientFactory()
            cf2.clientConnectionFailed = self._fireWhenDoneFunc(
                d1, cf2.clientConnectionFailed)
            cf2.stopFactory = self._fireWhenDoneFunc(d2, cf2.stopFactory)
            reactor.connectTCP("127.0.0.1", p.getHost().port, cf2,
                               bindAddress=("127.0.0.1", port))
            d1.addCallback(_check2failed, cf1, cf2)
            d2.addCallback(_check2stopped, cf1, cf2)
            dl = defer.DeferredList([d1, d2])
            dl.addCallback(_stop, cf1, cf2)
            return dl
        def _check2failed(results, cf1, cf2):
            self.assertEqual(cf2.failed, 1)
            cf2.reason.trap(error.ConnectBindError)
            self.assertTrue(cf2.reason.check(error.ConnectBindError))
            return results
        def _check2stopped(results, cf1, cf2):
            self.assertEqual(cf2.stopped, 1)
            return results
        def _stop(results, cf1, cf2):
            # Tear down the first (successful) connection and wait for its
            # factory to be stopped.
            d = defer.Deferred()
            d.addCallback(_check1cleanup, cf1)
            cf1.stopFactory = self._fireWhenDoneFunc(d, cf1.stopFactory)
            cf1.protocol.transport.loseConnection()
            return d
        def _check1cleanup(results, cf1):
            self.assertEqual(cf1.stopped, 1)
        theDeferred.addCallback(_connect1)
        return theDeferred
class MyOtherClientFactory(protocol.ClientFactory):
    """
    Client factory which remembers the address handed to C{buildProtocol}
    (as C{self.address}) and the protocol it built (as C{self.protocol}).
    """
    def buildProtocol(self, address):
        proto = AccumulatingProtocol()
        self.address = address
        self.protocol = proto
        return proto
class LocalRemoteAddressTestCase(unittest.TestCase):
    """
    Tests for correct getHost/getPeer values and that the correct address is
    passed to buildProtocol.
    """
    def test_hostAddress(self):
        """
        L{IListeningPort.getHost} returns the same address as a client
        connection's L{ITCPTransport.getPeer}.
        """
        serverFactory = MyServerFactory()
        serverFactory.protocolConnectionLost = defer.Deferred()
        serverConnectionLost = serverFactory.protocolConnectionLost
        port = reactor.listenTCP(0, serverFactory, interface='127.0.0.1')
        self.addCleanup(port.stopListening)
        n = port.getHost().port
        clientFactory = MyClientFactory()
        onConnection = clientFactory.protocolConnectionMade = defer.Deferred()
        connector = reactor.connectTCP('127.0.0.1', n, clientFactory)
        def check(ignored):
            # Both the address passed to buildProtocol and the connected
            # transport's peer must match the listening port's address.
            self.assertEqual([port.getHost()], clientFactory.peerAddresses)
            self.assertEqual(
                port.getHost(), clientFactory.protocol.transport.getPeer())
        onConnection.addCallback(check)
        def cleanup(ignored):
            # Clean up the client explicitly here so that tear down of
            # the server side of the connection begins, then wait for
            # the server side to actually disconnect.
            connector.disconnect()
            return serverConnectionLost
        onConnection.addCallback(cleanup)
        return onConnection
class WriterProtocol(protocol.Protocol):
    """
    Protocol which exercises the full L{ITransport} write and introspection
    API as soon as the connection is made, then drops the connection.
    Failures are logged and flagged on the factory (C{problem}) instead of
    raised, so the owning test checks C{factory.done}/C{factory.problem}.
    """
    def connectionMade(self):
        transport = self.transport
        transport.write(b"Hello Cleveland!\n")
        transport.writeSequence([b"Goodbye", b" cruel", b" world", b"\n"])
        peer = transport.getPeer()
        if peer.type != "TCP":
            msg("getPeer returned non-TCP socket: %s" % (peer,))
            self.factory.problem = 1
        us = transport.getHost()
        if us.type != "TCP":
            msg("getHost returned non-TCP socket: %s" % (us,))
            self.factory.problem = 1
        self.factory.done = 1
        transport.loseConnection()
class ReaderProtocol(protocol.Protocol):
    """
    Protocol which accumulates every received byte onto its factory's C{data}
    attribute and flags the factory as C{done} when the connection ends.
    """
    def dataReceived(self, data):
        self.factory.data = self.factory.data + data
    def connectionLost(self, reason):
        self.factory.done = 1
class WriterClientFactory(protocol.ClientFactory):
    """
    Client factory which builds L{ReaderProtocol}s; received bytes collect in
    C{self.data} and C{self.done} is set once the connection is lost.
    """
    def __init__(self):
        self.done = 0
        self.data = b""
    def buildProtocol(self, addr):
        reader = ReaderProtocol()
        reader.factory = self
        self.protocol = reader
        return reader
class WriteDataTestCase(unittest.TestCase):
    """
    Test that connected TCP sockets can actually write data. Try to exercise
    the entire ITransport interface.
    """
    def test_writer(self):
        """
        L{ITCPTransport.write} and L{ITCPTransport.writeSequence} send bytes to
        the other end of the connection.
        """
        f = protocol.Factory()
        f.protocol = WriterProtocol
        f.done = 0
        f.problem = 0
        wrappedF = WiredFactory(f)
        p = reactor.listenTCP(0, wrappedF, interface="127.0.0.1")
        self.addCleanup(p.stopListening)
        n = p.getHost().port
        clientF = WriterClientFactory()
        wrappedClientF = WiredFactory(clientF)
        reactor.connectTCP("127.0.0.1", n, wrappedClientF)
        def check(ignored):
            # Replaced deprecated failUnless aliases with assertTrue /
            # assertEqual for consistency with the rest of this file.
            self.assertTrue(f.done, "writer didn't finish, it probably died")
            self.assertEqual(f.problem, 0, "writer indicated an error")
            self.assertTrue(clientF.done,
                            "client didn't see connection dropped")
            expected = b"".join([b"Hello Cleveland!\n",
                                 b"Goodbye", b" cruel", b" world", b"\n"])
            self.assertEqual(clientF.data, expected,
                             "client didn't receive all the data it expected")
        d = defer.gatherResults([wrappedF.onDisconnect,
                                 wrappedClientF.onDisconnect])
        return d.addCallback(check)
    def test_writeAfterShutdownWithoutReading(self):
        """
        A TCP transport which is written to after the connection has been shut
        down should notify its protocol that the connection has been lost, even
        if the TCP transport is not actively being monitored for read events
        (ie, pauseProducing was called on it).
        """
        # This is an unpleasant thing. Generally tests shouldn't skip or
        # run based on the name of the reactor being used (most tests
        # shouldn't care _at all_ what reactor is being used, in fact). The
        # Gtk reactor cannot pass this test, though, because it fails to
        # implement IReactorTCP entirely correctly. Gtk is quite old at
        # this point, so it's more likely that gtkreactor will be deprecated
        # and removed rather than fixed to handle this case correctly.
        # Since this is a pre-existing (and very long-standing) issue with
        # the Gtk reactor, there's no reason for it to prevent this test
        # being added to exercise the other reactors, for which the behavior
        # was also untested but at least works correctly (now). See #2833
        # for information on the status of gtkreactor.
        if reactor.__class__.__name__ == 'IOCPReactor':
            raise unittest.SkipTest(
                "iocpreactor does not, in fact, stop reading immediately after "
                "pauseProducing is called. This results in a bonus disconnection "
                "notification. Under some circumstances, it might be possible to "
                "not receive this notifications (specifically, pauseProducing, "
                "deliver some data, proceed with this test).")
        if reactor.__class__.__name__ == 'GtkReactor':
            raise unittest.SkipTest(
                "gtkreactor does not implement unclean disconnection "
                "notification correctly. This might more properly be "
                "a todo, but due to technical limitations it cannot be.")
        # Called back after the protocol for the client side of the connection
        # has paused its transport, preventing it from reading, therefore
        # preventing it from noticing the disconnection before the rest of the
        # actions which are necessary to trigger the case this test is for have
        # been taken.
        clientPaused = defer.Deferred()
        # Called back when the protocol for the server side of the connection
        # has received connection lost notification.
        serverLost = defer.Deferred()
        class Disconnecter(protocol.Protocol):
            """
            Protocol for the server side of the connection which disconnects
            itself in a callback on clientPaused and publishes notification
            when its connection is actually lost.
            """
            def connectionMade(self):
                """
                Set up a callback on clientPaused to lose the connection.
                """
                msg('Disconnector.connectionMade')
                def disconnect(ignored):
                    msg('Disconnector.connectionMade disconnect')
                    self.transport.loseConnection()
                    msg('loseConnection called')
                clientPaused.addCallback(disconnect)
            def connectionLost(self, reason):
                """
                Notify observers that the server side of the connection has
                ended.
                """
                msg('Disconnecter.connectionLost')
                serverLost.callback(None)
                msg('serverLost called back')
        # Create the server port to which a connection will be made.
        server = protocol.ServerFactory()
        server.protocol = Disconnecter
        port = reactor.listenTCP(0, server, interface='127.0.0.1')
        self.addCleanup(port.stopListening)
        addr = port.getHost()
        @implementer(IPullProducer)
        class Infinite(object):
            """
            A producer which will write to its consumer as long as
            resumeProducing is called.
            @ivar consumer: The L{IConsumer} which will be written to.
            """
            def __init__(self, consumer):
                self.consumer = consumer
            def resumeProducing(self):
                msg('Infinite.resumeProducing')
                self.consumer.write(b'x')
                msg('Infinite.resumeProducing wrote to consumer')
            def stopProducing(self):
                msg('Infinite.stopProducing')
        class UnreadingWriter(protocol.Protocol):
            """
            Trivial protocol which pauses its transport immediately and then
            writes some bytes to it.
            """
            def connectionMade(self):
                msg('UnreadingWriter.connectionMade')
                self.transport.pauseProducing()
                clientPaused.callback(None)
                msg('clientPaused called back')
                def write(ignored):
                    msg('UnreadingWriter.connectionMade write')
                    # This needs to be enough bytes to spill over into the
                    # userspace Twisted send buffer - if it all fits into
                    # the kernel, Twisted won't even poll for OUT events,
                    # which means it won't poll for any events at all, so
                    # the disconnection is never noticed. This is due to
                    # #1662. When #1662 is fixed, this test will likely
                    # need to be adjusted, otherwise connection lost
                    # notification will happen too soon and the test will
                    # probably begin to fail with ConnectionDone instead of
                    # ConnectionLost (in any case, it will no longer be
                    # entirely correct).
                    producer = Infinite(self.transport)
                    msg('UnreadingWriter.connectionMade write created producer')
                    self.transport.registerProducer(producer, False)
                    msg('UnreadingWriter.connectionMade write registered producer')
                serverLost.addCallback(write)
        # Create the client and initiate the connection
        client = MyClientFactory()
        client.protocolFactory = UnreadingWriter
        clientConnectionLost = client.deferred
        def cbClientLost(ignored):
            msg('cbClientLost')
            return client.lostReason
        clientConnectionLost.addCallback(cbClientLost)
        msg('Connecting to %s:%s' % (addr.host, addr.port))
        reactor.connectTCP(addr.host, addr.port, client)
        # By the end of the test, the client should have received notification
        # of unclean disconnection.
        msg('Returning Deferred')
        return self.assertFailure(clientConnectionLost, error.ConnectionLost)
class ConnectionLosingProtocol(protocol.Protocol):
    """
    Protocol which writes a single byte, immediately disconnects, and
    notifies C{self.master} (assigned externally) of the connection.
    """
    def connectionMade(self):
        transport = self.transport
        transport.write(b"1")
        transport.loseConnection()
        self.master._connectionMade()
        self.master.ports.append(transport)
class NoopProtocol(protocol.Protocol):
    """
    Server protocol which only tracks its own lifetime: on connection it
    appends a Deferred to C{self.master.serverConns}; on disconnection it
    fires that Deferred with C{True}.  C{self.master} is assigned externally.
    """
    def connectionMade(self):
        self.d = defer.Deferred()
        self.master.serverConns.append(self.d)
    def connectionLost(self, reason):
        self.d.callback(True)
class ConnectionLostNotifyingProtocol(protocol.Protocol):
    """
    Protocol which fires a Deferred which was previously passed to
    its initializer when the connection is lost.
    @ivar onConnectionLost: The L{Deferred} which will be fired in
        C{connectionLost}.
    @ivar lostConnectionReason: C{None} until the connection is lost, then a
        reference to the reason passed to C{connectionLost}.
    """
    def __init__(self, onConnectionLost):
        self.lostConnectionReason = None
        self.onConnectionLost = onConnectionLost
    def connectionLost(self, reason):
        # Record the reason first so it is visible to onConnectionLost
        # callbacks, then fire with this protocol instance as the result.
        self.lostConnectionReason = reason
        self.onConnectionLost.callback(self)
class HandleSavingProtocol(ConnectionLostNotifyingProtocol):
    """
    Protocol which grabs the platform-specific socket handle and
    saves it as an attribute on itself when the connection is
    established.
    """
    def makeConnection(self, transport):
        """
        Save the platform-specific socket handle for future
        introspection.
        """
        # Stash the handle before normal connection setup so tests can poke
        # at the raw socket even after the transport is gone.
        self.handle = transport.getHandle()
        return protocol.Protocol.makeConnection(self, transport)
class ProperlyCloseFilesMixin:
    """
    Tests for platform resources properly being cleaned up.
    """
    def createServer(self, address, portNumber, factory):
        """
        Bind a server port to which connections will be made. The server
        should use the given protocol factory.
        @return: The L{IListeningPort} for the server created.
        """
        raise NotImplementedError()
    def connectClient(self, address, portNumber, clientCreator):
        """
        Establish a connection to the given address using the given
        L{ClientCreator} instance.
        @return: A Deferred which will fire with the connected protocol instance.
        """
        raise NotImplementedError()
    def getHandleExceptionType(self):
        """
        Return the exception class which will be raised when an operation is
        attempted on a closed platform handle.
        """
        raise NotImplementedError()
    def getHandleErrorCode(self):
        """
        Return the errno expected to result from writing to a closed
        platform socket handle.
        """
        # These platforms have been seen to give EBADF:
        #
        #  Linux 2.4.26, Linux 2.6.15, OS X 10.4, FreeBSD 5.4
        #  Windows 2000 SP 4, Windows XP SP 2
        return errno.EBADF
    def test_properlyCloseFiles(self):
        """
        Test that lost connections properly have their underlying socket
        resources cleaned up.
        """
        onServerConnectionLost = defer.Deferred()
        serverFactory = protocol.ServerFactory()
        serverFactory.protocol = lambda: ConnectionLostNotifyingProtocol(
            onServerConnectionLost)
        serverPort = self.createServer('127.0.0.1', 0, serverFactory)
        onClientConnectionLost = defer.Deferred()
        serverAddr = serverPort.getHost()
        # HandleSavingProtocol remembers the raw socket handle so we can
        # verify below that it has really been closed.
        clientCreator = protocol.ClientCreator(
            reactor, lambda: HandleSavingProtocol(onClientConnectionLost))
        clientDeferred = self.connectClient(
            serverAddr.host, serverAddr.port, clientCreator)
        def clientConnected(client):
            """
            Disconnect the client. Return a Deferred which fires when both
            the client and the server have received disconnect notification.
            """
            client.transport.write(
                b'some bytes to make sure the connection is set up')
            client.transport.loseConnection()
            return defer.gatherResults([
                onClientConnectionLost, onServerConnectionLost])
        clientDeferred.addCallback(clientConnected)
        def clientDisconnected(result):
            """
            Verify that the underlying platform socket handle has been
            cleaned up.
            """
            client, server = result
            # Both sides must have seen an orderly close.
            client.lostConnectionReason.trap(error.ConnectionClosed)
            server.lostConnectionReason.trap(error.ConnectionClosed)
            # Writing to the saved raw handle must now fail with the
            # platform's closed-handle error.
            expectedErrorCode = self.getHandleErrorCode()
            err = self.assertRaises(
                self.getHandleExceptionType(), client.handle.send, b'bytes')
            self.assertEqual(err.args[0], expectedErrorCode)
        clientDeferred.addCallback(clientDisconnected)
        def cleanup(passthrough):
            """
            Shut down the server port. Return a Deferred which fires when
            this has completed.
            """
            result = defer.maybeDeferred(serverPort.stopListening)
            result.addCallback(lambda ign: passthrough)
            return result
        clientDeferred.addBoth(cleanup)
        return clientDeferred
class ProperlyCloseFilesTestCase(unittest.TestCase, ProperlyCloseFilesMixin):
    """
    Test that the sockets created by L{IReactorTCP.connectTCP} are cleaned up
    when the connection they are associated with is closed.
    """
    def createServer(self, address, portNumber, factory):
        """
        Create a TCP server using L{IReactorTCP.listenTCP}.
        """
        return reactor.listenTCP(portNumber, factory, interface=address)
    def connectClient(self, address, portNumber, clientCreator):
        """
        Create a TCP client using L{IReactorTCP.connectTCP}.
        """
        return clientCreator.connectTCP(address, portNumber)
    def getHandleExceptionType(self):
        """
        Return L{socket.error} as the expected error type which will be
        raised by a write to the low-level socket object after it has been
        closed.
        """
        return socket.error
class WiredForDeferreds(policies.ProtocolWrapper):
    """
    Protocol wrapper which fires its factory's C{onConnect} and
    C{onDisconnect} Deferreds when the wrapped connection is made and lost.
    """
    def __init__(self, factory, wrappedProtocol):
        policies.ProtocolWrapper.__init__(self, factory, wrappedProtocol)
    def connectionMade(self):
        # Let the wrapped protocol handle the event first, then notify.
        policies.ProtocolWrapper.connectionMade(self)
        self.factory.onConnect.callback(None)
    def connectionLost(self, reason):
        # Let the wrapped protocol handle the event first, then notify.
        policies.ProtocolWrapper.connectionLost(self, reason)
        self.factory.onDisconnect.callback(None)
class WiredFactory(policies.WrappingFactory):
    """
    Wrapping factory whose C{onConnect}/C{onDisconnect} Deferreds fire when
    its L{WiredForDeferreds} protocol connects and disconnects.
    """
    protocol = WiredForDeferreds
    def __init__(self, wrappedFactory):
        policies.WrappingFactory.__init__(self, wrappedFactory)
        self.onConnect = defer.Deferred()
        self.onDisconnect = defer.Deferred()
class AddressTestCase(unittest.TestCase):
    """
    Tests for address-related interactions with client and server protocols.
    """
    def setUp(self):
        """
        Create a port and connected client/server pair which can be used
        to test factory behavior related to addresses.
        @return: A L{defer.Deferred} which will be called back when both the
            client and server protocols have received their connection made
            callback.
        """
        class RememberingWrapper(protocol.ClientFactory):
            """
            Simple wrapper factory which records the addresses which are
            passed to its L{buildProtocol} method and delegates actual
            protocol creation to another factory.
            @ivar addresses: A list of the objects passed to buildProtocol.
            @ivar factory: The wrapped factory to which protocol creation is
                delegated.
            """
            def __init__(self, factory):
                self.addresses = []
                self.factory = factory
            # Only bother to pass on buildProtocol calls to the wrapped
            # factory - doStart, doStop, etc aren't necessary for this test
            # to pass.
            def buildProtocol(self, addr):
                """
                Append the given address to C{self.addresses} and forward
                the call to C{self.factory}.
                """
                self.addresses.append(addr)
                return self.factory.buildProtocol(addr)
        # Make a server which we can receive connection and disconnection
        # notification for, and which will record the address passed to its
        # buildProtocol.
        self.server = MyServerFactory()
        self.serverConnMade = self.server.protocolConnectionMade = defer.Deferred()
        self.serverConnLost = self.server.protocolConnectionLost = defer.Deferred()
        # RememberingWrapper is a ClientFactory, but ClientFactory is-a
        # ServerFactory, so this is okay.
        self.serverWrapper = RememberingWrapper(self.server)
        # Do something similar for a client.
        self.client = MyClientFactory()
        self.clientConnMade = self.client.protocolConnectionMade = defer.Deferred()
        self.clientConnLost = self.client.protocolConnectionLost = defer.Deferred()
        self.clientWrapper = RememberingWrapper(self.client)
        self.port = reactor.listenTCP(0, self.serverWrapper, interface='127.0.0.1')
        self.connector = reactor.connectTCP(
            self.port.getHost().host, self.port.getHost().port, self.clientWrapper)
        return defer.gatherResults([self.serverConnMade, self.clientConnMade])
    def tearDown(self):
        """
        Disconnect the client/server pair and shutdown the port created in
        L{setUp}.
        """
        self.connector.disconnect()
        return defer.gatherResults([
            self.serverConnLost, self.clientConnLost,
            defer.maybeDeferred(self.port.stopListening)])
    def test_buildProtocolClient(self):
        """
        L{ClientFactory.buildProtocol} should be invoked with the address of
        the server to which a connection has been established, which should
        be the same as the address reported by the C{getHost} method of the
        transport of the server protocol and as the C{getPeer} method of the
        transport of the client protocol.
        """
        serverHost = self.server.protocol.transport.getHost()
        clientPeer = self.client.protocol.transport.getPeer()
        self.assertEqual(
            self.clientWrapper.addresses,
            [IPv4Address('TCP', serverHost.host, serverHost.port)])
        self.assertEqual(
            self.clientWrapper.addresses,
            [IPv4Address('TCP', clientPeer.host, clientPeer.port)])
class LargeBufferWriterProtocol(protocol.Protocol):
    """
    Protocol which writes C{self.factory.len} bytes with a single C{write}
    call and then disconnects.  Win32 sockets cannot handle single huge
    chunks of bytes, so this exercises Twisted's handling of oversized
    writes.
    """
    def connectionMade(self):
        # One massive (e.g. 60MB) write in a single call.
        payload = b'X' * self.factory.len
        self.transport.write(payload)
        self.factory.done = 1
        self.transport.loseConnection()
class LargeBufferReaderProtocol(protocol.Protocol):
    """
    Protocol which counts received bytes on its factory's C{len} attribute
    and marks the factory C{done} when the connection closes.
    """
    def dataReceived(self, data):
        self.factory.len = self.factory.len + len(data)
    def connectionLost(self, reason):
        self.factory.done = 1
class LargeBufferReaderClientFactory(protocol.ClientFactory):
    """
    Client factory which builds L{LargeBufferReaderProtocol}s; the running
    byte count accumulates in C{self.len} and C{self.done} is set on
    disconnection.
    """
    def __init__(self):
        self.done = 0
        self.len = 0
    def buildProtocol(self, addr):
        reader = LargeBufferReaderProtocol()
        reader.factory = self
        self.protocol = reader
        return reader
class FireOnClose(policies.ProtocolWrapper):
    """A wrapper around a protocol that makes it fire a deferred when
    connectionLost is called.
    """
    def connectionLost(self, reason):
        # Deliver the event to the wrapped protocol first, then notify.
        policies.ProtocolWrapper.connectionLost(self, reason)
        self.factory.deferred.callback(None)
class FireOnCloseFactory(policies.WrappingFactory):
    """
    Wrapping factory whose C{deferred} fires when its L{FireOnClose}
    protocol's connection is lost.
    """
    protocol = FireOnClose
    def __init__(self, wrappedFactory):
        policies.WrappingFactory.__init__(self, wrappedFactory)
        self.deferred = defer.Deferred()
class LargeBufferTestCase(unittest.TestCase):
    """Test that buffering large amounts of data works.
    """
    # Size of the single write performed by LargeBufferWriterProtocol.
    datalen = 60*1024*1024
    def testWriter(self):
        """
        A single 60MB write is fully delivered to the peer, even though it far
        exceeds what the platform accepts in one socket operation.
        """
        f = protocol.Factory()
        f.protocol = LargeBufferWriterProtocol
        f.done = 0
        f.problem = 0
        f.len = self.datalen
        wrappedF = FireOnCloseFactory(f)
        p = reactor.listenTCP(0, wrappedF, interface="127.0.0.1")
        self.addCleanup(p.stopListening)
        n = p.getHost().port
        clientF = LargeBufferReaderClientFactory()
        wrappedClientF = FireOnCloseFactory(clientF)
        reactor.connectTCP("127.0.0.1", n, wrappedClientF)
        d = defer.gatherResults([wrappedF.deferred, wrappedClientF.deferred])
        def check(ignored):
            # Replaced deprecated failUnless aliases with assertTrue /
            # assertEqual for consistency with the rest of this file.
            self.assertTrue(f.done, "writer didn't finish, it probably died")
            self.assertEqual(clientF.len, self.datalen,
                             "client didn't receive all the data it expected "
                             "(%d != %d)" % (clientF.len, self.datalen))
            self.assertTrue(clientF.done,
                            "client didn't see connection dropped")
        return d.addCallback(check)
@implementer(IHalfCloseableProtocol)
class MyHCProtocol(AccumulatingProtocol):
    """
    Half-closeable protocol which records the shutdown of each direction;
    once both directions have been closed, it reports a full connection
    loss via the base class.
    """
    # Per-direction shutdown flags, set by the notifications below.
    readHalfClosed = False
    writeHalfClosed = False
    def readConnectionLost(self):
        self.readHalfClosed = True
        # Invoke notification logic from the base class to simplify testing.
        if self.writeHalfClosed:
            self.connectionLost(None)
    def writeConnectionLost(self):
        self.writeHalfClosed = True
        # Invoke notification logic from the base class to simplify testing.
        if self.readHalfClosed:
            self.connectionLost(None)
class MyHCFactory(protocol.ServerFactory):
    """
    Server factory producing L{MyHCProtocol}s.  C{called} counts
    C{buildProtocol} invocations and C{self.protocol} always refers to the
    most recently built protocol.
    """
    called = 0
    protocolConnectionMade = None
    def buildProtocol(self, addr):
        self.called += 1
        proto = MyHCProtocol()
        proto.factory = self
        self.protocol = proto
        return proto
class HalfCloseTestCase(unittest.TestCase):
    """Test half-closing connections."""
    def setUp(self):
        """Start a half-close-aware server and connect a half-close-aware
        client; finish once both ends have observed the connection."""
        self.f = f = MyHCFactory()
        self.p = p = reactor.listenTCP(0, f, interface="127.0.0.1")
        self.addCleanup(p.stopListening)
        d = loopUntil(lambda :p.connected)
        self.cf = protocol.ClientCreator(reactor, MyHCProtocol)
        d.addCallback(lambda _: self.cf.connectTCP(p.getHost().host,
                                                   p.getHost().port))
        d.addCallback(self._setUp)
        return d
    def _setUp(self, client):
        # Record the connected client protocol and arrange to be notified
        # when its connection finally closes (used by tearDown).
        self.client = client
        self.clientProtoConnectionLost = self.client.closedDeferred = defer.Deferred()
        self.assertEqual(self.client.transport.connected, 1)
        # Wait for the server to notice there is a connection, too.
        return loopUntil(lambda: getattr(self.f, 'protocol', None) is not None)
    def tearDown(self):
        self.assertEqual(self.client.closed, 0)
        self.client.transport.loseConnection()
        d = defer.maybeDeferred(self.p.stopListening)
        d.addCallback(lambda ign: self.clientProtoConnectionLost)
        d.addCallback(self._tearDown)
        return d
    def _tearDown(self, ignored):
        self.assertEqual(self.client.closed, 1)
        # because we did half-close, the server also needs to be closed
        # explicitly.
        self.assertEqual(self.f.protocol.closed, 0)
        d = defer.Deferred()
        def _connectionLost(reason):
            self.f.protocol.closed = 1
            d.callback(None)
        self.f.protocol.connectionLost = _connectionLost
        self.f.protocol.transport.loseConnection()
        d.addCallback(lambda x:self.assertEqual(self.f.protocol.closed, 1))
        return d
    def testCloseWriteCloser(self):
        """Half-closing the write side notifies the peer's read side, and
        subsequent writes are silently discarded."""
        client = self.client
        f = self.f
        t = client.transport
        t.write(b"hello")
        # Wait until the written data has actually left the client's buffer.
        d = loopUntil(lambda :len(t._tempDataBuffer) == 0)
        def loseWrite(ignored):
            t.loseWriteConnection()
            return loopUntil(lambda :t._writeDisconnected)
        def check(ignored):
            self.assertEqual(client.closed, False)
            self.assertEqual(client.writeHalfClosed, True)
            self.assertEqual(client.readHalfClosed, False)
            return loopUntil(lambda :f.protocol.readHalfClosed)
        def write(ignored):
            w = client.transport.write
            # Writes after a write half-close must go nowhere.
            w(b" world")
            w(b"lalala fooled you")
            self.assertEqual(0, len(client.transport._tempDataBuffer))
            self.assertEqual(f.protocol.data, b"hello")
            self.assertEqual(f.protocol.closed, False)
            self.assertEqual(f.protocol.readHalfClosed, True)
        return d.addCallback(loseWrite).addCallback(check).addCallback(write)
    def testWriteCloseNotification(self):
        """The writer sees writeHalfClosed, the peer sees readHalfClosed,
        and the writer's read side stays open."""
        f = self.f
        f.protocol.transport.loseWriteConnection()
        d = defer.gatherResults([
            loopUntil(lambda :f.protocol.writeHalfClosed),
            loopUntil(lambda :self.client.readHalfClosed)])
        d.addCallback(lambda _: self.assertEqual(
            f.protocol.readHalfClosed, False))
        return d
class HalfClose2TestCase(unittest.TestCase):
    """Half-close behaviour against a peer protocol that is *not*
    half-close aware (a plain AccumulatingProtocol)."""
    def setUp(self):
        self.f = f = MyServerFactory()
        self.f.protocolConnectionMade = defer.Deferred()
        self.p = p = reactor.listenTCP(0, f, interface="127.0.0.1")
        # XXX we don't test server side yet since we don't do it yet
        d = protocol.ClientCreator(reactor, AccumulatingProtocol).connectTCP(
            p.getHost().host, p.getHost().port)
        d.addCallback(self._gotClient)
        return d
    def _gotClient(self, client):
        self.client = client
        # Now wait for the server to catch up - it doesn't matter if this
        # Deferred has already fired and gone away, in that case we'll
        # return None and not wait at all, which is precisely correct.
        return self.f.protocolConnectionMade
    def tearDown(self):
        self.client.transport.loseConnection()
        return self.p.stopListening()
    def testNoNotification(self):
        """
        TCP protocols support half-close connections, but not all of them
        support being notified of write closes. In this case, test that
        half-closing the connection causes the peer's connection to be
        closed.
        """
        self.client.transport.write(b"hello")
        self.client.transport.loseWriteConnection()
        self.f.protocol.closedDeferred = d = defer.Deferred()
        self.client.closedDeferred = d2 = defer.Deferred()
        # The peer must have received all pending data before being closed.
        d.addCallback(lambda x:
                      self.assertEqual(self.f.protocol.data, b'hello'))
        d.addCallback(lambda x: self.assertEqual(self.f.protocol.closed, True))
        return defer.gatherResults([d, d2])
    def testShutdownException(self):
        """
        If the other side has already closed its connection,
        loseWriteConnection should pass silently.
        """
        self.f.protocol.transport.loseConnection()
        self.client.transport.write(b"X")
        self.client.transport.loseWriteConnection()
        self.f.protocol.closedDeferred = d = defer.Deferred()
        self.client.closedDeferred = d2 = defer.Deferred()
        d.addCallback(lambda x:
                      self.assertEqual(self.f.protocol.closed, True))
        return defer.gatherResults([d, d2])
class HalfCloseBuggyApplicationTests(unittest.TestCase):
    """
    Test half-closing connections where notification code has bugs.
    """
    def setUp(self):
        """
        Set up a server and connect a client to it.  Return a Deferred which
        only fires once this is done.
        """
        self.serverFactory = MyHCFactory()
        self.serverFactory.protocolConnectionMade = defer.Deferred()
        self.port = reactor.listenTCP(
            0, self.serverFactory, interface="127.0.0.1")
        self.addCleanup(self.port.stopListening)
        addr = self.port.getHost()
        creator = protocol.ClientCreator(reactor, MyHCProtocol)
        clientDeferred = creator.connectTCP(addr.host, addr.port)
        def setClient(clientProtocol):
            self.clientProtocol = clientProtocol
        clientDeferred.addCallback(setClient)
        # Wait for both the server-side protocol and the client connection.
        return defer.gatherResults([
            self.serverFactory.protocolConnectionMade,
            clientDeferred])
    def aBug(self, *args):
        """
        Fake implementation of a callback which illegally raises an
        exception.
        """
        raise RuntimeError("ONO I AM BUGGY CODE")
    def _notificationRaisesTest(self):
        """
        Helper for testing that an exception is logged by the time the
        client protocol loses its connection.
        """
        closed = self.clientProtocol.closedDeferred = defer.Deferred()
        self.clientProtocol.transport.loseWriteConnection()
        def check(ignored):
            # Exactly one RuntimeError (from aBug) must have been logged.
            errors = self.flushLoggedErrors(RuntimeError)
            self.assertEqual(len(errors), 1)
        closed.addCallback(check)
        return closed
    def test_readNotificationRaises(self):
        """
        If C{readConnectionLost} raises an exception when the transport
        calls it to notify the protocol of that event, the exception should
        be logged and the protocol should be disconnected completely.
        """
        self.serverFactory.protocol.readConnectionLost = self.aBug
        return self._notificationRaisesTest()
    def test_writeNotificationRaises(self):
        """
        If C{writeConnectionLost} raises an exception when the transport
        calls it to notify the protocol of that event, the exception should
        be logged and the protocol should be disconnected completely.
        """
        self.clientProtocol.writeConnectionLost = self.aBug
        return self._notificationRaisesTest()
class LogTestCase(unittest.TestCase):
    """
    Test logging facility of TCP base classes.
    """
    def test_logstrClientSetup(self):
        """
        Check that the log customization of the client transport happens
        once the client is connected.
        """
        server = MyServerFactory()
        client = MyClientFactory()
        client.protocolConnectionMade = defer.Deferred()
        port = reactor.listenTCP(0, server, interface='127.0.0.1')
        self.addCleanup(port.stopListening)
        connector = reactor.connectTCP(
            port.getHost().host, port.getHost().port, client)
        self.addCleanup(connector.disconnect)
        # It should still have the default value
        self.assertEqual(connector.transport.logstr,
                         "Uninitialized")
        def cb(ign):
            # After connectionMade, logstr identifies the protocol and role.
            self.assertEqual(connector.transport.logstr,
                             "AccumulatingProtocol,client")
        client.protocolConnectionMade.addCallback(cb)
        return client.protocolConnectionMade
class PauseProducingTestCase(unittest.TestCase):
    """
    Test some behaviors of pausing the production of a transport.
    """
    def test_pauseProducingInConnectionMade(self):
        """
        In C{connectionMade} of a client protocol, C{pauseProducing} used to be
        ignored: this test is here to ensure it's not ignored.
        """
        server = MyServerFactory()
        client = MyClientFactory()
        client.protocolConnectionMade = defer.Deferred()
        port = reactor.listenTCP(0, server, interface='127.0.0.1')
        self.addCleanup(port.stopListening)
        connector = reactor.connectTCP(
            port.getHost().host, port.getHost().port, client)
        self.addCleanup(connector.disconnect)
        def checkInConnectionMade(proto):
            tr = proto.transport
            # The transport should already be monitored
            self.assertIn(tr, reactor.getReaders() +
                              reactor.getWriters())
            proto.transport.pauseProducing()
            # Pausing must unregister the transport from the reactor.
            self.assertNotIn(tr, reactor.getReaders() +
                                 reactor.getWriters())
            d = defer.Deferred()
            d.addCallback(checkAfterConnectionMade)
            reactor.callLater(0, d.callback, proto)
            return d
        def checkAfterConnectionMade(proto):
            tr = proto.transport
            # The transport should still not be monitored
            self.assertNotIn(tr, reactor.getReaders() +
                                 reactor.getWriters())
        client.protocolConnectionMade.addCallback(checkInConnectionMade)
        return client.protocolConnectionMade
    # getReaders/getWriters only exist on IReactorFDSet reactors.
    if not interfaces.IReactorFDSet.providedBy(reactor):
        test_pauseProducingInConnectionMade.skip = "Reactor not providing IReactorFDSet"
class CallBackOrderTestCase(unittest.TestCase):
    """
    Test the order of reactor callbacks
    """
    def test_loseOrder(self):
        """
        Check that Protocol.connectionLost is called before factory's
        clientConnectionLost
        """
        server = MyServerFactory()
        server.protocolConnectionMade = (defer.Deferred()
            .addCallback(lambda proto: self.addCleanup(
                         proto.transport.loseConnection)))
        client = MyClientFactory()
        client.protocolConnectionLost = defer.Deferred()
        client.protocolConnectionMade = defer.Deferred()
        def _cbCM(res):
            """
            protocol.connectionMade callback
            """
            # Drop the connection on the next reactor turn so both loss
            # notifications fire during normal event processing.
            reactor.callLater(0, client.protocol.transport.loseConnection)
        client.protocolConnectionMade.addCallback(_cbCM)
        port = reactor.listenTCP(0, server, interface='127.0.0.1')
        self.addCleanup(port.stopListening)
        connector = reactor.connectTCP(
            port.getHost().host, port.getHost().port, client)
        self.addCleanup(connector.disconnect)
        def _cbCCL(res):
            """
            factory.clientConnectionLost callback
            """
            return 'CCL'
        def _cbCL(res):
            """
            protocol.connectionLost callback
            """
            return 'CL'
        def _cbGather(res):
            # gatherResults preserves firing order of the two Deferreds:
            # protocol's connectionLost ('CL') must come first.
            self.assertEqual(res, ['CL', 'CCL'])
        d = defer.gatherResults([
            client.protocolConnectionLost.addCallback(_cbCL),
            client.deferred.addCallback(_cbCCL)])
        return d.addCallback(_cbGather)
try:
    import resource
except ImportError:
    pass
else:
    # Where the platform exposes RLIMIT_NOFILE, run enough rounds to go past
    # the soft file-descriptor limit, proving descriptors really get closed.
    numRounds = resource.getrlimit(resource.RLIMIT_NOFILE)[0] + 10
    ProperlyCloseFilesTestCase.numberRounds = numRounds
| bsd-3-clause |
tedor/home-blog | debug/pysrc/pydev_runfiles_parallel_client.py | 7 | 8023 | from pydevd_constants import * #@UnusedWildImport
try:
from Queue import Queue
except:
from queue import Queue #@UnresolvedImport
import threading
from pydev_imports import xmlrpclib
import traceback
import time
from pydev_runfiles_coverage import StartCoverageSupportFromParams
#=======================================================================================================================
# ParallelNotification
#=======================================================================================================================
class ParallelNotification(object):
    """One queued xml-rpc notification: a remote method name together with
    the positional and keyword arguments it should be invoked with."""

    def __init__(self, method, args, kwargs):
        self.method = method
        self.args = args
        self.kwargs = kwargs

    def ToTuple(self):
        """Return the (method, args, kwargs) triple used for batched sends."""
        return (self.method, self.args, self.kwargs)
#=======================================================================================================================
# KillServer
#=======================================================================================================================
class KillServer(object):
    """Sentinel placed on the notifications queue to tell the ServerComm
    thread to flush pending notifications and exit."""
    pass
#=======================================================================================================================
# ServerComm
#=======================================================================================================================
class ServerComm(threading.Thread):
    """Background thread that forwards queued notifications to the xml-rpc
    server in batches.

    Items put into ``notifications_queue`` are ParallelNotification
    instances; a KillServer sentinel makes the thread flush what is pending
    and then terminate (setting ``finished``).
    """

    def __init__(self, job_id, server):
        self.notifications_queue = Queue()
        threading.Thread.__init__(self)
        self.setDaemon(False) #Wait for all the notifications to be passed before exiting!
        assert job_id is not None
        # Bug fix: this previously read ``assert port is not None``, which
        # referenced a module-level global that only exists when this file is
        # executed as a script -- importing the module and constructing a
        # ServerComm raised NameError. The invariant that actually matters
        # here is that a server proxy was provided.
        assert server is not None
        self.job_id = job_id
        self.finished = False
        self.server = server

    def run(self):
        while True:
            kill_found = False
            commands = []
            # Block for the first command, then drain without blocking so a
            # burst of notifications is sent as a single batched call.
            command = self.notifications_queue.get(block=True)
            if isinstance(command, KillServer):
                kill_found = True
            else:
                assert isinstance(command, ParallelNotification)
                commands.append(command.ToTuple())
            try:
                while True:
                    command = self.notifications_queue.get(block=False) #No block to create a batch.
                    if isinstance(command, KillServer):
                        kill_found = True
                    else:
                        assert isinstance(command, ParallelNotification)
                        commands.append(command.ToTuple())
            except:
                pass #That's OK, we're getting it until it becomes empty so that we notify multiple at once.
            if commands:
                try:
                    #Batch notification.
                    self.server.lock.acquire()
                    try:
                        self.server.notifyCommands(self.job_id, commands)
                    finally:
                        self.server.lock.release()
                except:
                    traceback.print_exc()
            if kill_found:
                self.finished = True
                return
#=======================================================================================================================
# ServerFacade
#=======================================================================================================================
class ServerFacade(object):
    """Stand-in for the xml-rpc server: interesting test notifications are
    queued for the ServerComm thread instead of being sent synchronously."""

    def __init__(self, notifications_queue):
        self.notifications_queue = notifications_queue

    def _enqueue(self, method, args, kwargs):
        # Internal helper: queue one notification for batched delivery.
        self.notifications_queue.put_nowait(ParallelNotification(method, args, kwargs))

    def notifyTestsCollected(self, *args, **kwargs):
        pass #This notification won't be passed

    def notifyTestRunFinished(self, *args, **kwargs):
        pass #This notification won't be passed

    def notifyStartTest(self, *args, **kwargs):
        self._enqueue('notifyStartTest', args, kwargs)

    def notifyTest(self, *args, **kwargs):
        self._enqueue('notifyTest', args, kwargs)
#=======================================================================================================================
# run_client
#=======================================================================================================================
def run_client(job_id, port, verbosity, coverage_output_file, coverage_include):
    """Worker entry point: repeatedly pull batches of tests from the
    coordinating xml-rpc server at ``port`` and run them in this process.

    Results are sent back asynchronously through a ServerComm thread; the
    function returns after the server hands out an empty batch.
    """
    job_id = int(job_id)
    import pydev_localhost
    server = xmlrpclib.Server('http://%s:%s' % (pydev_localhost.get_localhost(), port))
    # The lock serializes access to the proxy between this thread and the
    # ServerComm notification thread.
    server.lock = threading.Lock()
    server_comm = ServerComm(job_id, server)
    server_comm.start()
    try:
        server_facade = ServerFacade(server_comm.notifications_queue)
        import pydev_runfiles
        import pydev_runfiles_xml_rpc
        pydev_runfiles_xml_rpc.SetServer(server_facade)
        #Starts None and when the 1st test is gotten, it's started (because a server may be initiated and terminated
        #before receiving any test -- which would mean a different process got all the tests to run).
        coverage = None
        try:
            tests_to_run = [1]
            while tests_to_run:
                #Investigate: is it dangerous to use the same xmlrpclib server from different threads?
                #It seems it should be, as it creates a new connection for each request...
                server.lock.acquire()
                try:
                    tests_to_run = server.GetTestsToRun(job_id)
                finally:
                    server.lock.release()
                if not tests_to_run:
                    break
                # Lazily start coverage on the first non-empty batch.
                if coverage is None:
                    _coverage_files, coverage = StartCoverageSupportFromParams(
                        None, coverage_output_file, 1, coverage_include)
                # Each entry is "filename|testname"; group tests by file.
                files_to_tests = {}
                for test in tests_to_run:
                    filename_and_test = test.split('|')
                    if len(filename_and_test) == 2:
                        files_to_tests.setdefault(filename_and_test[0], []).append(filename_and_test[1])
                configuration = pydev_runfiles.Configuration(
                    '',
                    verbosity,
                    None,
                    None,
                    None,
                    files_to_tests,
                    1, #Always single job here
                    None,
                    #The coverage is handled in this loop.
                    coverage_output_file=None,
                    coverage_include=None,
                )
                test_runner = pydev_runfiles.PydevTestRunner(configuration)
                sys.stdout.flush()
                test_runner.run_tests(handle_coverage=False)
        finally:
            if coverage is not None:
                coverage.stop()
                coverage.save()
    except:
        traceback.print_exc()
    # Always tell the notification thread to flush and exit.
    server_comm.notifications_queue.put_nowait(KillServer())
#=======================================================================================================================
# main
#=======================================================================================================================
if __name__ == '__main__':
    # Expected argv: job_id port verbosity [coverage_output_file coverage_include]
    if len(sys.argv) - 1 == 3:
        job_id, port, verbosity = sys.argv[1:]
        coverage_output_file, coverage_include = None, None
    elif len(sys.argv) - 1 == 5:
        job_id, port, verbosity, coverage_output_file, coverage_include = sys.argv[1:]
    else:
        # Bug fix: the original concatenated a str with a list
        # ('...' + sys.argv[1:]), raising TypeError instead of the intended
        # AssertionError message.
        raise AssertionError(
            'Could not find out how to handle the parameters: %s' % (sys.argv[1:],))
    job_id = int(job_id)
    port = int(port)
    verbosity = int(verbosity)
    run_client(job_id, port, verbosity, coverage_output_file, coverage_include)
| bsd-3-clause |
robovm/robovm-studio | python/lib/Lib/site-packages/django/contrib/localflavor/pl/forms.py | 273 | 5444 | """
Polish-specific form helpers
"""
import re
from django.forms import ValidationError
from django.forms.fields import Select, RegexField
from django.utils.translation import ugettext_lazy as _
from django.core.validators import EMPTY_VALUES
class PLProvinceSelect(Select):
    """
    A select widget with list of Polish administrative provinces as choices.
    """
    def __init__(self, attrs=None):
        # Imported lazily so the (large) choices table is only loaded when
        # the widget is actually instantiated.
        from pl_voivodeships import VOIVODESHIP_CHOICES
        super(PLProvinceSelect, self).__init__(attrs, choices=VOIVODESHIP_CHOICES)
class PLCountySelect(Select):
    """
    A select widget with list of Polish administrative units as choices.
    """
    def __init__(self, attrs=None):
        # Imported lazily so the (large) choices table is only loaded when
        # the widget is actually instantiated.
        from pl_administrativeunits import ADMINISTRATIVE_UNIT_CHOICES
        super(PLCountySelect, self).__init__(attrs, choices=ADMINISTRATIVE_UNIT_CHOICES)
class PLPESELField(RegexField):
    """
    A form field that validates as Polish Identification Number (PESEL).
    Checks the following rules:
        * the length consist of 11 digits
        * has a valid checksum
    The algorithm is documented at http://en.wikipedia.org/wiki/PESEL.
    """
    default_error_messages = {
        'invalid': _(u'National Identification Number consists of 11 digits.'),
        'checksum': _(u'Wrong checksum for the National Identification Number.'),
    }

    def __init__(self, *args, **kwargs):
        super(PLPESELField, self).__init__(r'^\d{11}$',
            max_length=None, min_length=None, *args, **kwargs)

    def clean(self, value):
        # Regex validation first (raises 'invalid' on format errors).
        super(PLPESELField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        if not self.has_valid_checksum(value):
            raise ValidationError(self.error_messages['checksum'])
        return u'%s' % value

    def has_valid_checksum(self, number):
        """
        Calculates a checksum with the provided algorithm.
        """
        weights = (1, 3, 7, 9, 1, 3, 7, 9, 1, 3, 1)
        total = sum(int(digit) * weight
                    for digit, weight in zip(number, weights))
        return total % 10 == 0
class PLNIPField(RegexField):
    """
    A form field that validates as Polish Tax Number (NIP).
    Valid forms are: XXX-XXX-YY-YY or XX-XX-YYY-YYY.
    Checksum algorithm based on documentation at
    http://wipos.p.lodz.pl/zylla/ut/nip-rego.html
    """
    default_error_messages = {
        'invalid': _(u'Enter a tax number field (NIP) in the format XXX-XXX-XX-XX or XX-XX-XXX-XXX.'),
        'checksum': _(u'Wrong checksum for the Tax Number (NIP).'),
    }

    def __init__(self, *args, **kwargs):
        super(PLNIPField, self).__init__(r'^\d{3}-\d{3}-\d{2}-\d{2}$|^\d{2}-\d{2}-\d{3}-\d{3}$',
            max_length=None, min_length=None, *args, **kwargs)

    def clean(self, value):
        # Regex validation first (raises 'invalid' on format errors).
        super(PLNIPField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        # Normalize: strip the dashes before checksumming and returning.
        value = re.sub("[-]", "", value)
        if not self.has_valid_checksum(value):
            raise ValidationError(self.error_messages['checksum'])
        return u'%s' % value

    def has_valid_checksum(self, number):
        """
        Calculates a checksum with the provided algorithm.

        The last digit must equal the weighted sum of the first nine
        digits modulo 11.
        """
        weights = (6, 5, 7, 2, 3, 4, 5, 6, 7)
        total = sum(int(digit) * weight
                    for digit, weight in zip(number, weights))
        return total % 11 == int(number[-1])
class PLREGONField(RegexField):
    """
    A form field that validates its input is a REGON number.
    Valid regon number consists of 9 or 14 digits.
    See http://www.stat.gov.pl/bip/regon_ENG_HTML.htm for more information.
    """
    default_error_messages = {
        'invalid': _(u'National Business Register Number (REGON) consists of 9 or 14 digits.'),
        'checksum': _(u'Wrong checksum for the National Business Register Number (REGON).'),
    }
    def __init__(self, *args, **kwargs):
        super(PLREGONField, self).__init__(r'^\d{9,14}$',
            max_length=None, min_length=None, *args, **kwargs)
    def clean(self,value):
        # Regex validation first (raises 'invalid' on format errors).
        super(PLREGONField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        if not self.has_valid_checksum(value):
            raise ValidationError(self.error_messages['checksum'])
        return u'%s' % value
    def has_valid_checksum(self, number):
        """
        Calculates a checksum with the provided algorithm.
        """
        # Each tuple is a weight table; -1 marks the check digit position.
        # A 14-digit REGON must satisfy both its own table and the 9-digit one
        # (third table: 9-digit weights padded with zeros to length 14).
        weights = (
            (8, 9, 2, 3, 4, 5, 6, 7, -1),
            (2, 4, 8, 5, 0, 9, 7, 3, 6, 1, 2, 4, 8, -1),
            (8, 9, 2, 3, 4, 5, 6, 7, -1, 0, 0, 0, 0, 0),
        )
        # Keep only the tables matching this number's length.
        weights = [table for table in weights if len(table) == len(number)]
        for table in weights:
            checksum = sum([int(n) * w for n, w in zip(number, table)])
            # ``% 11 % 10`` maps a computed check value of 10 to 0 before
            # comparing; nonzero means the check digit does not match.
            if checksum % 11 % 10:
                return False
        # No matching table at all (impossible lengths) counts as invalid.
        return bool(weights)
class PLPostalCodeField(RegexField):
    """
    A form field that validates as Polish postal code.
    Valid code is XX-XXX where X is digit.
    """
    default_error_messages = {
        'invalid': _(u'Enter a postal code in the format XX-XXX.'),
    }
    def __init__(self, *args, **kwargs):
        # Format-only validation; postal codes have no checksum.
        super(PLPostalCodeField, self).__init__(r'^\d{2}-\d{3}$',
            max_length=None, min_length=None, *args, **kwargs)
| apache-2.0 |
jjmachan/activityPointsApp | activitypoints/lib/python3.5/site-packages/django/utils/encoding.py | 47 | 10078 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import codecs
import datetime
import locale
from decimal import Decimal
from django.utils import six
from django.utils.functional import Promise
from django.utils.six.moves.urllib.parse import quote, unquote
if six.PY3:
from urllib.parse import unquote_to_bytes
class DjangoUnicodeDecodeError(UnicodeDecodeError):
    """UnicodeDecodeError that also remembers the object whose conversion
    failed, and includes it (with its type) in the error message."""

    def __init__(self, obj, *args):
        self.obj = obj
        UnicodeDecodeError.__init__(self, *args)

    def __str__(self):
        base_message = UnicodeDecodeError.__str__(self)
        return '{0}. You passed in {1!r} ({2})'.format(
            base_message, self.obj, type(self.obj))
# For backwards compatibility. (originally in Django, then added to six 1.9)
python_2_unicode_compatible = six.python_2_unicode_compatible
def smart_text(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Returns a text object representing 's' -- unicode on Python 2 and str on
    Python 3. Treats bytestrings using the 'encoding' codec.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # gettext_lazy() results must remain lazy, so they pass through as-is;
    # everything else is coerced through force_text.
    return s if isinstance(s, Promise) else force_text(s, encoding, strings_only, errors)
# Types that force_text(strings_only=True) leaves untouched.
_PROTECTED_TYPES = six.integer_types + (
    type(None), float, Decimal, datetime.datetime, datetime.date, datetime.time
)
def is_protected_type(obj):
    """Determine if the object instance is of a protected type.
    Objects of protected types are preserved as-is when passed to
    force_text(strings_only=True).
    """
    return isinstance(obj, _PROTECTED_TYPES)
def force_text(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_text, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.
    If strings_only is True, don't convert (some) non-string-like objects.

    Raises DjangoUnicodeDecodeError (with the offending object attached) if
    the bytes cannot be decoded with ``encoding``.
    """
    # Handle the common case first for performance reasons.
    if issubclass(type(s), six.text_type):
        return s
    if strings_only and is_protected_type(s):
        return s
    try:
        if not issubclass(type(s), six.string_types):
            if six.PY3:
                if isinstance(s, bytes):
                    s = six.text_type(s, encoding, errors)
                else:
                    # Any other object: use its str() representation.
                    s = six.text_type(s)
            elif hasattr(s, '__unicode__'):
                s = six.text_type(s)
            else:
                s = six.text_type(bytes(s), encoding, errors)
        else:
            # Note: We use .decode() here, instead of six.text_type(s, encoding,
            # errors), so that if s is a SafeBytes, it ends up being a
            # SafeText at the end.
            s = s.decode(encoding, errors)
    except UnicodeDecodeError as e:
        if not isinstance(s, Exception):
            raise DjangoUnicodeDecodeError(s, *e.args)
        else:
            # If we get to here, the caller has passed in an Exception
            # subclass populated with non-ASCII bytestring data without a
            # working unicode method. Try to handle this without raising a
            # further exception by individually forcing the exception args
            # to unicode.
            s = ' '.join(force_text(arg, encoding, strings_only, errors)
                         for arg in s)
    return s
def smart_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Returns a bytestring version of 's', encoded as specified in 'encoding'.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # gettext_lazy() results must remain lazy, so they pass through as-is;
    # everything else is coerced through force_bytes.
    return s if isinstance(s, Promise) else force_bytes(s, encoding, strings_only, errors)
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_bytes, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.
    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # Handle the common case first for performance reasons.
    if isinstance(s, bytes):
        if encoding == 'utf-8':
            return s
        else:
            # Transcode: bytes are assumed to be UTF-8 on input.
            return s.decode('utf-8', errors).encode(encoding, errors)
    if strings_only and is_protected_type(s):
        return s
    if isinstance(s, six.memoryview):
        return bytes(s)
    if isinstance(s, Promise):
        # Resolve lazy translation objects before encoding.
        return six.text_type(s).encode(encoding, errors)
    if not isinstance(s, six.string_types):
        try:
            if six.PY3:
                return six.text_type(s).encode(encoding)
            else:
                return bytes(s)
        except UnicodeEncodeError:
            if isinstance(s, Exception):
                # An Exception subclass containing non-ASCII data that doesn't
                # know how to print itself properly. We shouldn't raise a
                # further exception.
                return b' '.join(force_bytes(arg, encoding, strings_only, errors)
                                 for arg in s)
            return six.text_type(s).encode(encoding, errors)
    else:
        return s.encode(encoding, errors)
# "Native string" aliases: str is text on Python 3 and bytes on Python 2.
if six.PY3:
    smart_str = smart_text
    force_str = force_text
else:
    smart_str = smart_bytes
    force_str = force_bytes
    # backwards compatibility for Python 2
    smart_unicode = smart_text
    force_unicode = force_text
smart_str.__doc__ = """
Apply smart_text in Python 3 and smart_bytes in Python 2.
This is suitable for writing to sys.stdout (for instance).
"""
force_str.__doc__ = """
Apply force_text in Python 3 and force_bytes in Python 2.
"""
def iri_to_uri(iri):
    """
    Convert an Internationalized Resource Identifier (IRI) portion to a URI
    portion that is suitable for inclusion in a URL.
    This is the algorithm from section 3.1 of RFC 3987.  However, since we are
    assuming input is either UTF-8 or unicode already, we can simplify things a
    little from the full method.
    Takes an IRI in UTF-8 bytes (e.g. '/I \xe2\x99\xa5 Django/') or unicode
    (e.g. '/I ♥ Django/') and returns ASCII bytes containing the encoded result
    (e.g. '/I%20%E2%99%A5%20Django/').

    Returns None unchanged if given None.
    """
    # The list of safe characters here is constructed from the "reserved" and
    # "unreserved" characters specified in sections 2.2 and 2.3 of RFC 3986:
    #     reserved    = gen-delims / sub-delims
    #     gen-delims  = ":" / "/" / "?" / "#" / "[" / "]" / "@"
    #     sub-delims  = "!" / "$" / "&" / "'" / "(" / ")"
    #                   / "*" / "+" / "," / ";" / "="
    #     unreserved  = ALPHA / DIGIT / "-" / "." / "_" / "~"
    # Of the unreserved characters, urllib.quote already considers all but
    # the ~ safe.
    # The % character is also added to the list of safe characters here, as the
    # end of section 3.1 of RFC 3987 specifically mentions that % must not be
    # converted.
    if iri is None:
        return iri
    return quote(force_bytes(iri), safe=b"/#%[]=:;$&()+,!?*@'~")
def uri_to_iri(uri):
    """
    Converts a Uniform Resource Identifier(URI) into an Internationalized
    Resource Identifier(IRI).
    This is the algorithm from section 3.2 of RFC 3987.
    Takes an URI in ASCII bytes (e.g. '/I%20%E2%99%A5%20Django/') and returns
    unicode containing the encoded result (e.g. '/I \xe2\x99\xa5 Django/').

    Returns None unchanged if given None.
    """
    if uri is None:
        return uri
    uri = force_bytes(uri)
    # On Python 3, unquote() returns text, so use the bytes-producing variant;
    # on Python 2, unquote() on a bytestring already yields bytes.
    iri = unquote_to_bytes(uri) if six.PY3 else unquote(uri)
    # Re-percent-encode any octets that are not valid UTF-8 before decoding.
    return repercent_broken_unicode(iri).decode('utf-8')
def escape_uri_path(path):
    """
    Escape the unsafe characters from the path portion of a Uniform Resource
    Identifier (URI).

    Returns ASCII text with unsafe characters percent-encoded.
    """
    # These are the "reserved" and "unreserved" characters specified in
    # sections 2.2 and 2.3 of RFC 2396:
    #   reserved    = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" | "$" | ","
    #   unreserved  = alphanum | mark
    #   mark        = "-" | "_" | "." | "!" | "~" | "*" | "'" | "(" | ")"
    # The list of safe characters here is constructed subtracting ";", "=",
    # and "?" according to section 3.3 of RFC 2396.
    # The reason for not subtracting and escaping "/" is that we are escaping
    # the entire path, not a path segment.
    return quote(force_bytes(path), safe=b"/:@&+$,-_.!~*'()")
def repercent_broken_unicode(path):
    """
    As per section 3.2 of RFC 3987, step three of converting a URI into an IRI,
    we need to re-percent-encode any octet produced that is not part of a
    strictly legal UTF-8 octet sequence.

    ``path`` is a bytestring; every byte range that fails UTF-8 decoding is
    percent-encoded again and the repaired bytestring is returned.
    """
    # Iterate instead of recursing: the previous recursive version used one
    # stack frame per broken octet range, so input with many invalid bytes
    # could exhaust the stack (the same flaw Django later fixed as
    # CVE-2019-14235). The already-repaired prefix is pure ASCII and cannot
    # fail decoding, so only the remainder needs re-checking.
    changed_parts = []
    while True:
        try:
            path.decode('utf-8')
        except UnicodeDecodeError as e:
            repercent = quote(path[e.start:e.end], safe=b"/#%[]=:;$&()+,!?*@'~")
            changed_parts.append(path[:e.start] + force_bytes(repercent))
            path = path[e.end:]
        else:
            return b''.join(changed_parts) + path
def filepath_to_uri(path):
    """Convert a file system path to a URI portion that is suitable for
    inclusion in a URL.
    We are assuming input is either UTF-8 or unicode already.
    This method will encode certain chars that would normally be recognized as
    special chars for URIs.  Note that this method does not encode the '
    character, as it is a valid character within URIs. See
    encodeURIComponent() JavaScript function for more details.
    Returns an ASCII string containing the encoded result.

    Returns None unchanged if given None.
    """
    if path is None:
        return path
    # I know about `os.sep` and `os.altsep` but I want to leave
    # some flexibility for hardcoding separators.
    return quote(force_bytes(path).replace(b"\\", b"/"), safe=b"/~!*()'")
def get_system_encoding():
    """
    The encoding of the default system locale, falling back to 'ascii' when
    the locale's encoding is missing, unknown to Python, or cannot be
    determined at all. See tickets #10335 and #5846.
    """
    encoding = 'ascii'
    try:
        candidate = locale.getdefaultlocale()[1]
        if candidate:
            # Only accept encodings that Python actually knows about.
            codecs.lookup(candidate)
            encoding = candidate
    except Exception:
        pass
    return encoding

# Computed once at import time; the system locale does not change underneath
# a running process.
DEFAULT_LOCALE_ENCODING = get_system_encoding()
| mit |
USGSDenverPychron/pychron | pychron/core/ui/image_editor.py | 1 | 1115 | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from pychron.core.ui.factory import toolkit_factory
# ============= standard library imports ========================
# ============= local library imports  ==========================
# Resolve the GUI-toolkit-specific ImageEditor implementation at import time.
ImageEditor = toolkit_factory('image_editor', 'ImageEditor')
# ============= EOF =============================================
| apache-2.0 |
ShawnPengxy/Flask-madeBlog | site-packages/pygments/lexers/sql.py | 70 | 23461 | # -*- coding: utf-8 -*-
"""
pygments.lexers.sql
~~~~~~~~~~~~~~~~~~~
Lexers for various SQL dialects and related interactive sessions.
Postgres specific lexers:
`PostgresLexer`
A SQL lexer for the PostgreSQL dialect. Differences w.r.t. the SQL
lexer are:
- keywords and data types list parsed from the PG docs (run the
`_postgres_builtins` module to update them);
- Content of $-strings parsed using a specific lexer, e.g. the content
of a PL/Python function is parsed using the Python lexer;
- parse PG specific constructs: E-strings, $-strings, U&-strings,
different operators and punctuation.
`PlPgsqlLexer`
A lexer for the PL/pgSQL language. Adds a few specific construct on
top of the PG SQL lexer (such as <<label>>).
`PostgresConsoleLexer`
A lexer to highlight an interactive psql session:
- identifies the prompt and does its best to detect the end of command
in multiline statement where not all the lines are prefixed by a
prompt, telling them apart from the output;
- highlights errors in the output and notification levels;
- handles psql backslash commands.
The ``tests/examplefiles`` contains a few test files with data to be
parsed by these lexers.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups
from pygments.token import Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Generic
from pygments.lexers import get_lexer_by_name, ClassNotFound
from pygments.lexers._postgres_builtins import KEYWORDS, DATATYPES, \
PSEUDO_TYPES, PLPGSQL_KEYWORDS
__all__ = ['PostgresLexer', 'PlPgsqlLexer', 'PostgresConsoleLexer',
'SqlLexer', 'MySqlLexer', 'SqliteConsoleLexer']
# Split input into lines, keeping the trailing newline attached to each.
line_re  = re.compile('.*?\n')

# A LANGUAGE clause near a $-string; group 1 is the language name used by
# `language_callback` to choose a sub-lexer for the string body.
language_re = re.compile(r"\s+LANGUAGE\s+'?(\w+)'?", re.IGNORECASE)
def language_callback(lexer, match):
    """Parse the content of a $-string using a sub-lexer.

    The sub-lexer is chosen by looking for a LANGUAGE clause nearby:
    first in the 100 characters following the $-string, then in the 100
    characters preceding it (last occurrence wins).
    """
    sublexer = None

    after = language_re.match(lexer.text[match.end():match.end() + 100])
    if after is not None:
        sublexer = lexer._get_lexer(after.group(1))
    else:
        before = list(language_re.finditer(
            lexer.text[max(0, match.start() - 100):match.start()]))
        if before:
            sublexer = lexer._get_lexer(before[-1].group(1))

    if not sublexer:
        # No language found (or no lexer for it): emit the whole thing
        # as a plain string token.
        yield (match.start(), String, match.group())
        return

    # Emit the $...$ delimiters as strings and delegate the body.
    yield (match.start(1), String, match.group(1))
    for token in sublexer.get_tokens_unprocessed(match.group(2)):
        yield token
    yield (match.start(3), String, match.group(3))
class PostgresBase(object):
    """Mixin shared by the Postgres-related lexers.

    Implemented as a mixin (rather than a common Lexer subclass) so the
    Lexer metaclass does not kick in: with a shared Lexer ancestor,
    _tokens could be created on that ancestor and not updated for the
    other classes, resulting e.g. in PL/pgSQL parsed as SQL.  This
    shortcoming seems to suggest that regexp lexers are not really
    subclassable.
    """

    def get_tokens_unprocessed(self, text, *args):
        # `language_callback` needs the full input, so stash it on the
        # instance before delegating to the real lexer implementation.
        self.text = text
        for token in super(PostgresBase, self).get_tokens_unprocessed(
                text, *args):
            yield token

    def _get_lexer(self, lang):
        # 'sql' inside PostgreSQL means the PostgreSQL dialect.
        if lang.lower() == 'sql':
            return get_lexer_by_name('postgresql', **self.options)

        # Try the name as given, then with the "pl" prefix and/or the
        # trailing "u" (untrusted-language marker) stripped, in order.
        candidates = [lang]
        if lang.startswith('pl'):
            candidates.append(lang[2:])
        if lang.endswith('u'):
            candidates.append(lang[:-1])
        if lang.startswith('pl') and lang.endswith('u'):
            candidates.append(lang[2:-1])

        for candidate in candidates:
            try:
                return get_lexer_by_name(candidate, **self.options)
            except ClassNotFound:
                pass

        # TODO: better logging
        # print >>sys.stderr, "language not found:", lang
        return None
class PostgresLexer(PostgresBase, RegexLexer):
    """
    Lexer for the PostgreSQL dialect of SQL.

    *New in Pygments 1.5.*
    """
    name = 'PostgreSQL SQL dialect'
    aliases = ['postgresql', 'postgres']
    mimetypes = ['text/x-postgresql']
    flags = re.IGNORECASE
    tokens = {
        'root': [
            (r'\s+', Text),
            # Line comment, up to and including the newline.
            (r'--.*?\n', Comment.Single),
            # /* ... */ comments nest; handled in a dedicated state.
            (r'/\*', Comment.Multiline, 'multiline-comments'),
            # Data types and pseudo types from the PG docs; embedded spaces
            # in multi-word names (e.g. "double precision") are matched as
            # any whitespace run.
            (r'(' + '|'.join([s.replace(" ", "\s+")
                              for s in DATATYPES + PSEUDO_TYPES])
                  + r')\b', Name.Builtin),
            (r'(' + '|'.join(KEYWORDS) + r')\b', Keyword),
            (r'[+*/<>=~!@#%^&|`?-]+', Operator),
            (r'::', Operator),  # cast
            # Positional function parameter, e.g. $1.
            (r'\$\d+', Name.Variable),
            (r'([0-9]*\.[0-9]*|[0-9]+)(e[+-]?[0-9]+)?', Number.Float),
            (r'[0-9]+', Number.Integer),
            # E'' escape strings, U&'' Unicode strings; '' escapes a quote.
            (r"(E|U&)?'(''|[^'])*'", String.Single),
            (r'(U&)?"(""|[^"])*"', String.Name), # quoted identifier
            # Dollar-quoted string: body is dispatched to a sub-lexer picked
            # from a nearby LANGUAGE clause (see language_callback).
            (r'(?s)(\$[^\$]*\$)(.*?)(\1)', language_callback),
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
            # psql variable in SQL
            (r""":(['"]?)[a-z][a-z0-9_]*\b\1""", Name.Variable),
            (r'[;:()\[\]\{\},\.]', Punctuation),
        ],
        'multiline-comments': [
            (r'/\*', Comment.Multiline, 'multiline-comments'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[^/\*]+', Comment.Multiline),
            (r'[/*]', Comment.Multiline)
        ],
    }
class PlPgsqlLexer(PostgresBase, RegexLexer):
    """
    Handle the extra syntax in Pl/pgSQL language.

    *New in Pygments 1.5.*
    """
    name = 'PL/pgSQL'
    aliases = ['plpgsql']
    mimetypes = ['text/x-plpgsql']
    flags = re.IGNORECASE
    # Copy PostgresLexer's token table (inner lists copied too) so that the
    # class-level edits below do not leak back into the parent lexer.
    tokens = dict((k, l[:]) for (k, l) in PostgresLexer.tokens.iteritems())

    # extend the keywords list: replace the SQL keyword rule with one that
    # also recognizes the PL/pgSQL keywords.
    for i, pattern in enumerate(tokens['root']):
        if pattern[1] == Keyword:
            tokens['root'][i] = (
                r'(' + '|'.join(KEYWORDS + PLPGSQL_KEYWORDS) + r')\b',
                Keyword)
            del i
            break
    else:
        # for/else: the loop finished without break, so no Keyword rule
        # was found in the copied table -- that would be a bug here.
        assert 0, "SQL keywords not found"

    # Add specific PL/pgSQL rules (before the SQL ones)
    tokens['root'][:0] = [
        (r'\%[a-z][a-z0-9_]*\b', Name.Builtin), # actually, a datatype
        (r':=', Operator),
        (r'\<\<[a-z][a-z0-9_]*\>\>', Name.Label),
        (r'\#[a-z][a-z0-9_]*\b', Keyword.Pseudo), # #variable_conflict
    ]
class PsqlRegexLexer(PostgresBase, RegexLexer):
    """
    Extend the PostgresLexer adding support specific for psql commands.

    This is not a complete psql lexer yet as it lacks prompt support
    and output rendering.
    """
    name = 'PostgreSQL console - regexp based lexer'
    aliases = []    # not public
    flags = re.IGNORECASE
    # Copy PostgresLexer's token table (inner lists copied) so the edits
    # below stay local to this class.
    tokens = dict((k, l[:]) for (k, l) in PostgresLexer.tokens.iteritems())

    # A backslash command switches to a dedicated state until end of line.
    tokens['root'].append(
        (r'\\[^\s]+', Keyword.Pseudo, 'psql-command'))
    tokens['psql-command'] = [
        (r'\n', Text, 'root'),  # back to SQL at end of line
        (r'\s+', Text),
        (r'\\[^\s]+', Keyword.Pseudo),
        # psql variable reference, optionally quoted.
        (r""":(['"]?)[a-z][a-z0-9_]*\b\1""", Name.Variable),
        (r"'(''|[^'])*'", String.Single),
        (r"`([^`])*`", String.Backtick),
        (r"[^\s]+", String.Symbol),
    ]
# Regular expressions used by PostgresConsoleLexer to carve an interactive
# psql session into prompts, commands, output and messages.

# A psql prompt, e.g. "template1=#", "template1-#", "template1(>" ...
re_prompt = re.compile(r'^(\S.*?)??[=\-\(\$\'\"][#>]')
# End of a SQL command: a semicolon, optionally followed by a line comment.
re_end_command = re.compile(r';\s*(--.*?)?$')
# A psql backslash command (e.g. "\d mytable") followed by whitespace at
# the end of the collected command buffer.
# NOTE(review): the original file assigned re_psql_command twice; the first,
# less specific pattern (r'\s*\\') was immediately shadowed before any use,
# so the dead assignment has been removed.
re_psql_command = re.compile(r'(\s*)(\\.+?)(\s+)$')
# Server error levels that switch the output token to Generic.Error.
re_error = re.compile(r'(ERROR|FATAL):')
# Server message lines: level prefix (group 1) and message text (group 2).
re_message = re.compile(
    r'((?:DEBUG|INFO|NOTICE|WARNING|ERROR|'
    r'FATAL|HINT|DETAIL|CONTEXT|LINE [0-9]+):)(.*?\n)')
class lookahead(object):
    """Wrap an iterator and allow pushing back an item.

    ``send(item)`` stores *item* so that the following ``next()`` call
    returns it instead of consuming the underlying iterator.

    NOTE: ``None`` doubles as the "nothing pushed back" sentinel, so
    ``None`` itself cannot be pushed back; callers only push back lines.
    """
    def __init__(self, x):
        self.iter = iter(x)
        self._nextitem = None

    def __iter__(self):
        return self

    def send(self, i):
        # Remember the item; served by the next call to next().
        self._nextitem = i
        return i

    def next(self):
        if self._nextitem is not None:
            ni = self._nextitem
            self._nextitem = None
            return ni
        # Use the next() builtin instead of self.iter.next(): works on
        # both Python 2 and Python 3 iterators.
        return next(self.iter)

    # Python 3 iterator protocol alias.
    __next__ = next
class PostgresConsoleLexer(Lexer):
    """
    Lexer for psql sessions.

    *New in Pygments 1.5.*
    """
    name = 'PostgreSQL console (psql)'
    aliases = ['psql', 'postgresql-console', 'postgres-console']
    mimetypes = ['text/x-postgresql-psql']

    def get_tokens_unprocessed(self, data):
        # SQL highlighting is delegated to the regexp-based lexer; this
        # method only splits the session into prompt/command/output parts.
        sql = PsqlRegexLexer(**self.options)

        # lookahead lets the output loop push a prompt line back so the
        # command loop can re-read it.
        lines = lookahead(line_re.findall(data))

        # prompt-output cycle
        while 1:

            # consume the lines of the command: start with an optional prompt
            # and continue until the end of command is detected
            curcode = ''
            insertions = []
            while 1:
                try:
                    line = lines.next()
                except StopIteration:
                    # allow the emission of partially collected items
                    # the repl loop will be broken below
                    break

                # Identify a shell prompt in case of psql commandline example
                if line.startswith('$') and not curcode:
                    lexer = get_lexer_by_name('console', **self.options)
                    for x in lexer.get_tokens_unprocessed(line):
                        yield x
                    break

                # Identify a psql prompt
                mprompt = re_prompt.match(line)
                if mprompt is not None:
                    # Prompt is emitted via do_insertions at this offset of
                    # the accumulated command text.
                    insertions.append((len(curcode),
                                       [(0, Generic.Prompt, mprompt.group())]))
                    curcode += line[len(mprompt.group()):]
                else:
                    curcode += line

                # Check if this is the end of the command
                # TODO: better handle multiline comments at the end with
                # a lexer with an external state?
                if re_psql_command.match(curcode) \
                   or re_end_command.search(curcode):
                    break

            # Emit the combined stream of command and prompt(s)
            for item in do_insertions(insertions,
                                      sql.get_tokens_unprocessed(curcode)):
                yield item

            # Emit the output lines
            out_token = Generic.Output
            while 1:
                line = lines.next()
                mprompt = re_prompt.match(line)
                if mprompt is not None:
                    # push the line back to have it processed by the prompt
                    lines.send(line)
                    break

                mmsg = re_message.match(line)
                if mmsg is not None:
                    # ERROR/FATAL switch the remaining output to the error
                    # token; the level prefix itself is rendered as strong.
                    if mmsg.group(1).startswith("ERROR") \
                       or mmsg.group(1).startswith("FATAL"):
                        out_token = Generic.Error
                    yield (mmsg.start(1), Generic.Strong, mmsg.group(1))
                    yield (mmsg.start(2), out_token, mmsg.group(2))
                else:
                    yield (0, out_token, line)
class SqlLexer(RegexLexer):
    """
    Lexer for Structured Query Language. Currently, this lexer does
    not recognize any special syntax except ANSI SQL.
    """
    name = 'SQL'
    aliases = ['sql']
    filenames = ['*.sql']
    mimetypes = ['text/x-sql']
    flags = re.IGNORECASE
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'--.*?\n', Comment.Single),
            (r'/\*', Comment.Multiline, 'multiline-comments'),
            # ANSI SQL keywords plus a number of PostgreSQL extensions.
            # Fix: the original list contained "| TEMP" with a stray space,
            # so TEMP could never match (whitespace is always consumed by
            # the \s+ rule above before this rule is tried).
            # NOTE(review): several entries look like historical typos
            # (COALSECE, CORRESPONTING, ESCEPTION, TRANSATION...); they are
            # preserved as-is to keep this change minimal.
            (r'(ABORT|ABS|ABSOLUTE|ACCESS|ADA|ADD|ADMIN|AFTER|AGGREGATE|'
             r'ALIAS|ALL|ALLOCATE|ALTER|ANALYSE|ANALYZE|AND|ANY|ARE|AS|'
             r'ASC|ASENSITIVE|ASSERTION|ASSIGNMENT|ASYMMETRIC|AT|ATOMIC|'
             r'AUTHORIZATION|AVG|BACKWARD|BEFORE|BEGIN|BETWEEN|BITVAR|'
             r'BIT_LENGTH|BOTH|BREADTH|BY|C|CACHE|CALL|CALLED|CARDINALITY|'
             r'CASCADE|CASCADED|CASE|CAST|CATALOG|CATALOG_NAME|CHAIN|'
             r'CHARACTERISTICS|CHARACTER_LENGTH|CHARACTER_SET_CATALOG|'
             r'CHARACTER_SET_NAME|CHARACTER_SET_SCHEMA|CHAR_LENGTH|CHECK|'
             r'CHECKED|CHECKPOINT|CLASS|CLASS_ORIGIN|CLOB|CLOSE|CLUSTER|'
             r'COALSECE|COBOL|COLLATE|COLLATION|COLLATION_CATALOG|'
             r'COLLATION_NAME|COLLATION_SCHEMA|COLUMN|COLUMN_NAME|'
             r'COMMAND_FUNCTION|COMMAND_FUNCTION_CODE|COMMENT|COMMIT|'
             r'COMMITTED|COMPLETION|CONDITION_NUMBER|CONNECT|CONNECTION|'
             r'CONNECTION_NAME|CONSTRAINT|CONSTRAINTS|CONSTRAINT_CATALOG|'
             r'CONSTRAINT_NAME|CONSTRAINT_SCHEMA|CONSTRUCTOR|CONTAINS|'
             r'CONTINUE|CONVERSION|CONVERT|COPY|CORRESPONTING|COUNT|'
             r'CREATE|CREATEDB|CREATEUSER|CROSS|CUBE|CURRENT|CURRENT_DATE|'
             r'CURRENT_PATH|CURRENT_ROLE|CURRENT_TIME|CURRENT_TIMESTAMP|'
             r'CURRENT_USER|CURSOR|CURSOR_NAME|CYCLE|DATA|DATABASE|'
             r'DATETIME_INTERVAL_CODE|DATETIME_INTERVAL_PRECISION|DAY|'
             r'DEALLOCATE|DECLARE|DEFAULT|DEFAULTS|DEFERRABLE|DEFERRED|'
             r'DEFINED|DEFINER|DELETE|DELIMITER|DELIMITERS|DEREF|DESC|'
             r'DESCRIBE|DESCRIPTOR|DESTROY|DESTRUCTOR|DETERMINISTIC|'
             r'DIAGNOSTICS|DICTIONARY|DISCONNECT|DISPATCH|DISTINCT|DO|'
             r'DOMAIN|DROP|DYNAMIC|DYNAMIC_FUNCTION|DYNAMIC_FUNCTION_CODE|'
             r'EACH|ELSE|ENCODING|ENCRYPTED|END|END-EXEC|EQUALS|ESCAPE|EVERY|'
             r'EXCEPT|ESCEPTION|EXCLUDING|EXCLUSIVE|EXEC|EXECUTE|EXISTING|'
             r'EXISTS|EXPLAIN|EXTERNAL|EXTRACT|FALSE|FETCH|FINAL|FIRST|FOR|'
             r'FORCE|FOREIGN|FORTRAN|FORWARD|FOUND|FREE|FREEZE|FROM|FULL|'
             r'FUNCTION|G|GENERAL|GENERATED|GET|GLOBAL|GO|GOTO|GRANT|GRANTED|'
             r'GROUP|GROUPING|HANDLER|HAVING|HIERARCHY|HOLD|HOST|IDENTITY|'
             r'IGNORE|ILIKE|IMMEDIATE|IMMUTABLE|IMPLEMENTATION|IMPLICIT|IN|'
             r'INCLUDING|INCREMENT|INDEX|INDITCATOR|INFIX|INHERITS|INITIALIZE|'
             r'INITIALLY|INNER|INOUT|INPUT|INSENSITIVE|INSERT|INSTANTIABLE|'
             r'INSTEAD|INTERSECT|INTO|INVOKER|IS|ISNULL|ISOLATION|ITERATE|JOIN|'
             r'KEY|KEY_MEMBER|KEY_TYPE|LANCOMPILER|LANGUAGE|LARGE|LAST|'
             r'LATERAL|LEADING|LEFT|LENGTH|LESS|LEVEL|LIKE|LIMIT|LISTEN|LOAD|'
             r'LOCAL|LOCALTIME|LOCALTIMESTAMP|LOCATION|LOCATOR|LOCK|LOWER|'
             r'MAP|MATCH|MAX|MAXVALUE|MESSAGE_LENGTH|MESSAGE_OCTET_LENGTH|'
             r'MESSAGE_TEXT|METHOD|MIN|MINUTE|MINVALUE|MOD|MODE|MODIFIES|'
             r'MODIFY|MONTH|MORE|MOVE|MUMPS|NAMES|NATIONAL|NATURAL|NCHAR|'
             r'NCLOB|NEW|NEXT|NO|NOCREATEDB|NOCREATEUSER|NONE|NOT|NOTHING|'
             r'NOTIFY|NOTNULL|NULL|NULLABLE|NULLIF|OBJECT|OCTET_LENGTH|OF|OFF|'
             r'OFFSET|OIDS|OLD|ON|ONLY|OPEN|OPERATION|OPERATOR|OPTION|OPTIONS|'
             r'OR|ORDER|ORDINALITY|OUT|OUTER|OUTPUT|OVERLAPS|OVERLAY|OVERRIDING|'
             r'OWNER|PAD|PARAMETER|PARAMETERS|PARAMETER_MODE|PARAMATER_NAME|'
             r'PARAMATER_ORDINAL_POSITION|PARAMETER_SPECIFIC_CATALOG|'
             r'PARAMETER_SPECIFIC_NAME|PARAMATER_SPECIFIC_SCHEMA|PARTIAL|'
             r'PASCAL|PENDANT|PLACING|PLI|POSITION|POSTFIX|PRECISION|PREFIX|'
             r'PREORDER|PREPARE|PRESERVE|PRIMARY|PRIOR|PRIVILEGES|PROCEDURAL|'
             r'PROCEDURE|PUBLIC|READ|READS|RECHECK|RECURSIVE|REF|REFERENCES|'
             r'REFERENCING|REINDEX|RELATIVE|RENAME|REPEATABLE|REPLACE|RESET|'
             r'RESTART|RESTRICT|RESULT|RETURN|RETURNED_LENGTH|'
             r'RETURNED_OCTET_LENGTH|RETURNED_SQLSTATE|RETURNS|REVOKE|RIGHT|'
             r'ROLE|ROLLBACK|ROLLUP|ROUTINE|ROUTINE_CATALOG|ROUTINE_NAME|'
             r'ROUTINE_SCHEMA|ROW|ROWS|ROW_COUNT|RULE|SAVE_POINT|SCALE|SCHEMA|'
             r'SCHEMA_NAME|SCOPE|SCROLL|SEARCH|SECOND|SECURITY|SELECT|SELF|'
             r'SENSITIVE|SERIALIZABLE|SERVER_NAME|SESSION|SESSION_USER|SET|'
             r'SETOF|SETS|SHARE|SHOW|SIMILAR|SIMPLE|SIZE|SOME|SOURCE|SPACE|'
             r'SPECIFIC|SPECIFICTYPE|SPECIFIC_NAME|SQL|SQLCODE|SQLERROR|'
             r'SQLEXCEPTION|SQLSTATE|SQLWARNINIG|STABLE|START|STATE|STATEMENT|'
             r'STATIC|STATISTICS|STDIN|STDOUT|STORAGE|STRICT|STRUCTURE|STYPE|'
             r'SUBCLASS_ORIGIN|SUBLIST|SUBSTRING|SUM|SYMMETRIC|SYSID|SYSTEM|'
             r'SYSTEM_USER|TABLE|TABLE_NAME|TEMP|TEMPLATE|TEMPORARY|TERMINATE|'
             r'THAN|THEN|TIMESTAMP|TIMEZONE_HOUR|TIMEZONE_MINUTE|TO|TOAST|'
             r'TRAILING|TRANSATION|TRANSACTIONS_COMMITTED|'
             r'TRANSACTIONS_ROLLED_BACK|TRANSATION_ACTIVE|TRANSFORM|'
             r'TRANSFORMS|TRANSLATE|TRANSLATION|TREAT|TRIGGER|TRIGGER_CATALOG|'
             r'TRIGGER_NAME|TRIGGER_SCHEMA|TRIM|TRUE|TRUNCATE|TRUSTED|TYPE|'
             r'UNCOMMITTED|UNDER|UNENCRYPTED|UNION|UNIQUE|UNKNOWN|UNLISTEN|'
             r'UNNAMED|UNNEST|UNTIL|UPDATE|UPPER|USAGE|USER|'
             r'USER_DEFINED_TYPE_CATALOG|USER_DEFINED_TYPE_NAME|'
             r'USER_DEFINED_TYPE_SCHEMA|USING|VACUUM|VALID|VALIDATOR|VALUES|'
             r'VARIABLE|VERBOSE|VERSION|VIEW|VOLATILE|WHEN|WHENEVER|WHERE|'
             r'WITH|WITHOUT|WORK|WRITE|YEAR|ZONE)\b', Keyword),
            (r'(ARRAY|BIGINT|BINARY|BIT|BLOB|BOOLEAN|CHAR|CHARACTER|DATE|'
             r'DEC|DECIMAL|FLOAT|INT|INTEGER|INTERVAL|NUMBER|NUMERIC|REAL|'
             r'SERIAL|SMALLINT|VARCHAR|VARYING|INT8|SERIAL8|TEXT)\b',
             Name.Builtin),
            (r'[+*/<>=~!@#%^&|`?-]', Operator),
            (r'[0-9]+', Number.Integer),
            # TODO: Backslash escapes?
            (r"'(''|[^'])*'", String.Single),
            (r'"(""|[^"])*"', String.Symbol), # not a real string literal in ANSI SQL
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
            (r'[;:()\[\],\.]', Punctuation)
        ],
        'multiline-comments': [
            (r'/\*', Comment.Multiline, 'multiline-comments'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[^/\*]+', Comment.Multiline),
            (r'[/*]', Comment.Multiline)
        ]
    }
class MySqlLexer(RegexLexer):
    """
    Special lexer for MySQL.
    """
    name = 'MySQL'
    aliases = ['mysql']
    mimetypes = ['text/x-mysql']
    flags = re.IGNORECASE
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'(#|--\s+).*?\n', Comment.Single),
            (r'/\*', Comment.Multiline, 'multiline-comments'),
            # Fix: floats must be tried before integers, otherwise "1.5"
            # lexed as the integer "1" followed by junk; the exponent sign
            # is now optional and the whole exponent may be absent, matching
            # the float rules of the other SQL lexers in this module.
            (r'[0-9]*\.[0-9]+(e[+-]?[0-9]+)?', Number.Float),
            (r'[0-9]+', Number.Integer),
            # TODO: add backslash escapes
            (r"'(''|[^'])*'", String.Single),
            (r'"(""|[^"])*"', String.Double),
            (r"`(``|[^`])*`", String.Symbol),
            (r'[+*/<>=~!@#%^&|`?-]', Operator),
            (r'\b(tinyint|smallint|mediumint|int|integer|bigint|date|'
             r'datetime|time|bit|bool|tinytext|mediumtext|longtext|text|'
             r'tinyblob|mediumblob|longblob|blob|float|double|double\s+'
             r'precision|real|numeric|dec|decimal|timestamp|year|char|'
             r'varchar|varbinary|varcharacter|enum|set)(\b\s*)(\()?',
             bygroups(Keyword.Type, Text, Punctuation)),
            (r'\b(add|all|alter|analyze|and|as|asc|asensitive|before|between|'
             r'bigint|binary|blob|both|by|call|cascade|case|change|char|'
             r'character|check|collate|column|condition|constraint|continue|'
             r'convert|create|cross|current_date|current_time|'
             r'current_timestamp|current_user|cursor|database|databases|'
             r'day_hour|day_microsecond|day_minute|day_second|dec|decimal|'
             r'declare|default|delayed|delete|desc|describe|deterministic|'
             r'distinct|distinctrow|div|double|drop|dual|each|else|elseif|'
             r'enclosed|escaped|exists|exit|explain|fetch|float|float4|float8'
             r'|for|force|foreign|from|fulltext|grant|group|having|'
             r'high_priority|hour_microsecond|hour_minute|hour_second|if|'
             r'ignore|in|index|infile|inner|inout|insensitive|insert|int|'
             r'int1|int2|int3|int4|int8|integer|interval|into|is|iterate|'
             r'join|key|keys|kill|leading|leave|left|like|limit|lines|load|'
             r'localtime|localtimestamp|lock|long|loop|low_priority|match|'
             r'minute_microsecond|minute_second|mod|modifies|natural|'
             r'no_write_to_binlog|not|numeric|on|optimize|option|optionally|'
             r'or|order|out|outer|outfile|precision|primary|procedure|purge|'
             r'raid0|read|reads|real|references|regexp|release|rename|repeat|'
             r'replace|require|restrict|return|revoke|right|rlike|schema|'
             r'schemas|second_microsecond|select|sensitive|separator|set|'
             r'show|smallint|soname|spatial|specific|sql|sql_big_result|'
             r'sql_calc_found_rows|sql_small_result|sqlexception|sqlstate|'
             r'sqlwarning|ssl|starting|straight_join|table|terminated|then|'
             r'to|trailing|trigger|undo|union|unique|unlock|unsigned|update|'
             r'usage|use|using|utc_date|utc_time|utc_timestamp|values|'
             r'varying|when|where|while|with|write|x509|xor|year_month|'
             r'zerofill)\b', Keyword),
            # TODO: this list is not complete
            (r'\b(auto_increment|engine|charset|tables)\b', Keyword.Pseudo),
            (r'(true|false|null)', Name.Constant),
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(\()',
             bygroups(Name.Function, Text, Punctuation)),
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
            (r'@[A-Za-z0-9]*[._]*[A-Za-z0-9]*', Name.Variable),
            (r'[;:()\[\],\.]', Punctuation)
        ],
        'multiline-comments': [
            (r'/\*', Comment.Multiline, 'multiline-comments'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[^/\*]+', Comment.Multiline),
            (r'[/*]', Comment.Multiline)
        ]
    }
class SqliteConsoleLexer(Lexer):
    """
    Lexer for example sessions using sqlite3.

    *New in Pygments 0.11.*
    """
    name = 'sqlite3con'
    aliases = ['sqlite3']
    filenames = ['*.sqlite3-console']
    mimetypes = ['text/x-sqlite3-console']

    def get_tokens_unprocessed(self, data):
        sql = SqlLexer(**self.options)

        curcode = ''
        insertions = []
        for match in line_re.finditer(data):
            line = match.group()
            if line.startswith('sqlite> ') or line.startswith(' ...> '):
                # Prompt line: record where the (8-char) prompt belongs and
                # accumulate the SQL that follows it.
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:8])]))
                curcode += line[8:]
                continue
            # Any non-prompt line terminates the pending SQL command.
            if curcode:
                for item in do_insertions(insertions,
                                          sql.get_tokens_unprocessed(curcode)):
                    yield item
                curcode = ''
                insertions = []
            if line.startswith('SQL error: '):
                yield (match.start(), Generic.Traceback, line)
            else:
                yield (match.start(), Generic.Output, line)
        # Flush a command still pending at end of input.
        if curcode:
            for item in do_insertions(insertions,
                                      sql.get_tokens_unprocessed(curcode)):
                yield item
| mit |
alivesay/squire | lib/lockfile-0.9.1/lockfile/mkdirlockfile.py | 18 | 2701 | from __future__ import absolute_import, division
import time
import os
import sys
import errno
from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
AlreadyLocked)
class MkdirLockFile(LockBase):
    """Lock file by creating a directory.

    The lock is the existence of the directory itself (mkdir is atomic on
    POSIX filesystems); a per-owner marker file inside it records who holds
    the lock.
    """
    def __init__(self, path, threaded=True):
        """
        >>> lock = MkdirLockFile('somefile')
        >>> lock = MkdirLockFile('somefile', threaded=False)
        """
        LockBase.__init__(self, path, threaded)
        # Lock file itself is a directory. Place the unique file name into
        # it.
        self.unique_name = os.path.join(self.lock_file,
                                        "%s.%s%s" % (self.hostname,
                                                     self.tname,
                                                     self.pid))

    def acquire(self, timeout=None):
        """Acquire the lock.

        timeout semantics:
          * None -- block until the lock can be acquired;
          * <= 0 -- raise AlreadyLocked immediately if held by another owner;
          * > 0  -- retry until the deadline, then raise LockTimeout.
        """
        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout

        # Poll interval: a tenth of the timeout, or 0.1s when blocking
        # indefinitely.
        if timeout is None:
            wait = 0.1
        else:
            wait = max(0, timeout / 10)

        while True:
            try:
                os.mkdir(self.lock_file)
            except OSError:
                err = sys.exc_info()[1]
                if err.errno == errno.EEXIST:
                    # Already locked.
                    if os.path.exists(self.unique_name):
                        # Already locked by me.
                        return
                    if timeout is not None and time.time() > end_time:
                        if timeout > 0:
                            raise LockTimeout
                        else:
                            # Someone else has the lock.
                            raise AlreadyLocked
                    time.sleep(wait)
                else:
                    # Couldn't create the lock for some other reason
                    raise LockFailed("failed to create %s" % self.lock_file)
            else:
                # mkdir succeeded: mark ownership by dropping our unique
                # marker file inside the lock directory.
                open(self.unique_name, "wb").close()
                return

    def release(self):
        """Release the lock: remove our marker file, then the directory.

        Raises NotLocked if nobody holds the lock, NotMyLock if it is held
        by a different owner.
        """
        if not self.is_locked():
            raise NotLocked
        elif not os.path.exists(self.unique_name):
            raise NotMyLock
        os.unlink(self.unique_name)
        os.rmdir(self.lock_file)

    def is_locked(self):
        # The directory's existence is the lock.
        return os.path.exists(self.lock_file)

    def i_am_locking(self):
        # Locked, and our own marker file is present inside it.
        return (self.is_locked() and
                os.path.exists(self.unique_name))

    def break_lock(self):
        """Forcibly remove the lock, regardless of which process owns it."""
        if os.path.exists(self.lock_file):
            for name in os.listdir(self.lock_file):
                os.unlink(os.path.join(self.lock_file, name))
            os.rmdir(self.lock_file)
| unlicense |
sidartaoliveira/ansible | lib/ansible/modules/cloud/amazon/cloudtrail.py | 44 | 23831 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudtrail
short_description: manage CloudTrail create, delete, update
description:
- Creates, deletes, or updates CloudTrail configuration. Ensures logging is also enabled.
version_added: "2.0"
author:
- "Ansible Core Team"
- "Ted Timmons"
- "Daniel Shepherd (@shepdelacreme)"
requirements:
- boto3
- botocore
options:
state:
description:
- Add or remove CloudTrail configuration.
- The following states have been preserved for backwards compatibility. C(state=enabled) and C(state=disabled).
- enabled=present and disabled=absent.
required: true
choices: ['present', 'absent', 'enabled', 'disabled']
name:
description:
- Name for the CloudTrail.
      - Names are unique per-region unless the CloudTrail is a multi-region trail, in which case it is unique per-account.
required: true
enable_logging:
description:
- Start or stop the CloudTrail logging. If stopped the trail will be paused and will not record events or deliver log files.
default: true
version_added: "2.4"
s3_bucket_name:
description:
- An existing S3 bucket where CloudTrail will deliver log files.
- This bucket should exist and have the proper policy.
- See U(http://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html)
- Required when C(state=present)
version_added: "2.4"
s3_key_prefix:
description:
- S3 Key prefix for delivered log files. A trailing slash is not necessary and will be removed.
is_multi_region_trail:
description:
- Specify whether the trail belongs only to one region or exists in all regions.
default: false
version_added: "2.4"
enable_log_file_validation:
description:
- Specifies whether log file integrity validation is enabled.
- CloudTrail will create a hash for every log file delivered and produce a signed digest file that can be used to ensure log files have not been tampered.
default: false
version_added: "2.4"
include_global_events:
description:
- Record API calls from global services such as IAM and STS.
default: true
sns_topic_name:
description:
- SNS Topic name to send notifications to when a log file is delivered
version_added: "2.4"
cloudwatch_logs_role_arn:
description:
- Specifies a full ARN for an IAM role that assigns the proper permissions for CloudTrail to create and write to the log group listed below.
- See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html)
- "Example arn:aws:iam::123456789012:role/CloudTrail_CloudWatchLogs_Role"
- Required when C(cloudwatch_logs_log_group_arn)
version_added: "2.4"
cloudwatch_logs_log_group_arn:
description:
- A full ARN specifying a valid CloudWatch log group to which CloudTrail logs will be delivered. The log group should already exist.
- See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html)
- "Example arn:aws:logs:us-east-1:123456789012:log-group:CloudTrail/DefaultLogGroup:*"
- Required when C(cloudwatch_logs_role_arn)
version_added: "2.4"
kms_key_id:
description:
- Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. This also has the effect of enabling log file encryption.
- The value can be an alias name prefixed by "alias/", a fully specified ARN to an alias, a fully specified ARN to a key, or a globally unique identifier.
- Examples
- alias/MyAliasName
- "arn:aws:kms:us-east-1:123456789012:alias/MyAliasName"
- "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"
- 12345678-1234-1234-1234-123456789012
- See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html)
version_added: "2.4"
tags:
description:
- A hash/dictionary of tags to be applied to the CloudTrail resource.
- Remove completely or specify an empty dictionary to remove all tags.
default: {}
version_added: "2.4"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: create single region cloudtrail
cloudtrail:
state: present
name: default
s3_bucket_name: mylogbucket
s3_key_prefix: cloudtrail
region: us-east-1
- name: create multi-region trail with validation and tags
cloudtrail:
state: present
name: default
s3_bucket_name: mylogbucket
region: us-east-1
is_multi_region_trail: true
enable_log_file_validation: true
tags:
environment: dev
Name: default
- name: pause logging the trail we just created
cloudtrail:
state: present
name: default
enable_logging: false
s3_bucket_name: mylogbucket
region: us-east-1
is_multi_region_trail: true
enable_log_file_validation: true
tags:
environment: dev
Name: default
- name: delete a trail
cloudtrail:
state: absent
name: default
'''
RETURN = '''
exists:
description: whether the resource exists
returned: always
type: bool
sample: true
trail:
description: CloudTrail resource details
returned: always
type: complex
sample: hash/dictionary of values
contains:
trail_arn:
description: Full ARN of the CloudTrail resource
returned: success
type: string
sample: arn:aws:cloudtrail:us-east-1:123456789012:trail/default
name:
description: Name of the CloudTrail resource
returned: success
type: string
sample: default
is_logging:
description: Whether logging is turned on or paused for the Trail
returned: success
type: bool
sample: True
s3_bucket_name:
description: S3 bucket name where log files are delivered
returned: success
type: string
sample: myBucket
s3_key_prefix:
description: Key prefix in bucket where log files are delivered (if any)
returned: success when present
type: string
sample: myKeyPrefix
log_file_validation_enabled:
description: Whether log file validation is enabled on the trail
returned: success
type: bool
sample: true
include_global_service_events:
description: Whether global services (IAM, STS) are logged with this trail
returned: success
type: bool
sample: true
is_multi_region_trail:
description: Whether the trail applies to all regions or just one
returned: success
type: bool
sample: true
has_custom_event_selectors:
description: Whether any custom event selectors are used for this trail.
returned: success
type: bool
sample: False
home_region:
description: The home region where the trail was originally created and must be edited.
returned: success
type: string
sample: us-east-1
sns_topic_name:
description: The SNS topic name where log delivery notifications are sent.
returned: success when present
type: string
sample: myTopic
sns_topic_arn:
description: Full ARN of the SNS topic where log delivery notifications are sent.
returned: success when present
type: string
sample: arn:aws:sns:us-east-1:123456789012:topic/myTopic
cloud_watch_logs_log_group_arn:
description: Full ARN of the CloudWatch Logs log group where events are delivered.
returned: success when present
type: string
sample: arn:aws:logs:us-east-1:123456789012:log-group:CloudTrail/DefaultLogGroup:*
cloud_watch_logs_role_arn:
description: Full ARN of the IAM role that CloudTrail assumes to deliver events.
returned: success when present
type: string
sample: arn:aws:iam::123456789012:role/CloudTrail_CloudWatchLogs_Role
kms_key_id:
description: Full ARN of the KMS Key used to encrypt log files.
returned: success when present
type: string
sample: arn:aws:kms::123456789012:key/12345678-1234-1234-1234-123456789012
tags:
description: hash/dictionary of tags applied to this resource
returned: success
type: dict
sample: {'environment': 'dev', 'Name': 'default'}
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec
from ansible.module_utils.ec2 import get_aws_connection_info, HAS_BOTO3
from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list
from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
from botocore.exceptions import ClientError
def create_trail(module, client, ct_params):
    """
    Create a new CloudTrail and return the API response.

    module : AnsibleModule object
    client : boto3 client connection object
    ct_params : dict of keyword parameters for the create_trail API call
    """
    try:
        return client.create_trail(**ct_params)
    except ClientError as err:
        # fail_json exits the module; the empty dict keeps the return
        # value well-defined should it ever fall through.
        module.fail_json(msg=err.message, exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(err.response))
        return {}
def tag_trail(module, client, tags, trail_arn, curr_tags=None, dry_run=False):
    """
    Creates, updates, removes tags on a CloudTrail resource so they match
    the desired set exactly.

    module : AnsibleModule object
    client : boto3 client connection object
    tags : Dict of tags converted from ansible_dict to boto3 list of dicts
    trail_arn : The ARN of the CloudTrail to operate on
    curr_tags : Dict of the current tags on resource, if any
    dry_run : true/false to determine if changes will be made if needed

    Returns True when any tag was (or, in check mode, would be) changed.
    """
    adds = []
    removes = []
    updates = []
    changed = False

    if curr_tags is None:
        # No current tags so just convert all to a tag list
        adds = ansible_dict_to_boto3_tag_list(tags)
    else:
        curr_keys = set(curr_tags.keys())
        new_keys = set(tags.keys())
        # Keys only in the desired set are added; keys only in the current
        # set are removed; shared keys with differing values are updated.
        add_keys = new_keys - curr_keys
        remove_keys = curr_keys - new_keys
        update_keys = dict()
        for k in curr_keys.intersection(new_keys):
            if curr_tags[k] != tags[k]:
                update_keys.update({k: tags[k]})

        adds = get_tag_list(add_keys, tags)
        removes = get_tag_list(remove_keys, curr_tags)
        updates = get_tag_list(update_keys, tags)

    # Value updates are implemented as remove-then-add, since the
    # CloudTrail API has no in-place tag update.
    if removes or updates:
        changed = True
        if not dry_run:
            try:
                client.remove_tags(ResourceId=trail_arn, TagsList=removes + updates)
            except ClientError as err:
                module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))

    if updates or adds:
        changed = True
        if not dry_run:
            try:
                client.add_tags(ResourceId=trail_arn, TagsList=updates + adds)
            except ClientError as err:
                module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))

    return changed
def get_tag_list(keys, tags):
    """
    Return a list of boto3-style tag dicts for the given keys.

    keys : iterable of keys to get the values for
    tags : mapping of key -> value to pull the values from
    """
    return [{'Key': key, 'Value': tags[key]} for key in keys]
def set_logging(module, client, name, action):
    """
    Start or stop event logging for a trail.

    module : AnsibleModule object
    client : boto3 client connection object
    name : The name or ARN of the CloudTrail to operate on
    action : 'start' or 'stop'

    Returns the trail status after the change.
    """
    # Guard clause: reject anything but the two supported actions.
    if action not in ('start', 'stop'):
        module.fail_json(msg="Unsupported logging action")
        return None
    try:
        if action == 'start':
            client.start_logging(Name=name)
        else:
            client.stop_logging(Name=name)
        return client.get_trail_status(Name=name)
    except ClientError as err:
        module.fail_json(msg=err.message, exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(err.response))
def get_trail_facts(module, client, name):
    """
    Describes existing trail in an account.

    Returns a dict of trail properties (augmented with IsLogging and tags),
    or None when no trail by that name exists.

    module : AnsibleModule object
    client : boto3 client connection object
    name : Name of the trail
    """
    # get Trail info
    try:
        trail_resp = client.describe_trails(trailNameList=[name])
    except ClientError as err:
        module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))

    # Now check to see if our trail exists and get status and tags
    if len(trail_resp['trailList']):
        trail = trail_resp['trailList'][0]
        try:
            status_resp = client.get_trail_status(Name=trail['Name'])
            tags_list = client.list_tags(ResourceIdList=[trail['TrailARN']])
        except ClientError as err:
            module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))

        trail['IsLogging'] = status_resp['IsLogging']
        trail['tags'] = boto3_tag_list_to_ansible_dict(tags_list['ResourceTagList'][0]['TagsList'])
        # Check for non-existent values and populate with None so callers
        # can rely on these keys always being present.
        optional_vals = set(['S3KeyPrefix', 'SnsTopicName', 'SnsTopicARN', 'CloudWatchLogsLogGroupArn', 'CloudWatchLogsRoleArn', 'KmsKeyId'])
        for v in optional_vals - set(trail.keys()):
            trail[v] = None
        return trail
    else:
        # trail doesn't exist return None
        return None
def delete_trail(module, client, trail_arn):
    """
    Delete a CloudTrail

    module : AnsibleModule object
    client : boto3 client connection object
    trail_arn : Full CloudTrail ARN
    """
    try:
        client.delete_trail(Name=trail_arn)
    except ClientError as err:
        # fail_json exits the module run with the AWS error details
        module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
def update_trail(module, client, ct_params):
    """
    Update an existing CloudTrail with the given parameters

    module : AnsibleModule object
    client : boto3 client connection object
    ct_params : The parameters for the Trail to update
    """
    try:
        client.update_trail(**ct_params)
    except ClientError as err:
        # fail_json exits the module run with the AWS error details
        module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
def main():
    """Module entry point: ensure a CloudTrail exists/doesn't exist, and
    reconcile its settings, logging state and tags, honouring check mode."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(default='present', choices=['present', 'absent', 'enabled', 'disabled']),
        name=dict(default='default'),
        enable_logging=dict(default=True, type='bool'),
        s3_bucket_name=dict(),
        s3_key_prefix=dict(),
        sns_topic_name=dict(),
        is_multi_region_trail=dict(default=False, type='bool'),
        enable_log_file_validation=dict(default=False, type='bool'),
        include_global_events=dict(default=True, type='bool'),
        cloudwatch_logs_role_arn=dict(),
        cloudwatch_logs_log_group_arn=dict(),
        kms_key_id=dict(),
        tags=dict(default={}, type='dict'),
    ))
    required_if = [('state', 'present', ['s3_bucket_name']), ('state', 'enabled', ['s3_bucket_name'])]
    required_together = [('cloudwatch_logs_role_arn', 'cloudwatch_logs_log_group_arn')]
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together, required_if=required_if)
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module')
    # collect parameters
    # 'enabled'/'disabled' are aliases for 'present'/'absent'
    if module.params['state'] in ('present', 'enabled'):
        state = 'present'
    elif module.params['state'] in ('absent', 'disabled'):
        state = 'absent'
    tags = module.params['tags']
    enable_logging = module.params['enable_logging']
    # Optional attributes default to '' so an unset option clears the
    # corresponding trail attribute on update; '' is equated with None when
    # diffing against existing trail facts below.
    ct_params = dict(
        Name=module.params['name'],
        S3BucketName=module.params['s3_bucket_name'],
        IncludeGlobalServiceEvents=module.params['include_global_events'],
        IsMultiRegionTrail=module.params['is_multi_region_trail'],
        EnableLogFileValidation=module.params['enable_log_file_validation'],
        S3KeyPrefix='',
        SnsTopicName='',
        CloudWatchLogsRoleArn='',
        CloudWatchLogsLogGroupArn='',
        KmsKeyId=''
    )
    if module.params['s3_key_prefix']:
        # CloudTrail rejects prefixes with a trailing slash
        ct_params['S3KeyPrefix'] = module.params['s3_key_prefix'].rstrip('/')
    if module.params['sns_topic_name']:
        ct_params['SnsTopicName'] = module.params['sns_topic_name']
    if module.params['cloudwatch_logs_role_arn']:
        ct_params['CloudWatchLogsRoleArn'] = module.params['cloudwatch_logs_role_arn']
    if module.params['cloudwatch_logs_log_group_arn']:
        ct_params['CloudWatchLogsLogGroupArn'] = module.params['cloudwatch_logs_log_group_arn']
    if module.params['kms_key_id']:
        ct_params['KmsKeyId'] = module.params['kms_key_id']
    try:
        region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
        client = boto3_conn(module, conn_type='client', resource='cloudtrail', region=region, endpoint=ec2_url, **aws_connect_params)
    except ClientError as err:
        module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
    results = dict(
        changed=False,
        exists=False
    )
    # Get existing trail facts
    trail = get_trail_facts(module, client, ct_params['Name'])
    # If the trail exists set the result exists variable
    if trail is not None:
        results['exists'] = True
    if state == 'absent' and results['exists']:
        # If Trail exists go ahead and delete
        results['changed'] = True
        results['exists'] = False
        results['trail'] = dict()
        if not module.check_mode:
            delete_trail(module, client, trail['TrailARN'])
    elif state == 'present' and results['exists']:
        # If Trail exists see if we need to update it
        do_update = False
        for key in ct_params:
            tkey = str(key)
            # boto3 has inconsistent parameter naming so we handle it here
            if key == 'EnableLogFileValidation':
                tkey = 'LogFileValidationEnabled'
            # We need to make an empty string equal None
            if ct_params.get(key) == '':
                val = None
            else:
                val = ct_params.get(key)
            if val != trail.get(tkey):
                do_update = True
                results['changed'] = True
                # If we are in check mode copy the changed values to the trail facts in result output to show what would change.
                if module.check_mode:
                    trail.update({tkey: ct_params.get(key)})
        if not module.check_mode and do_update:
            update_trail(module, client, ct_params)
            # Re-read facts so the reported trail reflects the update
            trail = get_trail_facts(module, client, ct_params['Name'])
        # Check if we need to start/stop logging
        if enable_logging and not trail['IsLogging']:
            results['changed'] = True
            trail['IsLogging'] = True
            if not module.check_mode:
                set_logging(module, client, name=ct_params['Name'], action='start')
        if not enable_logging and trail['IsLogging']:
            results['changed'] = True
            trail['IsLogging'] = False
            if not module.check_mode:
                set_logging(module, client, name=ct_params['Name'], action='stop')
        # Check if we need to update tags on resource
        tag_dry_run = False
        if module.check_mode:
            tag_dry_run = True
        tags_changed = tag_trail(module, client, tags=tags, trail_arn=trail['TrailARN'], curr_tags=trail['tags'], dry_run=tag_dry_run)
        if tags_changed:
            results['changed'] = True
            trail['tags'] = tags
        # Populate trail facts in output
        results['trail'] = camel_dict_to_snake_dict(trail)
    elif state == 'present' and not results['exists']:
        # Trail doesn't exist just go create it
        results['changed'] = True
        if not module.check_mode:
            # If we aren't in check_mode then actually create it
            created_trail = create_trail(module, client, ct_params)
            # Apply tags
            tag_trail(module, client, tags=tags, trail_arn=created_trail['TrailARN'])
            # Get the trail status
            try:
                status_resp = client.get_trail_status(Name=created_trail['Name'])
            except ClientError as err:
                module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
            # Set the logging state for the trail to desired value
            if enable_logging and not status_resp['IsLogging']:
                set_logging(module, client, name=ct_params['Name'], action='start')
            if not enable_logging and status_resp['IsLogging']:
                set_logging(module, client, name=ct_params['Name'], action='stop')
            # Get facts for newly created Trail
            trail = get_trail_facts(module, client, ct_params['Name'])
        # If we are in check mode create a fake return structure for the newly minted trail
        if module.check_mode:
            # Placeholder account id, replaced by the real one when STS is reachable
            acct_id = '123456789012'
            try:
                sts_client = boto3_conn(module, conn_type='client', resource='sts', region=region, endpoint=ec2_url, **aws_connect_params)
                acct_id = sts_client.get_caller_identity()['Account']
            except ClientError:
                pass
            trail = dict()
            trail.update(ct_params)
            # Mirror the real API: the status key differs from the create parameter
            trail['LogFileValidationEnabled'] = ct_params['EnableLogFileValidation']
            trail.pop('EnableLogFileValidation')
            fake_arn = 'arn:aws:cloudtrail:' + region + ':' + acct_id + ':trail/' + ct_params['Name']
            trail['HasCustomEventSelectors'] = False
            trail['HomeRegion'] = region
            trail['TrailARN'] = fake_arn
            trail['IsLogging'] = enable_logging
            trail['tags'] = tags
        # Populate trail facts in output
        results['trail'] = camel_dict_to_snake_dict(trail)
    module.exit_json(**results)
if __name__ == '__main__':
    main()
| gpl-3.0 |
AshleyLai/vtpm | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
	"""Top-level window that draws scheduler trace rectangles in a
	zoomable, scrollable area: one horizontal band per rectangle slot,
	with keyboard zoom/scroll and mouse selection wired to sched_tracer."""
	Y_OFFSET = 100
	RECT_HEIGHT = 100
	RECT_SPACE = 50
	EVENT_MARKING_WIDTH = 5
	def __init__(self, sched_tracer, title, parent = None, id = -1):
		wx.Frame.__init__(self, parent, id, title)
		(self.screen_width, self.screen_height) = wx.GetDisplaySize()
		self.screen_width -= 10
		self.screen_height -= 10
		self.zoom = 0.5
		self.scroll_scale = 20
		self.sched_tracer = sched_tracer
		self.sched_tracer.set_root_win(self)
		(self.ts_start, self.ts_end) = sched_tracer.interval()
		self.update_width_virtual()
		self.nr_rects = sched_tracer.nr_rectangles() + 1
		self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
		# whole window panel
		self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
		# scrollable container
		self.scroll = wx.ScrolledWindow(self.panel)
		self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
		self.scroll.EnableScrolling(True, True)
		self.scroll.SetFocus()
		# scrollable drawing area
		self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
		self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
		self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
		self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
		# bind on the container too so events work wherever focus lands
		self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
		self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
		self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
		self.scroll.Fit()
		self.Fit()
		self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
		self.txt = None
		self.Show(True)
	def us_to_px(self, val):
		# assumes val is in nanoseconds (divided by 10^3 -> us) — TODO confirm
		return val / (10 ** 3) * self.zoom
	def px_to_us(self, val):
		# inverse of us_to_px
		return (val / self.zoom) * (10 ** 3)
	def scroll_start(self):
		# current scroll origin in pixels (view start is in scroll units)
		(x, y) = self.scroll.GetViewStart()
		return (x * self.scroll_scale, y * self.scroll_scale)
	def scroll_start_us(self):
		# timestamp offset of the leftmost visible pixel
		(x, y) = self.scroll_start()
		return self.px_to_us(x)
	def paint_rectangle_zone(self, nr, color, top_color, start, end):
		# Draw one rectangle in band `nr`; if top_color is given, a thin
		# marker strip is painted along the rectangle's top edge first.
		offset_px = self.us_to_px(start - self.ts_start)
		width_px = self.us_to_px(end - self.ts_start)
		offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
		width_py = RootFrame.RECT_HEIGHT
		dc = self.dc
		if top_color is not None:
			(r, g, b) = top_color
			top_color = wx.Colour(r, g, b)
			brush = wx.Brush(top_color, wx.SOLID)
			dc.SetBrush(brush)
			dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
			width_py -= RootFrame.EVENT_MARKING_WIDTH
			offset_py += RootFrame.EVENT_MARKING_WIDTH
		(r ,g, b) = color
		color = wx.Colour(r, g, b)
		brush = wx.Brush(color, wx.SOLID)
		dc.SetBrush(brush)
		dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
	def update_rectangles(self, dc, start, end):
		# ask the tracer to (re)paint everything in the visible interval
		start += self.ts_start
		end += self.ts_start
		self.sched_tracer.fill_zone(start, end)
	def on_paint(self, event):
		dc = wx.PaintDC(self.scroll_panel)
		self.dc = dc
		width = min(self.width_virtual, self.screen_width)
		(x, y) = self.scroll_start()
		start = self.px_to_us(x)
		end = self.px_to_us(x + width)
		self.update_rectangles(dc, start, end)
	def rect_from_ypixel(self, y):
		# Map a y pixel to its band index; -1 when the pixel falls in the
		# spacing between bands or outside the drawn area.
		y -= RootFrame.Y_OFFSET
		rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
		height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
		if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
			return -1
		return rect
	def update_summary(self, txt):
		# replace the previous summary text widget (if any)
		if self.txt:
			self.txt.Destroy()
		self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
	def on_mouse_down(self, event):
		(x, y) = event.GetPositionTuple()
		rect = self.rect_from_ypixel(y)
		if rect == -1:
			return
		t = self.px_to_us(x) + self.ts_start
		self.sched_tracer.mouse_down(rect, t)
	def update_width_virtual(self):
		self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
	def __zoom(self, x):
		# Rescale while keeping timestamp x anchored at the view start
		self.update_width_virtual()
		(xpos, ypos) = self.scroll.GetViewStart()
		xpos = self.us_to_px(x) / self.scroll_scale
		self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
		self.Refresh()
	def zoom_in(self):
		x = self.scroll_start_us()
		self.zoom *= 2
		self.__zoom(x)
	def zoom_out(self):
		x = self.scroll_start_us()
		self.zoom /= 2
		self.__zoom(x)
	def on_key_press(self, event):
		# +/- zoom via raw key code; arrow keys scroll one unit at a time
		key = event.GetRawKeyCode()
		if key == ord("+"):
			self.zoom_in()
			return
		if key == ord("-"):
			self.zoom_out()
			return
		key = event.GetKeyCode()
		(x, y) = self.scroll.GetViewStart()
		if key == wx.WXK_RIGHT:
			self.scroll.Scroll(x + 1, y)
		elif key == wx.WXK_LEFT:
			self.scroll.Scroll(x - 1, y)
		elif key == wx.WXK_DOWN:
			self.scroll.Scroll(x, y + 1)
		elif key == wx.WXK_UP:
			self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
inspyration/odoo | addons/mrp_repair/wizard/__init__.py | 445 | 1096 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import cancel_repair
import make_invoice
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
happyleavesaoc/home-assistant | homeassistant/helpers/aiohttp_client.py | 5 | 5303 | """Helper for aiohttp webclient stuff."""
import asyncio
import sys
import aiohttp
from aiohttp.hdrs import USER_AGENT, CONTENT_TYPE
from aiohttp import web
from aiohttp.web_exceptions import HTTPGatewayTimeout, HTTPBadGateway
import async_timeout
from homeassistant.core import callback
from homeassistant.const import EVENT_HOMEASSISTANT_CLOSE
from homeassistant.const import __version__
# hass.data keys caching the shared TCP connector pools (SSL-verifying / not)
DATA_CONNECTOR = 'aiohttp_connector'
DATA_CONNECTOR_NOTVERIFY = 'aiohttp_connector_notverify'
# hass.data keys caching the shared client sessions built on those connectors
DATA_CLIENTSESSION = 'aiohttp_clientsession'
DATA_CLIENTSESSION_NOTVERIFY = 'aiohttp_clientsession_notverify'
# User-Agent value sent with every request made through these helpers
SERVER_SOFTWARE = 'HomeAssistant/{0} aiohttp/{1} Python/{2[0]}.{2[1]}'.format(
    __version__, aiohttp.__version__, sys.version_info)
@callback
def async_get_clientsession(hass, verify_ssl=True):
    """Return default aiohttp ClientSession.

    This method must be run in the event loop.
    """
    # One shared session per SSL-verification mode, cached on hass.data.
    key = DATA_CLIENTSESSION if verify_ssl else DATA_CLIENTSESSION_NOTVERIFY
    if key not in hass.data:
        session = aiohttp.ClientSession(
            loop=hass.loop,
            connector=_async_get_connector(hass, verify_ssl),
            headers={USER_AGENT: SERVER_SOFTWARE}
        )
        # Make sure the session is detached when Home Assistant closes.
        _async_register_clientsession_shutdown(hass, session)
        hass.data[key] = session
    return hass.data[key]
@callback
def async_create_clientsession(hass, verify_ssl=True, auto_cleanup=True,
                               **kwargs):
    """Create a new ClientSession with kwargs, i.e. for cookies.

    If auto_cleanup is False, you need to call detach() after the session
    returned is no longer used. Default is True, the session will be
    automatically detached on homeassistant_stop.

    This method must be run in the event loop.
    """
    session = aiohttp.ClientSession(
        connector=_async_get_connector(hass, verify_ssl),
        loop=hass.loop,
        headers={USER_AGENT: SERVER_SOFTWARE},
        **kwargs
    )
    if auto_cleanup:
        # Detach the session automatically on homeassistant close.
        _async_register_clientsession_shutdown(hass, session)
    return session
@asyncio.coroutine
def async_aiohttp_proxy_web(hass, request, web_coro, buffer_size=102400,
                            timeout=10):
    """Stream websession request to aiohttp web response.

    Awaits *web_coro* (a websession request coroutine, bounded by
    *timeout* seconds) and pipes its body through to the client of
    *request*.
    """
    try:
        with async_timeout.timeout(timeout, loop=hass.loop):
            req = yield from web_coro
    except asyncio.CancelledError:
        # The user cancelled the request
        return
    except asyncio.TimeoutError as err:
        # Timeout trying to start the web request
        raise HTTPGatewayTimeout() from err
    except aiohttp.ClientError as err:
        # Something went wrong with the connection
        raise HTTPBadGateway() from err
    # NOTE(review): buffer_size and timeout are accepted here but not
    # forwarded below, so the stream proxy uses its own defaults — confirm.
    yield from async_aiohttp_proxy_stream(hass, request, req.content,
                                          req.headers.get(CONTENT_TYPE))
@asyncio.coroutine
def async_aiohttp_proxy_stream(hass, request, stream, content_type,
                               buffer_size=102400, timeout=10):
    """Stream a stream to aiohttp web response.

    Relays *stream* to the client of *request* in chunks of *buffer_size*
    bytes; each individual read is bounded by *timeout* seconds.
    """
    response = web.StreamResponse()
    response.content_type = content_type
    yield from response.prepare(request)
    try:
        while True:
            with async_timeout.timeout(timeout, loop=hass.loop):
                data = yield from stream.read(buffer_size)
            if not data:
                # Upstream finished: flush and end the response cleanly
                yield from response.write_eof()
                break
            response.write(data)
    except (asyncio.TimeoutError, aiohttp.ClientError):
        # Something went wrong fetching data, close connection gracefully
        yield from response.write_eof()
    except asyncio.CancelledError:
        # The user closed the connection
        pass
@callback
# pylint: disable=invalid-name
def _async_register_clientsession_shutdown(hass, clientsession):
    """Register ClientSession close on Home Assistant shutdown.

    This method must be run in the event loop.
    """
    @callback
    def _on_homeassistant_close(event):
        """Detach the session when Home Assistant shuts down."""
        clientsession.detach()

    hass.bus.async_listen_once(
        EVENT_HOMEASSISTANT_CLOSE, _on_homeassistant_close)
@callback
def _async_get_connector(hass, verify_ssl=True):
    """Return the connector pool for aiohttp.

    This method must be run in the event loop.
    """
    # Pick the cache key and connector options for the requested mode.
    if verify_ssl:
        key = DATA_CONNECTOR
        conn_kwargs = {}
    else:
        key = DATA_CONNECTOR_NOTVERIFY
        conn_kwargs = {'verify_ssl': False}

    if key in hass.data:
        return hass.data[key]

    connector = aiohttp.TCPConnector(loop=hass.loop, **conn_kwargs)
    hass.data[key] = connector

    @callback
    def _async_close_connector(event):
        """Close connector pool."""
        connector.close()

    # Only a freshly created connector needs a shutdown listener.
    hass.bus.async_listen_once(
        EVENT_HOMEASSISTANT_CLOSE, _async_close_connector)
    return connector
| apache-2.0 |
KaimingOuyang/HPC-K-Means | papi-5.4.3/src/libpfm-3.y/python/self.py | 9 | 2058 | #!/usr/bin/env python
#
# Copyright (c) 2008 Google, Inc.
# Contributed by Arun Sharma <arun.sharma@google.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# Self monitoring example. Copied from self.c
import os
from optparse import OptionParser
import random
import errno
from perfmon import *
if __name__ == '__main__':
    parser = OptionParser()
    parser.add_option("-e", "--events", help="Events to use",
                      action="store", dest="events")
    (options, args) = parser.parse_args()
    # Per-thread monitoring session for this process
    s = PerThreadSession(int(os.getpid()))
    if options.events:
        events = options.events.split(",")
    else:
        # BUG FIX: the original `raise "..."` is a string exception, which
        # has been invalid since Python 2.6 and produced a TypeError instead
        # of the intended message. Report through optparse, which prints the
        # message with usage and exits.
        parser.error("You need to specify events to monitor")
    s.dispatch_events(events)
    s.load()
    s.start()
    # code to be measured
    #
    # note that this is not identical to what examples/self.c does
    # thus counts will be different in the end
    for i in range(1, 10000000):
        random.random()
    s.stop()
    # read the counts (print(...) and range(...) work on Python 2 and 3)
    for i in range(s.npmds):
        print("""PMD%d\t%lu""" % (s.pmds[0][i].reg_num, s.pmds[0][i].reg_value))
| bsd-3-clause |
jnewbery/bitcoin | test/functional/p2p_fingerprint.py | 28 | 5044 | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various fingerprinting protections.
If a stale block more than a month old or its header are requested by a peer,
the node should pretend that it does not have it to avoid fingerprinting.
"""
import time
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.messages import CInv, MSG_BLOCK
from test_framework.p2p import (
P2PInterface,
msg_headers,
msg_block,
msg_getdata,
msg_getheaders,
p2p_lock,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
)
class P2PFingerprintTest(BitcoinTestFramework):
    """Verify the node withholds stale blocks/headers older than a month
    so peers cannot fingerprint it, while still serving recent stale
    blocks and old blocks on the active chain."""
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
    # Build a chain of blocks on top of given one
    def build_chain(self, nblocks, prev_hash, prev_height, prev_median_time):
        blocks = []
        for _ in range(nblocks):
            coinbase = create_coinbase(prev_height + 1)
            block_time = prev_median_time + 1
            block = create_block(int(prev_hash, 16), coinbase, block_time)
            block.solve()
            blocks.append(block)
            prev_hash = block.hash
            prev_height += 1
            prev_median_time = block_time
        return blocks
    # Send a getdata request for a given block hash
    def send_block_request(self, block_hash, node):
        msg = msg_getdata()
        msg.inv.append(CInv(MSG_BLOCK, block_hash))
        node.send_message(msg)
    # Send a getheaders request for a given single block hash
    def send_header_request(self, block_hash, node):
        msg = msg_getheaders()
        msg.hashstop = block_hash
        node.send_message(msg)
    # Checks that stale blocks timestamped more than a month ago are not served
    # by the node while recent stale blocks and old active chain blocks are.
    # This does not currently test that stale blocks timestamped within the
    # last month but that have over a month's worth of work are also withheld.
    def run_test(self):
        node0 = self.nodes[0].add_p2p_connection(P2PInterface())
        # Set node time to 60 days ago
        self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)
        # Generating a chain of 10 blocks
        block_hashes = self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address)
        # Create longer chain starting 2 blocks before current tip
        height = len(block_hashes) - 2
        block_hash = block_hashes[height - 1]
        block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
        new_blocks = self.build_chain(5, block_hash, height, block_time)
        # Force reorg to a longer chain
        node0.send_message(msg_headers(new_blocks))
        node0.wait_for_getdata([x.sha256 for x in new_blocks])
        for block in new_blocks:
            node0.send_and_ping(msg_block(block))
        # Check that reorg succeeded
        assert_equal(self.nodes[0].getblockcount(), 13)
        # The tip of the original (now stale) chain
        stale_hash = int(block_hashes[-1], 16)
        # Check that getdata request for stale block succeeds
        self.send_block_request(stale_hash, node0)
        node0.wait_for_block(stale_hash, timeout=3)
        # Check that getheader request for stale block header succeeds
        self.send_header_request(stale_hash, node0)
        node0.wait_for_header(hex(stale_hash), timeout=3)
        # Longest chain is extended so stale is much older than chain tip
        self.nodes[0].setmocktime(0)
        block_hash = int(self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)[-1], 16)
        assert_equal(self.nodes[0].getblockcount(), 14)
        node0.wait_for_block(block_hash, timeout=3)
        # Request for very old stale block should now fail
        with p2p_lock:
            node0.last_message.pop("block", None)
        self.send_block_request(stale_hash, node0)
        node0.sync_with_ping()
        assert "block" not in node0.last_message
        # Request for very old stale block header should now fail
        with p2p_lock:
            node0.last_message.pop("headers", None)
        self.send_header_request(stale_hash, node0)
        node0.sync_with_ping()
        assert "headers" not in node0.last_message
        # Verify we can fetch very old blocks and headers on the active chain
        block_hash = int(block_hashes[2], 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()
        self.send_block_request(block_hash, node0)
        node0.wait_for_block(block_hash, timeout=3)
        self.send_header_request(block_hash, node0)
        node0.wait_for_header(hex(block_hash), timeout=3)
if __name__ == '__main__':
    P2PFingerprintTest().main()
| mit |
ShashaQin/erpnext | erpnext/hr/doctype/upload_attendance/upload_attendance.py | 14 | 4321 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, add_days, date_diff
from frappe import _
from frappe.utils.csvutils import UnicodeWriter
from frappe.model.document import Document
class UploadAttendance(Document):
	"""Single DocType controller for the attendance upload tool; the CSV
	template and import logic live in the module-level whitelisted
	functions in this file."""
	pass
@frappe.whitelist()
def get_template():
	"""Build and return a CSV template for bulk attendance upload."""
	if not frappe.has_permission("Attendance", "create"):
		raise frappe.PermissionError

	args = frappe.local.form_dict
	writer = add_data(add_header(UnicodeWriter()), args)

	# write out response as a type csv
	frappe.response['result'] = cstr(writer.getvalue())
	frappe.response['type'] = 'csv'
	frappe.response['doctype'] = "Attendance"
def add_header(w):
	"""Write the instruction rows and column headings to the CSV writer."""
	status_field = frappe.get_meta("Attendance").get_field("status")
	status = ", ".join((status_field.options or "").strip().split("\n"))
	for header_row in (
		["Notes:"],
		["Please do not change the template headings"],
		["Status should be one of these values: " + status],
		["If you are overwriting existing attendance records, 'ID' column mandatory"],
		["ID", "Employee", "Employee Name", "Date", "Status",
			"Company", "Naming Series"],
	):
		w.writerow(header_row)
	return w
def add_data(w, args):
	"""Write one template row per (date, employee) pair, pre-filling the
	ID, status and naming series of any existing attendance record so the
	upload can overwrite it."""
	dates = get_dates(args)
	employees = get_active_employees()
	existing_attendance_records = get_existing_attendance_records(args)
	for date in dates:
		for employee in employees:
			existing_attendance = {}
			if existing_attendance_records \
				and tuple([date, employee.name]) in existing_attendance_records:
				existing_attendance = existing_attendance_records[tuple([date, employee.name])]
			row = [
				existing_attendance and existing_attendance.name or "",
				employee.name, employee.employee_name, date,
				existing_attendance and existing_attendance.status or "", employee.company,
				existing_attendance and existing_attendance.naming_series or get_naming_series(),
			]
			w.writerow(row)
	return w
def get_dates(args):
	"""Return every date from args["from_date"] to args["to_date"], inclusive."""
	no_of_days = date_diff(add_days(args["to_date"], 1), args["from_date"])
	return [add_days(args["from_date"], offset) for offset in range(no_of_days)]
def get_active_employees():
	"""Return name, employee_name and company of all active employees."""
	employees = frappe.db.sql("""select name, employee_name, company
		from tabEmployee where docstatus < 2 and status = 'Active'""", as_dict=1)
	return employees
def get_existing_attendance_records(args):
	"""Map (att_date, employee) -> attendance row for the given date range."""
	attendance = frappe.db.sql("""select name, att_date, employee, status, naming_series
		from `tabAttendance` where att_date between %s and %s and docstatus < 2""",
		(args["from_date"], args["to_date"]), as_dict=1)
	return dict(((att.att_date, att.employee), att) for att in attendance)
def get_naming_series():
	"""Return the first configured naming series for Attendance.

	Raises via frappe.throw when no non-empty series is configured.
	BUG FIX: the old emptiness check could never trigger because
	``"".strip().split("\\n")`` yields ``[""]`` (a non-empty list), so a
	blank options field silently returned "" instead of raising; it also
	crashed with AttributeError when options was None.
	"""
	options = frappe.get_meta("Attendance").get_field("naming_series").options or ""
	# keep only non-blank lines
	series = [line for line in options.strip().split("\n") if line.strip()]
	if not series:
		frappe.throw(_("Please setup numbering series for Attendance via Setup > Numbering Series"))
	return series[0]
@frappe.whitelist()
def upload():
	"""Import attendance records from the uploaded CSV (Python 2 module).

	Returns {"messages": [...], "error": bool}; the whole import is rolled
	back when any row fails, otherwise committed.
	"""
	if not frappe.has_permission("Attendance", "create"):
		raise frappe.PermissionError
	from frappe.utils.csvutils import read_csv_content_from_uploaded_file
	from frappe.modules import scrub
	rows = read_csv_content_from_uploaded_file()
	# drop completely empty rows
	rows = filter(lambda x: x and any(x), rows)
	if not rows:
		msg = [_("Please select a csv file")]
		return {"messages": msg, "error": msg}
	# row 5 holds the column headings written by get_template()
	columns = [scrub(f) for f in rows[4]]
	columns[0] = "name"
	columns[3] = "att_date"
	ret = []
	error = False
	from frappe.utils.csvutils import check_record, import_doc
	for i, row in enumerate(rows[5:]):
		if not row: continue
		row_idx = i + 5
		d = frappe._dict(zip(columns, row))
		d["doctype"] = "Attendance"
		if d.name:
			# existing record: preserve its docstatus so submitted docs stay submitted
			d["docstatus"] = frappe.db.get_value("Attendance", d.name, "docstatus")
		try:
			check_record(d)
			ret.append(import_doc(d, "Attendance", 1, row_idx, submit=True))
		except Exception, e:
			error = True
			ret.append('Error for row (#%d) %s : %s' % (row_idx,
				len(row)>1 and row[1] or "", cstr(e)))
			frappe.errprint(frappe.get_traceback())
	if error:
		frappe.db.rollback()
	else:
		frappe.db.commit()
	return {"messages": ret, "error": error}
| agpl-3.0 |
voltaicsca/deluge | deluge/ui/Win32IconImagePlugin.py | 14 | 8239 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008 Bryan Davis <casadebender+pil@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# $Id$
"""Alternate PIL plugin for dealing with Microsoft .ico files. Handles XOR
transparency masks, XP style 8bit alpha channels and Vista style PNG image
parts.
>>> import PIL.Image
>>> import Win32IconImagePlugin
>>> ico = PIL.Image.open("down.ico")
>>> print ico.info['sizes']
set([(16, 16), (48, 48), (256, 256), (32, 32)])
>>> ico.size = (16, 16)
>>> ico.show()
This implementation builds on several samples that I found around the net.
Karsten Hiddemann posted a hint on Image-SIG_ that got me started on this.
Some time later I found a `django snippet`_ by *dc* that I borrowed the
``struct.unpack`` syntax from. I also spent a lot of time looking at the
IcoImagePlugin, BmpImagePlugin, PngImagePlugin and other files from PIL.
Icon format references:
* http://en.wikipedia.org/wiki/ICO_(file_format)
* http://msdn.microsoft.com/en-us/library/ms997538.aspx
Example icon to test with `down.ico`_
.. _Image-SIG http://mail.python.org/pipermail/image-sig/2008-May/004986.html
.. _django snippet http://www.djangosnippets.org/snippets/1287/
.. _down.ico http://www.axialis.com/tutorials/iw/down.ico
"""
import logging
import struct
import PIL.Image
import PIL.ImageChops
import PIL.ImageFile
import PIL.BmpImagePlugin
import PIL.PngImagePlugin
_MAGIC = '\0\0\1\0'  # .ico header signature: reserved=0, resource type=1 (icon)
log = logging.getLogger(__name__)
class Win32IcoFile (object):
"""
Decoder for Microsoft .ico files.
"""
  def __init__ (self, buf):
    """
    Args:
      buf: file-like object containing ico file data
    """
    self.buf = buf
    self.entry = []
    # ICONDIR header: reserved (0), type (1 = icon), image count
    header = struct.unpack('<3H', buf.read(6))
    if (0, 1) != header[:2]:
      raise SyntaxError, 'not an ico file'
    self.nb_items = header[2]
    dir_fields = ('width', 'height', 'nb_color', 'reserved', 'planes', 'bpp',
        'size', 'offset')
    for i in xrange(self.nb_items):
      # one 16-byte ICONDIRENTRY per image
      directory = list(struct.unpack('<4B2H2I', buf.read(16)))
      for j in xrange(3):
        # a zero byte in width/height/nb_color means 256
        if not directory[j]:
          directory[j] = 256
      icon_header = dict(zip(dir_fields, directory))
      # fall back to 4 bpp when bpp is unset but the palette has 16 colors
      icon_header['color_depth'] = (
          icon_header['bpp'] or
          (icon_header['nb_color'] == 16 and 4))
      icon_header['dim'] = (icon_header['width'], icon_header['height'])
      self.entry.append(icon_header)
    #end for (read headers)
    # order by size and color depth
    self.entry.sort(lambda x, y: \
        cmp(x['width'], y['width']) or cmp(x['color_depth'], y['color_depth']))
    self.entry.reverse()
  #end __init__
def sizes (self):
"""
Get a list of all available icon sizes and color depths.
"""
return set((h['width'], h['height']) for h in self.entry)
#end sizes
def get_image (self, size, bpp=False):
"""
Get an image from the icon
Args:
size: tuple of (width, height)
bpp: color depth
"""
idx = 0
for i in range(self.nb_items):
h = self.entry[i]
if size == h['dim'] and (bpp == False or bpp == h['color_depth']):
return self.frame(i)
return self.frame(0)
#end get_image
def frame (self, idx):
"""
Get the icon from frame idx
Args:
idx: Frame index
Returns:
PIL.Image
"""
header = self.entry[idx]
self.buf.seek(header['offset'])
data = self.buf.read(8)
self.buf.seek(header['offset'])
if data[:8] == PIL.PngImagePlugin._MAGIC:
# png frame
im = PIL.PngImagePlugin.PngImageFile(self.buf)
else:
# XOR + AND mask bmp frame
im = PIL.BmpImagePlugin.DibImageFile(self.buf)
log.debug("Loaded image: %s %s %s %s", im.format, im.mode, im.size,
im.info)
# change tile dimension to only encompass XOR image
im.size = im.size[0], im.size[1] / 2
d, e, o, a = im.tile[0]
im.tile[0] = d, (0,0) + im.size, o, a
# figure out where AND mask image starts
mode = a[0]
bpp = 8
for k in PIL.BmpImagePlugin.BIT2MODE.keys():
if mode == PIL.BmpImagePlugin.BIT2MODE[k][1]:
bpp = k
break
#end for
log.debug("o:%s, w:%s, h:%s, bpp:%s", o, im.size[0], im.size[1], bpp)
and_mask_offset = o + (im.size[0] * im.size[1] * (bpp / 8.0))
if 32 == bpp:
# 32-bit color depth icon image allows semitransparent areas
# PIL's DIB format ignores transparency bits, recover them
# The DIB is packed in BGRX byte order where X is the alpha channel
# Back up to start of bmp data
self.buf.seek(o)
# extract every 4th byte (eg. 3,7,11,15,...)
alpha_bytes = self.buf.read(im.size[0] * im.size[1] * 4)[3::4]
# convert to an 8bpp grayscale image
mask = PIL.Image.frombuffer(
'L', # 8bpp
im.size, # (w, h)
alpha_bytes, # source chars
'raw', # raw decoder
('L', 0, -1) # 8bpp inverted, unpadded, reversed
)
# apply mask image as alpha channel
im = im.convert('RGBA')
im.putalpha(mask)
log.debug("image mode: %s", im.mode)
else:
# get AND image from end of bitmap
w = im.size[0]
if (w % 32) > 0:
# bitmap row data is aligned to word boundaries
w += 32 - (im.size[0] % 32)
# the total mask data is padded row size * height / bits per char
total_bytes = long((w * im.size[1]) / 8)
log.debug("tot=%d, off=%d, w=%d, size=%d",
len(data), and_mask_offset, w, total_bytes)
self.buf.seek(and_mask_offset)
maskData = self.buf.read(total_bytes)
# convert raw data to image
mask = PIL.Image.frombuffer(
'1', # 1 bpp
im.size, # (w, h)
maskData, # source chars
'raw', # raw decoder
('1;I', int(w/8), -1) # 1bpp inverted, padded, reversed
)
# now we have two images, im is XOR image and mask is AND image
# set mask as alpha channel
im = im.convert('RGBA')
im.putalpha(mask)
log.debug("image mode: %s", im.mode)
#end if !'RGBA'
#end if (png)/else(bmp)
return im
#end frame
def __repr__ (self):
s = 'Microsoft Icon: %d images (max %dx%d %dbpp)' % (
len(self.entry), self.entry[0]['width'], self.entry[0]['height'],
self.entry[0]['bpp'])
return s
#end __repr__
#end Win32IcoFile
class Win32IconImageFile (PIL.ImageFile.ImageFile):
  """
  Read-only PIL image plugin for Microsoft .ico files.

  On open, the largest resolution stored in the file is selected; assign a
  different (width, height) to the 'size' attribute before calling 'load'
  to pick another one. info['sizes'] lists every size available in the
  icon file.

  Handles classic, XP and Vista icon formats.
  """

  format = 'ICO'
  format_description = 'Microsoft icon'

  def _open (self):
    # Parse every directory entry up front; entry[0] is the largest image
    # because Win32IcoFile sorts (and reverses) its entries.
    self.ico = Win32IcoFile(self.fp)
    self.info['sizes'] = self.ico.sizes()
    self.size = self.ico.entry[0]['dim']
    self.load()
  #end _open

  def load (self):
    frame = self.ico.get_image(self.size)
    # A PNG-backed frame is decoded lazily; force the real decode now.
    frame.load()
    self.im = frame.im
    self.mode = frame.mode
    self.size = frame.size
  #end load
#end class Win32IconImageFile
def _accept (prefix):
  """
  Quick check used by Image.open(): does *prefix* begin with the
  four-byte ICO magic number?
  """
  return prefix.startswith(_MAGIC)
#end _accept
# Register our decoder with PIL so that PIL.Image.open() recognises .ico
# data (via _accept) and dispatches decoding to Win32IconImageFile.
PIL.Image.register_open(Win32IconImageFile.format, Win32IconImageFile, _accept)
PIL.Image.register_extension(Win32IconImageFile.format, ".ico")
| gpl-3.0 |
tbabej/astropy | astropy/coordinates/tests/test_transformations.py | 2 | 5927 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from ... import units as u
from .. import transformations as t
from ..builtin_frames import ICRS, FK5, FK4, FK4NoETerms, Galactic
from .. import representation as r
from ..baseframe import frame_transform_graph
from ...tests.helper import assert_quantity_allclose as assert_allclose
from ...time import Time
# Coordinate frames defined just for these tests.
class TCoo1(ICRS):
    """Trivial ICRS subclass used as a transform endpoint in these tests."""
    pass
class TCoo2(ICRS):
    """Trivial ICRS subclass used as a transform endpoint in these tests."""
    pass
def test_transform_classes():
    """
    Tests the class-based/OO syntax for creating transforms
    """
    # Function transform; passing register_graph registers it immediately.
    tfun = lambda c, f: f.__class__(ra=c.ra, dec=c.dec)
    trans1 = t.FunctionTransform(tfun, TCoo1, TCoo2,
                        register_graph=frame_transform_graph)

    c1 = TCoo1(ra=1*u.radian, dec=0.5*u.radian)
    c2 = c1.transform_to(TCoo2)
    assert_allclose(c2.ra.radian, 1)
    assert_allclose(c2.dec.radian, 0.5)

    # Matrix transform whose matrix depends on the input coordinate.
    def matfunc(coo, fr):
        return [[1, 0, 0],
                [0, coo.ra.degree, 0],
                [0, 0, 1]]
    trans2 = t.DynamicMatrixTransform(matfunc, TCoo1, TCoo2)
    trans2.register(frame_transform_graph)

    c3 = TCoo1(ra=1*u.deg, dec=2*u.deg)
    c4 = c3.transform_to(TCoo2)

    assert_allclose(c4.ra.degree, 1)
    # NOTE(review): this assertion duplicates the previous one; the second
    # check was presumably meant to look at c4.dec — confirm and tighten.
    assert_allclose(c4.ra.degree, 1)

    # be sure to unregister the second one - no need for trans1 because it
    # already got unregistered when trans2 was created.
    trans2.unregister(frame_transform_graph)
def test_transform_decos():
    """
    Tests the decorator syntax for creating transforms
    """
    c1 = TCoo1(ra=1*u.deg, dec=2*u.deg)

    # Function transform registered through the decorator.
    @frame_transform_graph.transform(t.FunctionTransform, TCoo1, TCoo2)
    def trans(coo1, f):
        return TCoo2(ra=coo1.ra, dec=coo1.dec * 2)

    c2 = c1.transform_to(TCoo2)
    assert_allclose(c2.ra.degree, 1)
    assert_allclose(c2.dec.degree, 4)

    c3 = TCoo1(r.CartesianRepresentation(x=1*u.pc, y=1*u.pc, z=2*u.pc))

    # Static matrix transform that doubles x; the assertions below match
    # this matrix, so it takes precedence over the function transform
    # registered above for the same TCoo1 -> TCoo2 edge.
    @frame_transform_graph.transform(t.StaticMatrixTransform, TCoo1, TCoo2)
    def matrix():
        return [[2, 0, 0],
                [0, 1, 0],
                [0, 0, 1]]

    c4 = c3.transform_to(TCoo2)

    assert_allclose(c4.cartesian.x, 2*u.pc)
    assert_allclose(c4.cartesian.y, 1*u.pc)
    assert_allclose(c4.cartesian.z, 2*u.pc)
    # NOTE(review): neither decorator-registered transform is unregistered
    # here — confirm this cannot leak into other tests.
def test_shortest_path():
    """
    Exercise TransformGraph.find_shortest_path on a small hand-built graph
    with weighted edges, a cycle and unreachable nodes.
    """
    class FakeTransform(object):
        # Stand-in for a real transform: only 'priority' is consulted by
        # the path search.
        def __init__(self, pri):
            self.priority = pri

    g = t.TransformGraph()

    #cheating by adding graph elements directly that are not classes - the
    #graphing algorithm still works fine with integers - it just isn't a valid
    #TransformGraph

    #the graph is a downward diamond (1 -> {2, 3} -> 4) with the 3 -> 4 edge
    #heavier, a cycle from the bottom (4) back to the top (1), and a pair of
    #nodes (5, 6) isolated from node 1
    g._graph[1][2] = FakeTransform(1)
    g._graph[1][3] = FakeTransform(1)
    g._graph[2][4] = FakeTransform(1)
    g._graph[3][4] = FakeTransform(2)
    g._graph[4][1] = FakeTransform(5)
    g._graph[5][6] = FakeTransform(1)

    path, d = g.find_shortest_path(1, 2)
    assert path == [1, 2]
    assert d == 1
    path, d = g.find_shortest_path(1, 3)
    assert path == [1, 3]
    assert d == 1
    path, d = g.find_shortest_path(1, 4)
    print('Cached paths:', g._shortestpaths)
    # the cheaper branch through 2 must win over the heavier one through 3
    assert path == [1, 2, 4]
    assert d == 2

    #unreachable
    path, d = g.find_shortest_path(1, 5)
    assert path is None
    assert d == float('inf')

    path, d = g.find_shortest_path(5, 6)
    assert path == [5, 6]
    assert d == 1
def test_sphere_cart():
    """
    Tests the spherical <-> cartesian transform functions
    """
    from ...utils import NumpyRNGContext
    from .. import spherical_to_cartesian, cartesian_to_spherical

    # forward transform: (r, lat, lon) -> expected (x, y, z)
    forward_cases = [
        ((1, 0, 0), (1, 0, 0)),
        ((0, 1, 1), (0, 0, 0)),
        ((5, 0, np.arcsin(4. / 5.)), (3, 4, 0)),
    ]
    for (rad, lat_in, lon_in), (ex, ey, ez) in forward_cases:
        x, y, z = spherical_to_cartesian(rad, lat_in, lon_in)
        assert_allclose(x, ex)
        assert_allclose(y, ey)
        assert_allclose(z, ez)

    # inverse transform on a point on the +y axis
    r, lat, lon = cartesian_to_spherical(0, 1, 0)
    assert_allclose(r, 1)
    assert_allclose(lat, 0 * u.deg)
    assert_allclose(lon, np.pi / 2 * u.rad)

    # round-trip random points through both transforms
    with NumpyRNGContext(13579):
        x, y, z = np.random.randn(3, 5)

    r, lat, lon = cartesian_to_spherical(x, y, z)
    x2, y2, z2 = spherical_to_cartesian(r, lat, lon)

    assert_allclose(x, x2)
    assert_allclose(y, y2)
    assert_allclose(z, z2)
def test_transform_path_pri():
    """
    Check transformation path prioritization: the ICRS -> Galactic
    transformation must always route through FK5, never FK4.
    """
    frame_transform_graph.invalidate_cache()

    path, dist = frame_transform_graph.find_shortest_path(ICRS, Galactic)
    assert path == [ICRS, FK5, Galactic]
    assert dist == 2

    #but direct from FK4 to Galactic should still be possible
    path, dist = frame_transform_graph.find_shortest_path(FK4, Galactic)
    assert path == [FK4, FK4NoETerms, Galactic]
    assert dist == 2
def test_obstime():
    """
    Checks that observation time is accounted for at least in
    FK4 <-> ICRS transformations
    """
    # same sky position observed at two different epochs
    icrs_results = {}
    for key in ('B1950', 'J1975'):
        epoch = Time(key, scale='utc')
        fk4 = FK4(ra=1*u.deg, dec=2*u.deg, obstime=epoch)
        icrs_results[key] = fk4.transform_to(ICRS)

    # the resulting coordinates must *differ* because the obstime differs
    assert icrs_results['B1950'].ra.degree != icrs_results['J1975'].ra.degree
    assert icrs_results['B1950'].dec.degree != icrs_results['J1975'].dec.degree
| bsd-3-clause |
beernarrd/gramps | gramps/gen/lib/person.py | 1 | 41735 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2010 Michiel D. Nauta
# Copyright (C) 2010 Nick Hall
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Person object for Gramps.
"""
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .primaryobj import PrimaryObject
from .citationbase import CitationBase
from .notebase import NoteBase
from .mediabase import MediaBase
from .attrbase import AttributeBase
from .addressbase import AddressBase
from .ldsordbase import LdsOrdBase
from .urlbase import UrlBase
from .tagbase import TagBase
from .name import Name
from .eventref import EventRef
from .personref import PersonRef
from .attrtype import AttributeType
from .eventroletype import EventRoleType
from .attribute import Attribute
from .const import IDENTICAL, EQUAL, DIFFERENT
from .handle import Handle
from ..const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Person class
#
#-------------------------------------------------------------------------
class Person(CitationBase, NoteBase, AttributeBase, MediaBase,
             AddressBase, UrlBase, LdsOrdBase, PrimaryObject):
    """
    The Person record is the Gramps in-memory representation of an
    individual person. It contains all the information related to
    an individual.

    Person objects are usually created in one of two ways.

    1. Creating a new person object, which is then initialized and added to
       the database.
    2. Retrieving an object from the database using the records handle.

    Once a Person object has been modified, it must be committed
    to the database using the database object's commit_person function,
    or the changes will be lost.
    """
    # Gender constants. The integer values are written out by serialize()
    # and stored in the database, so they must never be renumbered.
    UNKNOWN = 2
    MALE = 1
    FEMALE = 0
    def __init__(self, data=None):
        """
        Create a new Person instance.

        After initialization, most data items have empty or null values,
        including the database handle.

        :param data: optional serialized tuple as produced by
                     :meth:`serialize`; when given, the new person is
                     populated from it via :meth:`unserialize`.
        :type data: tuple or None
        """
        PrimaryObject.__init__(self)
        CitationBase.__init__(self)
        NoteBase.__init__(self)
        MediaBase.__init__(self)
        AttributeBase.__init__(self)
        AddressBase.__init__(self)
        UrlBase.__init__(self)
        LdsOrdBase.__init__(self)
        self.primary_name = Name()
        self.event_ref_list = []
        self.family_list = []
        self.parent_family_list = []
        self.alternate_names = []
        self.person_ref_list = []
        self.__gender = Person.UNKNOWN
        # Indexes into event_ref_list; -1 means "not set" (see
        # get_birth_ref/get_death_ref).
        self.death_ref_index = -1
        self.birth_ref_index = -1
        if data:
            self.unserialize(data)

        # We hold a reference to the GrampsDB so that we can maintain
        # its genderStats. It doesn't get set here, but from
        # GenderStats.count_person.
def __eq__(self, other):
return isinstance(other, Person) and self.handle == other.handle
def __ne__(self, other):
return not self == other
    def serialize(self):
        """
        Convert the data held in the Person to a Python tuple that
        represents all the data elements.

        This method is used to convert the object into a form that can easily
        be saved to a database.

        These elements may be primitive Python types (string, integers),
        complex Python types (lists or tuples, or Python objects. If the
        target database cannot handle complex types (such as objects or
        lists), the database is responsible for converting the data into
        a form that it can use.

        The positional layout of the tuple must stay in sync with
        :meth:`unserialize` and :meth:`get_schema`.

        :returns: Returns a python tuple containing the data that should
                  be considered persistent.
        :rtype: tuple
        """
        return (
            self.handle,                                         # 0
            self.gramps_id,                                      # 1
            self.__gender,                                       # 2
            self.primary_name.serialize(),                       # 3
            [name.serialize() for name in self.alternate_names], # 4
            self.death_ref_index,                                # 5
            self.birth_ref_index,                                # 6
            [er.serialize() for er in self.event_ref_list],      # 7
            self.family_list,                                    # 8
            self.parent_family_list,                             # 9
            MediaBase.serialize(self),                           # 10
            AddressBase.serialize(self),                         # 11
            AttributeBase.serialize(self),                       # 12
            UrlBase.serialize(self),                             # 13
            LdsOrdBase.serialize(self),                          # 14
            CitationBase.serialize(self),                        # 15
            NoteBase.serialize(self),                            # 16
            self.change,                                         # 17
            TagBase.serialize(self),                             # 18
            self.private,                                        # 19
            [pr.serialize() for pr in self.person_ref_list]      # 20
            )
    @classmethod
    def get_labels(cls, _):
        """
        Return a dict mapping each serialized field name to a translated,
        human-readable label.

        :param _: translation function applied to every label
        :returns: field name -> translated label
        :rtype: dict
        """
        return {
            "handle": _("Handle"),
            "gramps_id": _("Gramps ID"),
            "gender": _("Gender"),
            "primary_name": _("Primary name"),
            "alternate_names": _("Alternate names"),
            "death_ref_index": _("Death reference index"),
            "birth_ref_index": _("Birth reference index"),
            "event_ref_list": _("Event references"),
            "family_list": _("Families"),
            "parent_family_list": _("Parent families"),
            "media_list": _("Media"),
            "address_list": _("Addresses"),
            "attribute_list": _("Attributes"),
            "urls": _("Urls"),
            "lds_ord_list": _("LDS ordinances"),
            "citation_list": _("Citations"),
            "note_list": _("Notes"),
            "change": _("Last changed"),
            "tag_list": _("Tags"),
            "private": _("Private"),
            "person_ref_list": _("Person references"),
            "probably_alive": _("Probably alive"),
        }
    @classmethod
    def get_schema(cls):
        """
        Return the schema as a dictionary for this class.

        The keys mirror the field order of :meth:`serialize`.

        :returns: field name -> type (or nested list/Handle structure)
        :rtype: dict
        """
        # Imported locally rather than at module level, presumably to avoid
        # circular imports between gen.lib modules — TODO confirm.
        from .mediaref import MediaRef
        from .address import Address
        from .url import Url
        from .ldsord import LdsOrd
        return {
            "handle": Handle("Person", "PERSON-HANDLE"),
            "gramps_id": str,
            "gender": int,
            "primary_name": Name,
            "alternate_names": [Name],
            "death_ref_index": int,
            "birth_ref_index": int,
            "event_ref_list": [EventRef],
            "family_list": [Handle("Family", "FAMILY-HANDLE")],
            "parent_family_list": [Handle("Family", "FAMILY-HANDLE")],
            "media_list": [MediaRef],
            "address_list": [Address],
            "attribute_list": [Attribute],
            "urls": [Url],
            "lds_ord_list": [LdsOrd],
            "citation_list": [Handle("Citation", "CITATION-HANDLE")],
            "note_list": [Handle("Note", "NOTE-HANDLE")],
            "change": int,
            "tag_list": [Handle("Tag", "TAG-HANDLE")],
            "private": bool,
            "person_ref_list": [PersonRef]
        }
    def unserialize(self, data):
        """
        Convert the data held in a tuple created by the serialize method
        back into the data in a Person object.

        The tuple layout must match :meth:`serialize` position for position.

        :param data: tuple containing the persistent data associated the
                     Person object
        :type data: tuple
        :returns: this Person instance, to allow chaining
        :rtype: Person
        """
        (self.handle,             # 0
         self.gramps_id,          # 1
         self.__gender,           # 2
         primary_name,            # 3
         alternate_names,         # 4
         self.death_ref_index,    # 5
         self.birth_ref_index,    # 6
         event_ref_list,          # 7
         self.family_list,        # 8
         self.parent_family_list, # 9
         media_list,              # 10
         address_list,            # 11
         attribute_list,          # 12
         urls,                    # 13
         lds_ord_list,            # 14
         citation_list,           # 15
         note_list,               # 16
         self.change,             # 17
         tag_list,                # 18
         self.private,            # 19
         person_ref_list,         # 20
         ) = data

        self.primary_name = Name()
        self.primary_name.unserialize(primary_name)
        # These comprehensions rely on each unserialize() returning the
        # instance itself (as this method does below).
        self.alternate_names = [Name().unserialize(name)
                                for name in alternate_names]
        self.event_ref_list = [EventRef().unserialize(er)
                               for er in event_ref_list]
        self.person_ref_list = [PersonRef().unserialize(pr)
                                for pr in person_ref_list]
        MediaBase.unserialize(self, media_list)
        LdsOrdBase.unserialize(self, lds_ord_list)
        AddressBase.unserialize(self, address_list)
        AttributeBase.unserialize(self, attribute_list)
        UrlBase.unserialize(self, urls)
        CitationBase.unserialize(self, citation_list)
        NoteBase.unserialize(self, note_list)
        TagBase.unserialize(self, tag_list)
        return self
def _has_handle_reference(self, classname, handle):
"""
Return True if the object has reference to a given handle of given
primary object type.
:param classname: The name of the primary object class.
:type classname: str
:param handle: The handle to be checked.
:type handle: str
:returns: Returns whether the object has reference to this handle of
this object type.
:rtype: bool
"""
if classname == 'Event':
return any(ref.ref == handle for ref in self.event_ref_list)
elif classname == 'Person':
return any(ref.ref == handle for ref in self.person_ref_list)
elif classname == 'Family':
return any(ref == handle
for ref in self.family_list + self.parent_family_list +
[ordinance.famc for ordinance in self.lds_ord_list])
elif classname == 'Place':
return any(ordinance.place == handle
for ordinance in self.lds_ord_list)
return False
    def _remove_handle_references(self, classname, handle_list):
        """
        Remove all references to the given handles of the given primary
        object type.

        :param classname: The name of the primary object class ('Event',
                          'Person', 'Family' or 'Place').
        :type classname: str
        :param handle_list: handles whose references must be removed
        :type handle_list: list
        """
        if classname == 'Event':
            # Keep a copy of the birth and death references
            birth_ref = self.get_birth_ref()
            death_ref = self.get_death_ref()
            new_list = [ref for ref in self.event_ref_list
                        if ref.ref not in handle_list]
            # If removing the reference to the event to which the birth or
            # death ref_index points, unset the index
            if (self.birth_ref_index != -1
                    and self.event_ref_list[self.birth_ref_index].ref
                    in handle_list):
                self.set_birth_ref(None)
            if (self.death_ref_index != -1
                    and self.event_ref_list[self.death_ref_index].ref
                    in handle_list):
                self.set_death_ref(None)
            self.event_ref_list = new_list
            # Reset the indexes after deleting events from event_ref_list:
            # set_birth_ref/set_death_ref relocate the surviving references
            # inside the shortened list.
            if self.birth_ref_index != -1:
                self.set_birth_ref(birth_ref)
            if self.death_ref_index != -1:
                self.set_death_ref(death_ref)
        elif classname == 'Person':
            new_list = [ref for ref in self.person_ref_list
                        if ref.ref not in handle_list]
            self.person_ref_list = new_list
        elif classname == 'Family':
            new_list = [handle for handle in self.family_list
                        if handle not in handle_list]
            self.family_list = new_list
            new_list = [handle for handle in self.parent_family_list
                        if handle not in handle_list]
            self.parent_family_list = new_list
            # LDS ordinances keep a family handle too; blank it out.
            for ordinance in self.lds_ord_list:
                if ordinance.famc in handle_list:
                    ordinance.famc = None
        elif classname == 'Place':
            for ordinance in self.lds_ord_list:
                if ordinance.place in handle_list:
                    ordinance.place = None
    def _replace_handle_reference(self, classname, old_handle, new_handle):
        """
        Replace all references to old_handle with references to new_handle.

        When a reference to new_handle already exists, an equivalent
        replaced reference is merged into it instead of being duplicated.

        :param classname: The name of the primary object class ('Event',
                          'Person', 'Family' or 'Place').
        :type classname: str
        :param old_handle: The handle being replaced.
        :type old_handle: str
        :param new_handle: The replacement handle.
        :type new_handle: str
        """
        if classname == 'Event':
            refs_list = [ref.ref for ref in self.event_ref_list]
            new_ref = None
            if new_handle in refs_list:
                new_ref = self.event_ref_list[refs_list.index(new_handle)]
            # Rewrite every occurrence of old_handle, one at a time.
            n_replace = refs_list.count(old_handle)
            for ix_replace in range(n_replace):
                idx = refs_list.index(old_handle)
                self.event_ref_list[idx].ref = new_handle
                refs_list[idx] = new_handle
                if new_ref:
                    # A reference to new_handle already existed: merge the
                    # rewritten reference into it and drop the duplicate.
                    evt_ref = self.event_ref_list[idx]
                    equi = new_ref.is_equivalent(evt_ref)
                    if equi != DIFFERENT:
                        if equi == EQUAL:
                            new_ref.merge(evt_ref)
                        self.event_ref_list.pop(idx)
                        refs_list.pop(idx)
                        # Keep birth/death indexes pointing at the same
                        # entries after the pop.
                        if idx < self.birth_ref_index:
                            self.birth_ref_index -= 1
                        elif idx == self.birth_ref_index:
                            self.birth_ref_index = -1
                            # birth_ref_index should be recalculated which
                            # needs database access!
                        if idx < self.death_ref_index:
                            self.death_ref_index -= 1
                        elif idx == self.death_ref_index:
                            self.death_ref_index = -1
                            # death_ref_index should be recalculated which
                            # needs database access!
        elif classname == 'Person':
            # Same scheme as for events, minus the index bookkeeping.
            refs_list = [ref.ref for ref in self.person_ref_list]
            new_ref = None
            if new_handle in refs_list:
                new_ref = self.person_ref_list[refs_list.index(new_handle)]
            n_replace = refs_list.count(old_handle)
            for ix_replace in range(n_replace):
                idx = refs_list.index(old_handle)
                self.person_ref_list[idx].ref = new_handle
                refs_list[idx] = new_handle
                if new_ref:
                    person_ref = self.person_ref_list[idx]
                    equi = new_ref.is_equivalent(person_ref)
                    if equi != DIFFERENT:
                        if equi == EQUAL:
                            new_ref.merge(person_ref)
                        self.person_ref_list.pop(idx)
                        refs_list.pop(idx)
        elif classname == 'Family':
            while old_handle in self.family_list:
                ix = self.family_list.index(old_handle)
                self.family_list[ix] = new_handle
            while old_handle in self.parent_family_list:
                ix = self.parent_family_list.index(old_handle)
                self.parent_family_list[ix] = new_handle
            handle_list = [ordinance.famc for ordinance in self.lds_ord_list]
            while old_handle in handle_list:
                ix = handle_list.index(old_handle)
                self.lds_ord_list[ix].famc = new_handle
                # blank the shadow entry so the loop terminates
                handle_list[ix] = ''
        elif classname == "Place":
            handle_list = [ordinance.place for ordinance in self.lds_ord_list]
            while old_handle in handle_list:
                ix = handle_list.index(old_handle)
                self.lds_ord_list[ix].place = new_handle
                handle_list[ix] = ''
def get_text_data_list(self):
"""
Return the list of all textual attributes of the object.
:returns: Returns the list of all textual attributes of the object.
:rtype: list
"""
return [self.gramps_id]
def get_text_data_child_list(self):
"""
Return the list of child objects that may carry textual data.
:returns: Returns the list of child objects that may carry textual data.
:rtype: list
"""
check_list = self.lds_ord_list
add_list = [_f for _f in check_list if _f]
return ([self.primary_name] +
self.media_list +
self.alternate_names +
self.address_list +
self.attribute_list +
self.urls +
self.event_ref_list +
add_list +
self.person_ref_list
)
def get_citation_child_list(self):
"""
Return the list of child secondary objects that may refer citations.
:returns: Returns the list of child secondary child objects that may
refer citations.
:rtype: list
"""
return ([self.primary_name] +
self.media_list +
self.alternate_names +
self.address_list +
self.attribute_list +
self.lds_ord_list +
self.person_ref_list +
self.event_ref_list
)
def get_note_child_list(self):
"""
Return the list of child secondary objects that may refer notes.
:returns: Returns the list of child secondary child objects that may
refer notes.
:rtype: list
"""
return ([self.primary_name] +
self.media_list +
self.alternate_names +
self.address_list +
self.attribute_list +
self.lds_ord_list +
self.person_ref_list +
self.event_ref_list
)
def get_referenced_handles(self):
"""
Return the list of (classname, handle) tuples for all directly
referenced primary objects.
:returns: List of (classname, handle) tuples for referenced objects.
:rtype: list
"""
return [('Family', handle) for handle in
(self.family_list + self.parent_family_list)] + (
self.get_referenced_note_handles() +
self.get_referenced_citation_handles() +
self.get_referenced_tag_handles()
)
def get_handle_referents(self):
"""
Return the list of child objects which may, directly or through
their children, reference primary objects.
:returns: Returns the list of objects referencing primary objects.
:rtype: list
"""
return ([self.primary_name] +
self.media_list +
self.alternate_names +
self.address_list +
self.attribute_list +
self.lds_ord_list +
self.person_ref_list +
self.event_ref_list
)
    def merge(self, acquisition):
        """
        Merge the content of acquisition into this person.

        Note: acquisition is modified in place — its primary name is
        inserted at the head of its alternate names before merging.

        :param acquisition: The person to merge with the present person.
        :type acquisition: Person
        """
        # Preserve the absorbed person's Gramps ID as an attribute.
        acquisition_id = acquisition.get_gramps_id()
        if acquisition_id:
            attr = Attribute()
            attr.set_type(_("Merged Gramps ID"))
            attr.set_value(acquisition.get_gramps_id())
            self.add_attribute(attr)
        self._merge_privacy(acquisition)
        # The acquisition's primary name becomes one of our alternates.
        acquisition.alternate_names.insert(0, acquisition.get_primary_name())
        self._merge_alternate_names(acquisition)
        self._merge_event_ref_list(acquisition)
        self._merge_lds_ord_list(acquisition)
        self._merge_media_list(acquisition)
        self._merge_address_list(acquisition)
        self._merge_attribute_list(acquisition)
        self._merge_url_list(acquisition)
        self._merge_person_ref_list(acquisition)
        self._merge_note_list(acquisition)
        self._merge_citation_list(acquisition)
        self._merge_tag_list(acquisition)
        list(map(self.add_parent_family_handle,
                 acquisition.get_parent_family_handle_list()))
        list(map(self.add_family_handle, acquisition.get_family_handle_list()))
def set_primary_name(self, name):
"""
Set the primary name of the Person to the specified :class:`~.name.Name`
instance.
:param name: :class:`~.name.Name` to be assigned to the person
:type name: :class:`~.name.Name`
"""
self.primary_name = name
def get_primary_name(self):
"""
Return the :class:`~.name.Name` instance marked as the Person's primary
name.
:returns: Returns the primary name
:rtype: :class:`~.name.Name`
"""
return self.primary_name
def get_alternate_names(self):
"""
Return the list of alternate :class:`~.name.Name` instances.
:returns: List of :class:`~.name.Name` instances
:rtype: list
"""
return self.alternate_names
def set_alternate_names(self, alt_name_list):
"""
Change the list of alternate names to the passed list.
:param alt_name_list: List of :class:`~.name.Name` instances
:type alt_name_list: list
"""
self.alternate_names = alt_name_list
    def _merge_alternate_names(self, acquisition):
        """
        Merge the list of alternate names from acquisition with our own.

        :param acquisition: the list of alternate names of this object will be
                            merged with the current alternate name list.
        :type acquisition: Person
        """
        # Snapshot of our names taken before merging; new names appended
        # below are therefore not themselves used for de-duplication.
        name_list = self.alternate_names[:]
        primary_name = self.get_primary_name()
        if primary_name and not primary_name.is_empty():
            name_list.insert(0, primary_name)
        for addendum in acquisition.get_alternate_names():
            for name in name_list:
                equi = name.is_equivalent(addendum)
                if equi == IDENTICAL:
                    # exact duplicate: drop it
                    break
                elif equi == EQUAL:
                    # same name, different details: merge them
                    name.merge(addendum)
                    break
            else:
                # for/else: no equivalent found, keep as a new alternate
                self.alternate_names.append(addendum)
def add_alternate_name(self, name):
"""
Add a :class:`~.name.Name` instance to the list of alternative names.
:param name: :class:`~.name.Name` to add to the list
:type name: :class:`~.name.Name`
"""
self.alternate_names.append(name)
def get_nick_name(self):
for name in [self.get_primary_name()] + self.get_alternate_names():
if name.get_nick_name():
return name.get_nick_name()
for attr in self.attribute_list:
if int(attr.type) == AttributeType.NICKNAME:
return attr.get_value()
return ''
def set_gender(self, gender):
"""
Set the gender of the Person.
:param gender: Assigns the Person's gender to one of the
following constants:
- Person.MALE
- Person.FEMALE
- Person.UNKNOWN
:type gender: int
"""
if gender not in (Person.MALE, Person.FEMALE, Person.UNKNOWN):
raise ValueError('Attempt to assign invalid gender')
self.__gender = gender
def get_gender(self):
"""
Return the gender of the Person.
:returns: Returns one of the following constants:
- Person.MALE
- Person.FEMALE
- Person.UNKNOWN
:rtype: int
"""
return self.__gender
gender = property(get_gender, set_gender, None,
'Returns or sets the gender of the person')
def set_birth_ref(self, event_ref):
"""
Assign the birth event to the Person object.
This is accomplished by assigning the :class:`~.eventref.EventRef` of
the birth event in the current database.
:param event_ref: the :class:`~.eventref.EventRef` object associated
with the Person's birth.
:type event_ref: EventRef
"""
if event_ref and not isinstance(event_ref, EventRef):
raise ValueError("Expecting EventRef instance")
if event_ref is None:
self.birth_ref_index = -1
return
# check whether we already have this ref in the list
for self.birth_ref_index, ref in enumerate(self.event_ref_list):
if event_ref.is_equal(ref):
return # Note: self.birth_ref_index already set
self.event_ref_list.append(event_ref)
self.birth_ref_index = len(self.event_ref_list)-1
def set_death_ref(self, event_ref):
"""
Assign the death event to the Person object.
This is accomplished by assigning the :class:`~.eventref.EventRef` of
the death event in the current database.
:param event_ref: the :class:`~.eventref.EventRef` object associated
with the Person's death.
:type event_ref: EventRef
"""
if event_ref and not isinstance(event_ref, EventRef):
raise ValueError("Expecting EventRef instance")
if event_ref is None:
self.death_ref_index = -1
return
# check whether we already have this ref in the list
for self.death_ref_index, ref in enumerate(self.event_ref_list):
if event_ref.is_equal(ref):
return # Note: self.death_ref_index already set
self.event_ref_list.append(event_ref)
self.death_ref_index = len(self.event_ref_list)-1
def get_birth_ref(self):
"""
Return the :class:`~.eventref.EventRef` for Person's birth event.
This should correspond to an :class:`~.event.Event` in the database's
:class:`~.event.Event` list.
:returns: Returns the birth :class:`~.eventref.EventRef` or None if no
birth :class:`~.event.Event` has been assigned.
:rtype: EventRef
"""
if 0 <= self.birth_ref_index < len(self.event_ref_list):
return self.event_ref_list[self.birth_ref_index]
else:
return None
def get_death_ref(self):
"""
Return the :class:`~.eventref.EventRef` for the Person's death event.
This should correspond to an :class:`~.event.Event` in the database's
:class:`~.event.Event` list.
:returns: Returns the death :class:`~.eventref.EventRef` or None if no
death :class:`~.event.Event` has been assigned.
:rtype: event_ref
"""
if 0 <= self.death_ref_index < len(self.event_ref_list):
return self.event_ref_list[self.death_ref_index]
else:
return None
def add_event_ref(self, event_ref):
"""
Add the :class:`~.eventref.EventRef` to the Person instance's
:class:`~.eventref.EventRef` list.
This is accomplished by assigning the :class:`~.eventref.EventRef` of a
valid :class:`~.event.Event` in the current database.
:param event_ref: the :class:`~.eventref.EventRef` to be added to the
Person's :class:`~.eventref.EventRef` list.
:type event_ref: EventRef
"""
if event_ref and not isinstance(event_ref, EventRef):
raise ValueError("Expecting EventRef instance")
# check whether we already have this ref in the list
if not any(event_ref.is_equal(ref) for ref in self.event_ref_list):
self.event_ref_list.append(event_ref)
def get_event_ref_list(self):
"""
Return the list of :class:`~.eventref.EventRef` objects associated with
:class:`~.event.Event` instances.
:returns: Returns the list of :class:`~.eventref.EventRef` objects
associated with the Person instance.
:rtype: list
"""
return self.event_ref_list
def get_primary_event_ref_list(self):
"""
Return the list of :class:`~.eventref.EventRef` objects associated with
:class:`~.event.Event` instances that have been marked as primary
events.
:returns: Returns generator of :class:`~.eventref.EventRef` objects
associated with the Person instance.
:rtype: generator
"""
return (ref for ref in self.event_ref_list
if ref.get_role() == EventRoleType.PRIMARY
)
def set_event_ref_list(self, event_ref_list):
"""
Set the Person instance's :class:`~.eventref.EventRef` list to the
passed list.
:param event_ref_list: List of valid :class:`~.eventref.EventRef`
objects.
:type event_ref_list: list
"""
self.event_ref_list = event_ref_list
    def _merge_event_ref_list(self, acquisition):
        """
        Merge the list of event references from acquisition with our own.
        :param acquisition: the event references list of this object will be
                            merged with the current event references list.
        :type acquisition: Person
        """
        # Snapshot our list so references appended below are not re-compared.
        eventref_list = self.event_ref_list[:]
        for idx, addendum in enumerate(acquisition.get_event_ref_list()):
            for eventref in eventref_list:
                equi = eventref.is_equivalent(addendum)
                if equi == IDENTICAL:
                    break
                elif equi == EQUAL:
                    eventref.merge(addendum)
                    break
            else:
                # for/else: no identical/equal ref found, so adopt addendum.
                self.event_ref_list.append(addendum)
                # Adopt the other person's birth/death index only when we
                # have none of our own (-1 means unset).
                if (self.birth_ref_index == -1 and
                        idx == acquisition.birth_ref_index):
                    self.birth_ref_index = len(self.event_ref_list) - 1
                if (self.death_ref_index == -1 and
                        idx == acquisition.death_ref_index):
                    self.death_ref_index = len(self.event_ref_list) - 1
def add_family_handle(self, family_handle):
"""
Add the :class:`~.family.Family` handle to the Person instance's
:class:`~.family.Family` list.
This is accomplished by assigning the handle of a valid
:class:`~.family.Family` in the current database.
Adding a :class:`~.family.Family` handle to a Person does not
automatically update the corresponding :class:`~.family.Family`. The
developer is responsible to make sure that when a
:class:`~.family.Family` is added to Person, that the Person is assigned
to either the father or mother role in the :class:`~.family.Family`.
:param family_handle: handle of the :class:`~.family.Family` to be added
to the Person's :class:`~.family.Family` list.
:type family_handle: str
"""
if family_handle not in self.family_list:
self.family_list.append(family_handle)
def set_preferred_family_handle(self, family_handle):
"""
Set the family_handle specified to be the preferred
:class:`~.family.Family`.
The preferred :class:`~.family.Family` is determined by the first
:class:`~.family.Family` in the :class:`~.family.Family` list, and is
typically used to indicate the preferred :class:`~.family.Family` for
navigation or reporting.
The family_handle must already be in the list, or the function
call has no effect.
:param family_handle: Handle of the :class:`~.family.Family` to make the
preferred :class:`~.family.Family`.
:type family_handle: str
:returns: True if the call succeeded, False if the family_handle
was not already in the :class:`~.family.Family` list.
:rtype: bool
"""
if family_handle in self.family_list:
self.family_list.remove(family_handle)
self.family_list = [family_handle] + self.family_list
return True
else:
return False
def get_family_handle_list(self):
"""
Return the list of :class:`~.family.Family` handles in which the person
is a parent or spouse.
:returns: Returns the list of handles corresponding to the
:class:`~.family.Family` records with which the person
is associated.
:rtype: list
"""
return self.family_list
def set_family_handle_list(self, family_list):
"""
Assign the passed list to the Person's list of families in which it is
a parent or spouse.
:param family_list: List of :class:`~.family.Family` handles to be
associated with the Person
:type family_list: list
"""
self.family_list = family_list
def clear_family_handle_list(self):
"""
Remove all :class:`~.family.Family` handles from the
:class:`~.family.Family` list.
"""
self.family_list = []
def remove_family_handle(self, family_handle):
"""
Remove the specified :class:`~.family.Family` handle from the list of
marriages/partnerships.
If the handle does not exist in the list, the operation has no effect.
:param family_handle: :class:`~.family.Family` handle to remove from
the list
:type family_handle: str
:returns: True if the handle was removed, False if it was not
in the list.
:rtype: bool
"""
if family_handle in self.family_list:
self.family_list.remove(family_handle)
return True
else:
return False
def get_parent_family_handle_list(self):
"""
Return the list of :class:`~.family.Family` handles in which the person
is a child.
:returns: Returns the list of handles corresponding to the
:class:`~.family.Family` records with which the person is a
child.
:rtype: list
"""
return self.parent_family_list
    def set_parent_family_handle_list(self, family_list):
        """
        Assign the passed list as the families in which this person is a
        child.
        :param family_list: List of :class:`~.family.Family` handles to be
                            associated with the Person as a child.
        :type family_list: list
        """
        self.parent_family_list = family_list
def add_parent_family_handle(self, family_handle):
"""
Add the :class:`~.family.Family` handle to the Person instance's list of
families in which it is a child.
This is accomplished by assigning the handle of a valid
:class:`~.family.Family` in the current database.
Adding a :class:`~.family.Family` handle to a Person does not
automatically update the corresponding :class:`~.family.Family`. The
developer is responsible to make sure that when a
:class:`~.family.Family` is added to Person, that the Person is
added to the :class:`~.family.Family` instance's child list.
:param family_handle: handle of the :class:`~.family.Family` to be added
to the Person's :class:`~.family.Family` list.
:type family_handle: str
"""
if not isinstance(family_handle, str):
raise ValueError("Expecting handle, obtained %s" % str(family_handle))
if family_handle not in self.parent_family_list:
self.parent_family_list.append(family_handle)
def clear_parent_family_handle_list(self):
"""
Remove all :class:`~.family.Family` handles from the parent
:class:`~.family.Family` list.
"""
self.parent_family_list = []
    def remove_parent_family_handle(self, family_handle):
        """
        Remove the specified :class:`~.family.Family` handle from the list of
        parent families (families in which the person is a child).
        If the handle does not exist in the list, the operation has no effect.
        :param family_handle: :class:`~.family.Family` handle to remove from the
                              list
        :type family_handle: str
        :returns: True if the handle was removed, False if it was not in the
                  list.
        :rtype: bool
        """
        if family_handle in self.parent_family_list:
            self.parent_family_list.remove(family_handle)
            return True
        else:
            return False
def set_main_parent_family_handle(self, family_handle):
"""
Set the main :class:`~.family.Family` in which the Person is a child.
The main :class:`~.family.Family` is the :class:`~.family.Family`
typically used for reports and navigation. This is accomplished by
moving the :class:`~.family.Family` to the beginning of the list. The
family_handle must be in the list for this to have any effect.
:param family_handle: handle of the :class:`~.family.Family` to be
marked as the main :class:`~.family.Family`
:type family_handle: str
:returns: Returns True if the assignment has successful
:rtype: bool
"""
if family_handle in self.parent_family_list:
self.parent_family_list.remove(family_handle)
self.parent_family_list = [family_handle] + self.parent_family_list
return True
else:
return False
def get_main_parents_family_handle(self):
"""
Return the handle of the :class:`~.family.Family` considered to be the
main :class:`~.family.Family` in which the Person is a child.
:returns: Returns the family_handle if a family_handle exists,
If no :class:`~.family.Family` is assigned, None is returned
:rtype: str
"""
if self.parent_family_list:
return self.parent_family_list[0]
else:
return None
def add_person_ref(self, person_ref):
"""
Add the :class:`~.personref.PersonRef` to the Person instance's
:class:`~.personref.PersonRef` list.
:param person_ref: the :class:`~.personref.PersonRef` to be added to the
Person's :class:`~.personref.PersonRef` list.
:type person_ref: PersonRef
"""
if person_ref and not isinstance(person_ref, PersonRef):
raise ValueError("Expecting PersonRef instance")
self.person_ref_list.append(person_ref)
def get_person_ref_list(self):
"""
Return the list of :class:`~.personref.PersonRef` objects.
:returns: Returns the list of :class:`~.personref.PersonRef` objects.
:rtype: list
"""
return self.person_ref_list
def set_person_ref_list(self, person_ref_list):
"""
Set the Person instance's :class:`~.personref.PersonRef` list to the
passed list.
:param person_ref_list: List of valid :class:`~.personref.PersonRef`
objects
:type person_ref_list: list
"""
self.person_ref_list = person_ref_list
    def _merge_person_ref_list(self, acquisition):
        """
        Merge the list of person references from acquisition with our own.
        :param acquisition: the list of person references of this person will be
                            merged with the current person references list.
        :type acquisition: Person
        """
        # Snapshot our list so references appended below are not re-compared.
        personref_list = self.person_ref_list[:]
        for addendum in acquisition.get_person_ref_list():
            for personref in personref_list:
                equi = personref.is_equivalent(addendum)
                if equi == IDENTICAL:
                    break
                elif equi == EQUAL:
                    personref.merge(addendum)
                    break
            else:
                # for/else: no identical/equal ref found, so adopt addendum.
                self.person_ref_list.append(addendum)
| gpl-2.0 |
rooshilp/CMPUT410W15-project | testenv/lib/python2.7/site-packages/PIL/IcoImagePlugin.py | 13 | 9206 | #
# The Python Imaging Library.
# $Id$
#
# Windows Icon support for PIL
#
# History:
# 96-05-27 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
# This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis
# <casadebender@gmail.com>.
# https://code.google.com/p/casadebender/wiki/Win32IconImagePlugin
#
# Icon format references:
# * http://en.wikipedia.org/wiki/ICO_(file_format)
# * http://msdn.microsoft.com/en-us/library/ms997538.aspx
__version__ = "0.1"
import struct
from io import BytesIO
from PIL import Image, ImageFile, BmpImagePlugin, PngImagePlugin, _binary
from math import log, ceil
#
# --------------------------------------------------------------------
i8 = _binary.i8
i16 = _binary.i16le
i32 = _binary.i32le
_MAGIC = b"\0\0\1\0"
def _save(im, fp, filename):
    """Write *im* to *fp* as a Windows .ico file.

    Each requested size is stored as a 32-bit PNG frame. The caller may
    override the frame sizes via the ``sizes`` encoder option; entries
    larger than the source image or the 255-pixel directory limit are
    dropped.
    """
    fp.write(_MAGIC)  # (2+2)
    sizes = im.encoderinfo.get("sizes",
                               [(16, 16), (24, 24), (32, 32), (48, 48),
                                (64, 64), (128, 128), (255, 255)])
    width, height = im.size
    # Keep only sizes that fit.  The original code called filter() without
    # keeping its result, so oversized entries were never actually pruned.
    sizes = [size for size in sizes
             if not (size[0] > width or size[1] > height or
                     size[0] > 255 or size[1] > 255)]
    sizes = sorted(sizes, key=lambda x: x[0])
    fp.write(struct.pack("H", len(sizes)))  # idCount(2)
    # Image payloads start right after the fixed-size directory entries.
    offset = fp.tell() + len(sizes)*16
    for size in sizes:
        width, height = size
        fp.write(struct.pack("B", width))  # bWidth(1)
        fp.write(struct.pack("B", height))  # bHeight(1)
        fp.write(b"\0")  # bColorCount(1)
        fp.write(b"\0")  # bReserved(1)
        fp.write(b"\0\0")  # wPlanes(2)
        fp.write(struct.pack("H", 32))  # wBitCount(2)
        # Render this size as a PNG into memory.
        image_io = BytesIO()
        tmp = im.copy()
        tmp.thumbnail(size, Image.LANCZOS)
        tmp.save(image_io, "png")
        image_io.seek(0)
        image_bytes = image_io.read()
        bytes_len = len(image_bytes)
        fp.write(struct.pack("I", bytes_len))  # dwBytesInRes(4)
        fp.write(struct.pack("I", offset))  # dwImageOffset(4)
        # Jump ahead to write the payload, then return to the directory.
        current = fp.tell()
        fp.seek(offset)
        fp.write(image_bytes)
        offset = offset + bytes_len
        fp.seek(current)
def _accept(prefix):
    # True when the buffer begins with the 4-byte ICO magic number.
    return prefix.startswith(_MAGIC)
class IcoFile:
    # Low-level parser for the ICO container: reads the directory of frame
    # headers and decodes individual frames (PNG or BMP+mask) on demand.
    def __init__(self, buf):
        """
        Parse image from file-like object containing ico file data
        """
        # check magic
        s = buf.read(6)
        if not _accept(s):
            raise SyntaxError("not an ICO file")
        self.buf = buf
        self.entry = []
        # Number of items in file
        self.nb_items = i16(s[4:])
        # Get headers for each item (16 bytes per directory entry)
        for i in range(self.nb_items):
            s = buf.read(16)
            icon_header = {
                'width': i8(s[0]),
                'height': i8(s[1]),
                'nb_color': i8(s[2]),  # No. of colors in image (0 if >=8bpp)
                'reserved': i8(s[3]),
                'planes': i16(s[4:]),
                'bpp': i16(s[6:]),
                'size': i32(s[8:]),
                'offset': i32(s[12:])
            }
            # See Wikipedia: a stored 0 means 256 pixels
            for j in ('width', 'height'):
                if not icon_header[j]:
                    icon_header[j] = 256
            # See Wikipedia notes about color depth.
            # We need this just to differ images with equal sizes
            icon_header['color_depth'] = (icon_header['bpp'] or
                                          (icon_header['nb_color'] != 0 and
                                           ceil(log(icon_header['nb_color'],
                                                    2))) or 256)
            icon_header['dim'] = (icon_header['width'], icon_header['height'])
            icon_header['square'] = (icon_header['width'] *
                                     icon_header['height'])
            self.entry.append(icon_header)
        # Order entries largest/deepest first (stable sorts: depth, then area)
        self.entry = sorted(self.entry, key=lambda x: x['color_depth'])
        # ICO images are usually squares
        # self.entry = sorted(self.entry, key=lambda x: x['width'])
        self.entry = sorted(self.entry, key=lambda x: x['square'])
        self.entry.reverse()
    def sizes(self):
        """
        Get a list of all available icon sizes and color depths.
        """
        return set((h['width'], h['height']) for h in self.entry)
    def getimage(self, size, bpp=False):
        """
        Get an image from the icon
        """
        # Fall back to the first (largest) frame when no exact match exists.
        for (i, h) in enumerate(self.entry):
            if size == h['dim'] and (bpp is False or bpp == h['color_depth']):
                return self.frame(i)
        return self.frame(0)
    def frame(self, idx):
        """
        Get an image from frame idx
        """
        header = self.entry[idx]
        self.buf.seek(header['offset'])
        data = self.buf.read(8)
        self.buf.seek(header['offset'])
        if data[:8] == PngImagePlugin._MAGIC:
            # png frame
            im = PngImagePlugin.PngImageFile(self.buf)
        else:
            # XOR + AND mask bmp frame
            im = BmpImagePlugin.DibImageFile(self.buf)
            # change tile dimension to only encompass XOR image
            im.size = (im.size[0], int(im.size[1] / 2))
            d, e, o, a = im.tile[0]
            im.tile[0] = d, (0, 0) + im.size, o, a
            # figure out where AND mask image starts
            mode = a[0]
            bpp = 8
            for k in BmpImagePlugin.BIT2MODE.keys():
                if mode == BmpImagePlugin.BIT2MODE[k][1]:
                    bpp = k
                    break
            if 32 == bpp:
                # 32-bit color depth icon image allows semitransparent areas
                # PIL's DIB format ignores transparency bits, recover them.
                # The DIB is packed in BGRX byte order where X is the alpha
                # channel.
                # Back up to start of bmp data
                self.buf.seek(o)
                # extract every 4th byte (eg. 3,7,11,15,...)
                alpha_bytes = self.buf.read(im.size[0] * im.size[1] * 4)[3::4]
                # convert to an 8bpp grayscale image
                mask = Image.frombuffer(
                    'L',            # 8bpp
                    im.size,        # (w, h)
                    alpha_bytes,    # source chars
                    'raw',          # raw decoder
                    ('L', 0, -1)    # 8bpp inverted, unpadded, reversed
                )
            else:
                # get AND image from end of bitmap
                w = im.size[0]
                if (w % 32) > 0:
                    # bitmap row data is aligned to word boundaries
                    w += 32 - (im.size[0] % 32)
                # the total mask data is
                # padded row size * height / bits per char
                and_mask_offset = o + int(im.size[0] * im.size[1] *
                                          (bpp / 8.0))
                total_bytes = int((w * im.size[1]) / 8)
                self.buf.seek(and_mask_offset)
                maskData = self.buf.read(total_bytes)
                # convert raw data to image
                mask = Image.frombuffer(
                    '1',            # 1 bpp
                    im.size,        # (w, h)
                    maskData,       # source chars
                    'raw',          # raw decoder
                    ('1;I', int(w/8), -1)  # 1bpp inverted, padded, reversed
                )
            # now we have two images, im is XOR image and mask is AND image
            # apply mask image as alpha channel
            im = im.convert('RGBA')
            im.putalpha(mask)
        return im
##
# Image plugin for Windows Icon files.
class IcoImageFile(ImageFile.ImageFile):
    """
    PIL read-only image support for Microsoft Windows .ico files.
    By default the largest resolution image in the file will be loaded. This
    can be changed by altering the 'size' attribute before calling 'load'.
    The info dictionary has a key 'sizes' that is a list of the sizes available
    in the icon file.
    Handles classic, XP and Vista icon formats.
    This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis
    <casadebender@gmail.com>.
    https://code.google.com/p/casadebender/wiki/Win32IconImagePlugin
    """
    format = "ICO"
    format_description = "Windows Icon"
    def _open(self):
        # Parse the directory and default to the largest available frame.
        self.ico = IcoFile(self.fp)
        self.info['sizes'] = self.ico.sizes()
        self.size = self.ico.entry[0]['dim']
        self.load()
    def load(self):
        # Decode the frame matching the currently selected size and adopt
        # its pixel data as our own.
        im = self.ico.getimage(self.size)
        # if tile is PNG, it won't really be loaded yet
        im.load()
        self.im = im.im
        self.mode = im.mode
        self.size = im.size
    def load_seek(self):
        # Flag the ImageFile.Parser so that it
        # just does all the decode at the end.
        pass
#
# --------------------------------------------------------------------
# Register the ICO reader, writer and file extension with PIL's dispatch.
Image.register_open("ICO", IcoImageFile, _accept)
Image.register_save("ICO", _save)
Image.register_extension("ICO", ".ico")
| gpl-2.0 |
yhekma/datamounter | ansible_fetcher.py | 1 | 2718 | #!/usr/bin/env python
try:
import argparse
except ImportError:
from local_libs import argparse_local as argparse
import ConfigParser
from dlib.ansible_helpers import flatten_ansible_struct, fetch_struct, run_custom_command, gut_struct, save_struct
def load_ini(path):
    """
    Parse an ini file into a nested dict of sections.
    :param path: Absolute path of the ini file
    :type path: str
    :returns: mapping of section name -> {option: value}
    :rtype: dict
    """
    config = ConfigParser.RawConfigParser()
    config.read(path)
    result = {}
    for section in config.sections():
        for option, value in config.items(section):
            # setdefault replaces the original try/except KeyError dance
            result.setdefault(section, {})[option] = value
    return result
if __name__ == '__main__':
    # Command-line front end: gather facts for a host pattern via Ansible
    # and dump them as JSON for the datamounter.
    parser = argparse.ArgumentParser(description="Fetch information from remote systems using Ansible")
    required = parser.add_argument_group('required arguments')
    required.add_argument("--pattern", "-p", dest="pattern", default=False, required=True,
                          help="Pattern to extract info from. Needed when generating a cache file and when not using "
                               "a cache file")
    parser.add_argument("--retries", "-r", dest="retries", default=3, required=False,
                        help="Optional number of retries to contact unreachable hosts")
    required.add_argument("-f", "--filename", dest="filename", required=True,
                          help="Destination filename for the json data.")
    parser.add_argument("--custom", required=False,
                        help="Optional ini file with custom commands to run on remote host which output to expose. "
                             "Files will show up under custom_facts/.",
                        default=None)
    parser.add_argument("--skeleton", "-s", action="store_true", required=False, default=False,
                        help="Remove all values from the datastructure, essentially leaving only the structure "
                             "itself. Useful in combination with --realtime")
    args = parser.parse_args()
    if args.custom:
        # Run each configured custom command; ini layout is
        # [host] -> {exposed_filename: command}.
        cust_input = load_ini(args.custom)
        custom_commands = {}
        for host in cust_input.keys():
            for filename in cust_input[host].keys():
                custom_commands[filename] = run_custom_command(host, cust_input[host][filename], args.pattern,
                                                               args.skeleton)
    else:
        custom_commands = None
    # Fetch, flatten, optionally strip values, then persist to disk.
    tempstruct = fetch_struct(args.pattern, args.retries)
    struct = flatten_ansible_struct(tempstruct, custom_commands)
    if args.skeleton:
        gut_struct(struct)
    save_struct(args.filename, struct)
| gpl-2.0 |
askhl/ase | ase/io/dacapo.py | 6 | 2489 | import numpy as np
from ase.calculators.singlepoint import SinglePointCalculator
from ase.atom import Atom
from ase.atoms import Atoms
def read_dacapo_text(fileobj):
    """Read atoms (and total energy, when present) from a Dacapo text file.

    :param fileobj: file name or open file-like object
    :returns: ase Atoms, with a SinglePointCalculator attached when a
              converged total energy could be located in the output.
    """
    if isinstance(fileobj, str):
        fileobj = open(fileobj)
    lines = fileobj.readlines()
    # Cell vectors follow the 'Structure: A1 A2 A3' marker, one per line.
    i = lines.index(' Structure: A1 A2 A3\n')
    cell = np.array([[float(w) for w in line.split()[2:5]]
                     for line in lines[i + 1:i + 4]]).transpose()
    i = lines.index(' Structure: >> Ionic positions/velocities ' +
                    'in cartesian coordinates <<\n')
    atoms = []
    for line in lines[i + 4:]:
        words = line.split()
        # Atom rows have exactly 9 columns; anything else ends the table.
        if len(words) != 9:
            break
        Z, x, y, z = words[2:6]
        atoms.append(Atom(int(Z), [float(x), float(y), float(z)]))
    atoms = Atoms(atoms, cell=cell.tolist())
    try:
        i = lines.index(
            ' DFT: CPU time Total energy\n')
    except ValueError:
        pass
    else:
        # Column of the self-consistent energy in the iteration table.
        column = lines[i + 3].split().index('selfcons') - 1
        try:
            i2 = lines.index(' ANALYSIS PART OF CODE\n', i)
        except ValueError:
            pass
        else:
            # Walk back from the analysis section to the last DFT line.
            while i2 > i:
                if lines[i2].startswith(' DFT:'):
                    break
                i2 -= 1
            energy = float(lines[i2].split()[column])
            atoms.set_calculator(SinglePointCalculator(atoms, energy=energy))
    return atoms
def read_dacapo(filename):
    """Read atoms, energy and forces from a Dacapo NetCDF output file.

    :param filename: path of the NetCDF file
    :returns: ase Atoms with a SinglePointCalculator attached (energy and
              forces are None when absent from the file).
    """
    from ase.io.pupynere import NetCDFFile
    nc = NetCDFFile(filename)
    dims = nc.dimensions
    vars = nc.variables
    # Last entry of each trajectory variable is the final configuration.
    cell = vars['UnitCell'][-1]
    try:
        magmoms = vars['InitialAtomicMagneticMoment'][:]
    except KeyError:
        magmoms = None
    try:
        tags = vars['AtomTags'][:]
    except KeyError:
        tags = None
    atoms = Atoms(scaled_positions=vars['DynamicAtomPositions'][-1],
                  symbols=[(a + b).strip()
                           for a, b in vars['DynamicAtomSpecies'][:]],
                  cell=cell,
                  magmoms=magmoms,
                  tags=tags,
                  pbc=True)
    try:
        energy = vars['TotalEnergy'][-1]
        force = vars['DynamicAtomForces'][-1]
    except KeyError:
        energy = None
        force = None
    # FIXME: magnetic moments are not forwarded to the calculator
    calc = SinglePointCalculator(atoms, energy=energy, forces=force)
    atoms.set_calculator(calc)
    return atoms
| gpl-2.0 |
le9i0nx/ansible | lib/ansible/plugins/lookup/template.py | 78 | 3824 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2012-17 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: template
author: Michael DeHaan <michael.dehaan@gmail.com>
version_added: "0.9"
short_description: retrieve contents of file after templating with Jinja2
description:
- this is mostly a noop, to be used as a with_list loop when you dont want the content transformed in any way.
options:
_terms:
description: list of files to template
"""
EXAMPLES = """
- name: show templating results
debug: msg="{{ lookup('template', './some_template.j2') }}
"""
RETURN = """
_raw:
description: file(s) content after templating
"""
import os
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils._text import to_bytes, to_text
from ansible.template import generate_ansible_template_vars
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class LookupModule(LookupBase):
    def run(self, terms, variables, **kwargs):
        """Render each template file named in *terms* and return the results.

        :param terms: template file names/paths to render
        :param variables: the available ansible variables
        :raises AnsibleError: when a template file cannot be located.
        """
        convert_data_p = kwargs.get('convert_data', True)
        lookup_template_vars = kwargs.get('template_vars', {})
        ret = []
        for term in terms:
            display.debug("File lookup term: %s" % term)
            lookupfile = self.find_file_in_search_path(variables, 'templates', term)
            display.vvvv("File lookup using %s as file" % lookupfile)
            if lookupfile:
                with open(to_bytes(lookupfile, errors='surrogate_or_strict'), 'rb') as f:
                    template_data = to_text(f.read(), errors='surrogate_or_strict')
                # set jinja2 internal search path for includes
                searchpath = variables.get('ansible_search_path')
                if searchpath:
                    # our search paths aren't actually the proper ones for jinja includes.
                    # We want to search into the 'templates' subdir of each search path in
                    # addition to our original search paths.
                    newsearchpath = []
                    for p in searchpath:
                        newsearchpath.append(os.path.join(p, 'templates'))
                        newsearchpath.append(p)
                    searchpath = newsearchpath
                else:
                    searchpath = [self._loader._basedir, os.path.dirname(lookupfile)]
                self._templar.environment.loader.searchpath = searchpath
                # The template will have access to all existing variables,
                # plus some added by ansible (e.g., template_{path,mtime}),
                # plus anything passed to the lookup with the template_vars=
                # argument.
                # NOTE(review): 'vars' shadows the builtin of the same name.
                vars = variables.copy()
                vars.update(generate_ansible_template_vars(lookupfile))
                vars.update(lookup_template_vars)
                self._templar.set_available_variables(vars)
                # do the templating
                res = self._templar.template(template_data, preserve_trailing_newlines=True,
                                             convert_data=convert_data_p, escape_backslashes=False)
                ret.append(res)
            else:
                raise AnsibleError("the template file %s could not be found for the lookup" % term)
        return ret
| gpl-3.0 |
kumy/geokrety-api | app/models/events/move.py | 2 | 2782 |
from sqlalchemy import inspect
from app.views.pika_ import pika_
from geokrety_api_models import Move
def _has_changes_that_need_recompute(instance):
    """Return True when a Move change requires recomputing its GeoKret.

    A recompute is needed when the move's type, date or target geokret
    changed.  The original fell through and returned an implicit None;
    an explicit False keeps the boolean contract obvious (truthiness,
    and therefore caller behavior, is unchanged).
    """
    attrs = inspect(instance).attrs
    if attrs.type.history.has_changes() or \
            attrs.moved_on_datetime.history.has_changes() or \
            attrs.geokret.history.has_changes():
        return True
    return False
def after_flush_move(session, flush_context):
    """SQLAlchemy after_flush hook: publish Move changes to RabbitMQ.

    Emits one geokrety.move.{insert,update,delete} message per affected
    Move so downstream workers can recompute GeoKret state.
    """
    # New moves -> insert events.
    for instance in session.new:
        if not isinstance(instance, Move):
            continue
        with pika_.pool.acquire() as cxn:
            cxn.channel.basic_publish(exchange='geokrety',
                                      routing_key="geokrety.move.insert",
                                      body="geokret_id:{0.geokret.id} "
                                           "move_id:{0.id} "
                                           "move_type:{0.type} "
                                           "user_id:{0.author.id}".format(instance))
    # Dirty moves -> update events, but only for changes that matter.
    for instance in session.dirty:
        if not isinstance(instance, Move):
            continue
        if _has_changes_that_need_recompute(instance):
            with pika_.pool.acquire() as cxn:
                cxn.channel.basic_publish(exchange='geokrety',
                                          routing_key="geokrety.move.update",
                                          body="geokret_id:{0.geokret.id} "
                                               "move_id:{0.id} "
                                               "move_type:{0.type} "
                                               "user_id:{0.author.id}".format(instance))
                # When the move changed geokret, the previously attached
                # geokret(s) must be recomputed as well (history[2] holds
                # the deleted/old values).
                if inspect(instance).attrs.geokret.history.has_changes():
                    for old_geokret_id in inspect(instance).attrs.geokret.history[2]:
                        cxn.channel.basic_publish(exchange='geokrety',
                                                  routing_key="geokrety.move.update",
                                                  body="geokret_id:{1} "
                                                       "move_id:{0.id} "
                                                       "move_type:{0.type} "
                                                       "user_id:{0.author.id}".format(instance, old_geokret_id))
    # Deleted moves -> delete events.
    for instance in session.deleted:
        if not isinstance(instance, Move):
            continue
        with pika_.pool.acquire() as cxn:
            cxn.channel.basic_publish(exchange='geokrety',
                                      routing_key="geokrety.move.delete",
                                      body="geokret_id:{0.geokret.id} "
                                           "move_id:{0.id} "
                                           "move_type:{0.type} "
                                           "user_id:{0.author.id}".format(instance))
| gpl-3.0 |
h-naoto/gobgp | tools/pyang_plugins/bgpyang2golang.py | 1 | 20017 | # Copyright (C) 2014,2015 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import optparse
import StringIO
import sys
from pyang import plugin
_COPYRIGHT_NOTICE = """
// DO NOT EDIT
// generated by pyang using OpenConfig https://github.com/openconfig/public
//
// Copyright (C) 2014,2015 Nippon Telegraph and Telephone Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
"""
emitted_type_names = {}
def pyang_plugin_init():
    # Entry point called by pyang at startup to register this output plugin.
    plugin.register_plugin(GolangPlugin())
class GolangPlugin(plugin.PyangPlugin):
    """pyang output plugin ('-f golang') that renders YANG modules as Go
    structs for gobgp's config package."""
    def add_output_format(self, fmts):
        # Accept multiple input modules on the command line.
        self.multiple_modules = True
        fmts['golang'] = self
    def emit(self, ctx, modules, fd):
        # Per-run scratch state is stored directly on the pyang context.
        ctx.golang_identity_map = {}
        ctx.golang_typedef_map = {}
        ctx.golang_struct_def = []
        ctx.golang_struct_names = {}
        ctx.prefix_rel = {}
        ctx.module_deps = []
        for m in modules:
            check_module_deps(ctx, m)
        # visit yang statements
        visit_modules(ctx)
        # emit bgp_configs
        emit_go(ctx)
def visit_modules(ctx):
    """Walk every dependency module: typedefs and identities first, so that
    the container pass can resolve references to them."""
    for mod in ctx.module_deps:
        visit_typedef(ctx, mod)
        visit_identity(ctx, mod)
    for mod in ctx.module_deps:
        visit_children(ctx, mod, mod.i_children)
def emit_go(ctx):
    """Print the generated Go source: header, typedefs/identities per
    module, then one struct per collected container (deduplicated)."""
    ctx.golang_struct_def.reverse()
    generate_header(ctx)
    for mod in ctx.module_deps:
        if mod in _module_excluded:
            continue
        emit_typedef(ctx, mod)
        emit_identity(ctx, mod)
    emitted = set()
    for struct in ctx.golang_struct_def:
        name = struct.uniq_name
        if name in emitted:
            continue
        emitted.add(name)
        emit_class_def(ctx, struct, name, struct.module_prefix)
def check_module_deps(ctx, module):
    """Depth-first walk of a module's imports: record prefix relations in
    ctx.prefix_rel and append modules to ctx.module_deps so that every
    dependency precedes its users."""
    own_prefix = module.i_prefix
    for rel_prefix, val in module.i_prefixes.items():
        dep = ctx.get_module(val[0])
        if dep.i_prefix != own_prefix:
            check_module_deps(ctx, dep)
        ctx.prefix_rel[dep.i_prefix] = rel_prefix
        if dep not in ctx.module_deps and \
                dep.i_modulename not in _module_excluded:
            ctx.module_deps.append(dep)
def dig_leafref(type_obj):
    """Follow a chain of leafref types until the concrete target type is
    reached, and return that type statement."""
    target = type_obj.i_type_spec.i_target_node.search_one('type')
    while is_leafref(target):
        target = target.i_type_spec.i_target_node.search_one('type')
    return target
def emit_class_def(ctx, yang_statement, struct_name, prefix):
    """Print one Go struct for a YANG container/list statement.

    Each child becomes a struct field with a mapstructure tag; leaf types
    are mapped to Go types (identityref, leafref, enumeration, translated
    primitives, or previously collected typedef/struct names).
    """
    o = StringIO.StringIO()
    # A container wrapping a single list is flattened by the parent; skip.
    if len(yang_statement.i_children) == 1 and is_list(yang_statement.i_children[0]):
        return
    print >> o, '//struct for container %s:%s' % (prefix, yang_statement.arg)
    print >> o, 'type %s struct {' % convert_to_golang(struct_name)
    for child in yang_statement.i_children:
        if child.path in _path_exclude:
            continue
        container_or_list_name = child.uniq_name
        val_name_go = convert_to_golang(child.arg)
        child_prefix = get_orig_prefix(child.i_orig_module)
        tag_name = child.uniq_name.lower()
        print >> o, ' // original -> %s:%s' % \
            (child_prefix, container_or_list_name)
        # case leaf
        if is_leaf(child):
            type_obj = child.search_one('type')
            type_name = type_obj.arg
            # case identityref
            if type_name == 'identityref':
                emit_type_name = convert_to_golang(type_obj.search_one('base').arg.split(':')[-1])
            # case leafref
            elif type_name == 'leafref':
                t = dig_leafref(type_obj)
                if is_translation_required(t):
                    print >> o, ' //%s:%s\'s original type is %s' \
                        % (child_prefix, container_or_list_name, t.arg)
                    emit_type_name = translate_type(t.arg)
                elif is_identityref(t):
                    emit_type_name = convert_to_golang(t.search_one('base').arg.split(':')[-1])
                else:
                    emit_type_name = t.arg
            # case embeded enumeration
            elif type_name == 'enumeration':
                emit_type_name = val_name_go
            # case translation required
            elif is_translation_required(type_obj):
                print >> o, ' //%s:%s\'s original type is %s'\
                    % (child_prefix, container_or_list_name, type_name)
                emit_type_name = translate_type(type_name)
            # case other primitives
            elif is_builtin_type(type_obj):
                emit_type_name = type_name
            # default
            else:
                base_module = type_obj.i_orig_module.i_prefix
                t = lookup_typedef(ctx, base_module, type_name)
                # print(t)
                emit_type_name = t.golang_name
        # case 'case'
        if is_case(child):
            continue
        # case leaflist
        if is_leaflist(child):
            type_obj = child.search_one('type')
            type_name = type_obj.arg
            val_name_go = val_name_go + 'List'
            tag_name += '-list'
            # case leafref
            if type_name == 'leafref':
                t = type_obj.i_type_spec.i_target_node.search_one('type')
                emit_type_name = '[]'+t.arg
            elif type_name == 'identityref':
                emit_type_name = '[]'+convert_to_golang(type_obj.search_one('base').arg.split(':')[-1])
            # case translation required
            elif is_translation_required(type_obj):
                print >> o, ' // original type is list of %s' % (type_obj.arg)
                emit_type_name = '[]'+translate_type(type_name)
            # case other primitives
            elif is_builtin_type(type_obj):
                emit_type_name = '[]'+type_name
            # default
            else:
                base_module = type_obj.i_orig_module.i_prefix
                t = lookup_typedef(ctx, base_module, type_name)
                emit_type_name = '[]'+t.golang_name
        # case container
        elif is_container(child) or is_choice(child):
            key = child_prefix+':'+container_or_list_name
            t = ctx.golang_struct_names[key]
            val_name_go = t.golang_name
            # A container holding a single list is emitted as a Go slice.
            if len(t.i_children) == 1 and is_list(t.i_children[0]):
                l = t.i_children[0]
                emit_type_name = '[]' + l.golang_name
            else:
                emit_type_name = t.golang_name
        # case list
        elif is_list(child):
            key = child_prefix+':'+container_or_list_name
            t = ctx.golang_struct_names[key]
            val_name_go = val_name_go + 'List'
            tag_name += '-list'
            emit_type_name = '[]' + t.golang_name
        if is_container(child):
            # Shorten openconfig-style Config/State child names.
            name = emit_type_name
            if name.startswith(convert_to_golang(struct_name)) and name.endswith("Config"):
                tag_name = 'config'
                val_name_go = 'Config'
            elif name.startswith(convert_to_golang(struct_name)) and name.endswith("State"):
                tag_name = 'state'
                val_name_go = 'State'
        print >> o, ' {0}\t{1} `mapstructure:"{2}"`'.format(val_name_go, emit_type_name, tag_name)
    print >> o, '}'
    print o.getvalue()
def get_orig_prefix(module):
    """Return the prefix of the original (non-augmenting) module.

    Follows the i_orig_module chain to its root.  The original version
    recursed without returning the result, so it returned None whenever
    i_orig_module was set; the recursion result is now returned.
    """
    orig = module.i_orig_module
    if orig:
        return get_orig_prefix(orig)
    return module.i_prefix
def get_path(c):
    """Build the slash-separated, prefix-qualified schema path of statement
    *c* by walking up its parent chain; the root yields an empty path."""
    if c.parent is None:
        return ''
    qualifier = ''
    if hasattr(c, 'i_module'):
        prefix_stmt = c.i_module.search_one('prefix')
        if prefix_stmt:
            qualifier = prefix_stmt.arg + ":"
    return get_path(c.parent) + "/" + qualifier + c.arg
def visit_children(ctx, module, children):
    """Recursively walk *children*, assigning Go names/paths and registering
    struct-like statements (list/container/choice) in ctx.golang_struct_names
    and ctx.golang_struct_def."""
    for c in children:
        prefix = ''
        if is_case(c):
            # 'case' statements have no own original module; inherit it
            # from the enclosing choice's parent.
            prefix = get_orig_prefix(c.parent.i_orig_module)
            c.i_orig_module = c.parent.i_orig_module
        else:
            prefix = get_orig_prefix(c.i_orig_module)

        # Disambiguate the ubiquitous 'config'/'state' containers (and one
        # known bgp-mp clash) by qualifying with the parent's unique name.
        c.uniq_name = c.arg
        if c.arg == 'config':
            c.uniq_name = c.parent.uniq_name + '-config'

        if c.arg == 'state':
            c.uniq_name = c.parent.uniq_name + '-state'

        if c.arg == 'graceful-restart' and prefix == 'bgp-mp':
            c.uniq_name = 'mp-graceful-restart'

        t = c.search_one('type')

        # define container embeded enums
        if is_leaf(c) and c.search_one('type').arg == 'enumeration':
            prefix = module.i_prefix
            c.path = get_path(c)
            c.golang_name = convert_to_golang(c.arg)
            if prefix in ctx.golang_typedef_map:
                ctx.golang_typedef_map[prefix][c.arg] = c
            else:
                ctx.golang_typedef_map[prefix] = {c.arg: c}

        if is_list(c) or is_container(c) or is_choice(c):
            c.golang_name = convert_to_golang(c.uniq_name)

            if is_choice(c):
                # A choice is flattened to the union of its cases' children.
                picks = pickup_choice(c)
                c.i_children = picks

            # When the same qualified name was already registered, keep
            # whichever statement has more children (the richer definition).
            if ctx.golang_struct_names.get(prefix+':'+c.uniq_name):
                ext_c = ctx.golang_struct_names.get(prefix+':'+c.uniq_name)
                ext_c_child_count = len(getattr(ext_c, "i_children"))
                current_c_child_count = len(getattr(c, "i_children"))
                if ext_c_child_count < current_c_child_count:
                    c.module_prefix = prefix
                    ctx.golang_struct_names[prefix+':'+c.uniq_name] = c
                    idx = ctx.golang_struct_def.index(ext_c)
                    ctx.golang_struct_def[idx] = c
            else:
                c.module_prefix = prefix
                ctx.golang_struct_names[prefix+':'+c.uniq_name] = c
                ctx.golang_struct_def.append(c)

        c.path = get_path(c)
        # print(c.path)
        if hasattr(c, 'i_children'):
            visit_children(ctx, module, c.i_children)
def pickup_choice(c):
    """Flatten choice *c*: collect the children of every 'case' beneath it."""
    members = []
    for child in c.i_children:
        if is_case(child):
            members.extend(child.i_children)
    return members
def get_type_spec(stmt):
    """Return the i_type_spec name of the first typed substatement of
    *stmt*, or None when no substatement carries an i_type_spec."""
    for sub in stmt.substmts:
        if hasattr(sub, 'i_type_spec'):
            return sub.i_type_spec.name
    return None
def visit_typedef(ctx, module):
    """Register every top-level 'typedef' of *module* in
    ctx.golang_typedef_map, keyed by the module's prefix (and by its
    relative prefix when that differs)."""
    prefix = module.i_prefix
    typedefs = {}
    for stmt in module.substmts:
        if stmt.keyword != 'typedef':
            continue
        stmt.path = get_path(stmt)
        stmt.golang_name = convert_to_golang(stmt.arg)
        typedefs[stmt.arg] = stmt
    ctx.golang_typedef_map[prefix] = typedefs
    rel = ctx.prefix_rel[prefix]
    if rel != prefix:
        # Expose the same map under the relative prefix as well.
        ctx.golang_typedef_map[rel] = typedefs
def visit_identity(ctx, module):
    """Register every top-level 'identity' of *module* in
    ctx.golang_identity_map and attach each derived identity to its base."""
    prefix = module.i_prefix
    child_map = {}
    for stmts in module.substmts:
        if stmts.keyword == 'identity':
            name = stmts.arg
            stmts.golang_name = convert_to_golang(name)
            child_map[name] = stmts

            base = stmts.search_one('base')
            if base:
                elems = base.arg.split(':')
                if len(elems) > 1:
                    # Base lives in another module: that module must already
                    # have been visited so its map entry exists.
                    ctx.golang_identity_map[elems[0]][elems[1]].substmts.append(stmts)
                else:
                    # Base is in this same module (declared earlier).
                    child_map[base.arg].substmts.append(stmts)

    ctx.golang_identity_map[prefix] = child_map
def lookup_identity(ctx, default_prefix, identity_name):
    """Resolve *identity_name* against ctx.golang_identity_map."""
    return lookup(ctx.golang_identity_map, default_prefix, identity_name)
def lookup_typedef(ctx, default_prefix, type_name):
    """Resolve *type_name* against ctx.golang_typedef_map."""
    return lookup(ctx.golang_typedef_map, default_prefix, type_name)
def lookup(basemap, default_prefix, key):
    """Resolve *key* (optionally 'prefix:name') in the two-level *basemap*.

    Returns the mapped statement, None when the prefix is known but the
    name is not, or the raw *key* when the prefix itself is unknown.
    """
    if ':' in key:
        pref, name = key.split(':')
    else:
        pref, name = default_prefix, key
    try:
        submap = basemap[pref]
    except KeyError:
        # Unknown prefix: hand the caller back the raw key.
        return key
    return submap.get(name)
def emit_enum(prefix, name, stmt, substmts):
    """Print a Go string-enum type for identity/enumeration *stmt* plus its
    conversion helpers (ToInt/IntTo map, Validate, optional Default)."""
    type_name_org = name
    type_name = stmt.golang_name
    o = StringIO.StringIO()
    print >> o, '// typedef for identity %s:%s' % (prefix, type_name_org)
    print >> o, 'type %s string' % (type_name)

    const_prefix = convert_const_prefix(type_name_org)
    print >> o, 'const ('
    m = {}  # lower-cased enum value -> Go constant name (used for Default())
    for sub in substmts:
        enum_name = '%s_%s' % (const_prefix, convert_const_prefix(sub.arg))
        m[sub.arg.lower()] = enum_name
        print >> o, ' %s %s = "%s"' % (enum_name, type_name, sub.arg.lower())
    print >> o, ')\n'

    print >> o, 'var %sToIntMap = map[%s]int {' % (type_name, type_name)
    for i, sub in enumerate(substmts):
        enum_name = '%s_%s' % (const_prefix, convert_const_prefix(sub.arg))
        print >> o, ' %s: %d,' % (enum_name, i)
    print >> o, '}\n'

    print >> o, 'func (v %s) ToInt() int {' % (type_name)
    print >> o, 'i, ok := %sToIntMap[v]' % (type_name)
    print >> o, 'if !ok {'
    print >> o, 'return -1'
    print >> o, '}'
    print >> o, 'return i'
    print >> o, '}'

    print >> o, 'var IntTo%sMap = map[int]%s {' % (type_name, type_name)
    for i, sub in enumerate(substmts):
        enum_name = '%s_%s' % (const_prefix, convert_const_prefix(sub.arg))
        print >> o, ' %d: %s,' % (i, enum_name)
    print >> o, '}\n'

    print >> o, 'func (v %s) Validate() error {' % (type_name)
    print >> o, 'if _, ok := %sToIntMap[v]; !ok {' % (type_name)
    print >> o, 'return fmt.Errorf("invalid %s: %%s", v)' % (type_name)
    print >> o, '}'
    print >> o, 'return nil'
    print >> o, '}\n'

    # Default()/DefaultAsNeeded() are only generated when the YANG node
    # declares a default value.
    if stmt.search_one('default'):
        default = stmt.search_one('default')
        print >> o, 'func (v %s) Default() %s {' % (type_name, type_name)
        print >> o, 'return %s' % m[default.arg.lower()]
        print >> o, '}\n'

        print >> o, 'func (v %s) DefaultAsNeeded() %s {' % (type_name, type_name)
        print >> o, ' if string(v) == "" {'
        print >> o, ' return v.Default()'
        print >> o, '}'
        print >> o, ' return v'
        print >> o, '}'
    print o.getvalue()
def emit_typedef(ctx, module):
    """Print a Go type alias (or enum) for every registered typedef of
    *module*, skipping excluded paths, identityrefs and duplicates."""
    prefix = module.i_prefix
    t_map = ctx.golang_typedef_map[prefix]
    for name, stmt in t_map.items():
        if stmt.path in _typedef_exclude:
            continue

        # skip identityref type because currently skip identity
        if get_type_spec(stmt) == 'identityref':
            continue

        type_name_org = name
        type_name = stmt.golang_name
        if type_name in emitted_type_names:
            # A typedef of the same Go name was already generated from
            # another module; warn and keep the first definition.
            warn = "warning %s: %s has already been emitted from %s.\n"\
                % (prefix+":"+type_name_org, type_name_org,
                   emitted_type_names[type_name])
            sys.stderr.write(warn)
            continue

        emitted_type_names[type_name] = prefix+":"+type_name_org

        t = stmt.search_one('type')
        o = StringIO.StringIO()
        if t.arg == 'enumeration':
            emit_enum(prefix, type_name_org, stmt, t.substmts)
        elif t.arg == 'union':
            # Unions collapse to plain strings on the Go side.
            print >> o, '// typedef for typedef %s:%s'\
                % (prefix, type_name_org)
            print >> o, 'type %s string' % (type_name)
        else:
            print >> o, '// typedef for typedef %s:%s'\
                % (prefix, type_name_org)
            if not is_builtin_type(t):
                # Chase a (possibly prefixed) typedef-of-typedef to the
                # Go name it was registered under.
                m = ctx.golang_typedef_map
                for k in t.arg.split(':'):
                    m = m[k]
                print >> o, 'type %s %s' % (type_name, m.golang_name)
            else:
                print >> o, 'type %s %s' % (type_name, t.arg)

        print o.getvalue()
def emit_identity(ctx, module):
    """Emit a Go enum for every identity of *module* that has at least one
    derived identity attached to it."""
    prefix = module.i_prefix
    for name, stmt in ctx.golang_identity_map[prefix].items():
        derived = stmt.search('identity')
        if derived:
            emit_enum(prefix, name, stmt, derived)
def is_reference(s):
    # leafref and identityref are the two YANG reference types.
    return s.arg in ('leafref', 'identityref')
def is_leafref(s):
    # True for the YANG 'leafref' built-in type.
    return s.arg == 'leafref'
def is_identityref(s):
    # True for the YANG 'identityref' built-in type.
    return s.arg == 'identityref'
def is_leaf(s):
    # True when the statement keyword is 'leaf'.
    return s.keyword == 'leaf'
def is_leaflist(s):
    # True when the statement keyword is 'leaf-list'.
    return s.keyword == 'leaf-list'
def is_list(s):
    # True when the statement keyword is 'list'.
    return s.keyword == 'list'
def is_container(s):
    # True when the statement keyword is 'container'.
    return s.keyword == 'container'
def is_case(s):
    # True when the statement keyword is 'case'.
    return s.keyword == 'case'
def is_choice(s):
    # True when the statement keyword is 'choice'.
    return s.keyword == 'choice'
def is_builtin_type(t):
    # True when the YANG type maps 1:1 onto a Go builtin (see _type_builtin).
    return t.arg in _type_builtin
def is_translation_required(t):
    # True when the YANG type needs mapping through _type_translation_map.
    return t.arg in _type_translation_map
# YANG types that have no direct Go counterpart, mapped to the Go type
# they are rendered as instead.
_type_translation_map = {
    'union': 'string',
    'decimal64': 'float64',
    'boolean': 'bool',
    'empty': 'bool',
    'inet:ip-address': 'string',
    'inet:ip-prefix': 'string',
    'inet:ipv4-address': 'string',
    'inet:as-number': 'uint32',
    'bgp-set-community-option-type': 'string',
    'inet:port-number': 'uint16',
    'yang:timeticks': 'int64',
    'ptypes:install-protocol-type': 'string',
}

# YANG built-in types that map 1:1 onto a Go type of the same name.
_type_builtin = ["union",
                 "int8",
                 "int16",
                 "int32",
                 "int64",
                 "string",
                 "uint8",
                 "uint16",
                 "uint32",
                 "uint64",
                 ]

# Modules whose definitions come from elsewhere; never generated here.
_module_excluded = ["ietf-inet-types",
                    "ietf-yang-types",
                    ]

# Statement paths skipped during struct generation.
_path_exclude = ["/rpol:routing-policy/rpol:defined-sets/rpol:neighbor-sets/rpol:neighbor-set/rpol:neighbor",
                 "/rpol:routing-policy/rpol:defined-sets/bgp-pol:bgp-defined-sets/bgp-pol:community-sets/bgp-pol:community-set/bgp-pol:community-member",
                 "/rpol:routing-policy/rpol:defined-sets/bgp-pol:bgp-defined-sets/bgp-pol:ext-community-sets/bgp-pol:ext-community-set/bgp-pol:ext-community-member",
                 "/rpol:routing-policy/rpol:defined-sets/bgp-pol:bgp-defined-sets/bgp-pol:as-path-sets/bgp-pol:as-path-set/bgp-pol:as-path-set-member"]

# Typedef paths skipped during typedef generation (currently none).
_typedef_exclude = []
def generate_header(ctx):
    # Emit the generated file's prologue: copyright banner, the Go package
    # clause and the imports needed by every generated file.
    print _COPYRIGHT_NOTICE
    print 'package config'
    print ''
    print 'import "fmt"'
    print ''
def translate_type(key):
    """Map a YANG type name to its Go representation; names without an
    entry in _type_translation_map pass through unchanged."""
    return _type_translation_map.get(key, key)
def convert_to_golang(type_string):
    """Convert a dash-separated name to CamelCase: 'hoge-hoge' -> 'HogeHoge'."""
    # str.capitalize is locale sensitive (XXX carried over from the original).
    return ''.join(word.capitalize() for word in type_string.split('-'))
def convert_const_prefix(type_string):
    """Convert a dash-separated name to SCREAMING_SNAKE_CASE:
    'hoge-hoge' -> 'HOGE_HOGE'."""
    # str.upper is locale sensitive (XXX carried over from the original).
    return '_'.join(word.upper() for word in type_string.split('-'))
def chop_suf(s, suf):
    """Return *s* with a trailing *suf* removed, if present.

    Fixes the empty-suffix edge case: the original computed ``s[:-len('')]``
    i.e. ``s[:0]``, wiping the whole string when *suf* was empty.
    """
    if suf and s.endswith(suf):
        return s[:-len(suf)]
    return s
| apache-2.0 |
mancoast/CPythonPyc_test | crash/274_test_exceptions.py | 45 | 26019 | # Python test set -- part 5, built-in exceptions
import os
import sys
import unittest
import pickle, cPickle
from test.test_support import (TESTFN, unlink, run_unittest, captured_output,
check_warnings, cpython_only)
from test.test_pep352 import ignore_deprecation_warnings
# XXX This is not really enough, each *operation* should be tested!
class ExceptionTests(unittest.TestCase):
    """Smoke tests for built-in exception types: raising/catching, attribute
    layout, pickling, unicode handling and CPython-specific C-level cases."""

    def testReload(self):
        # Reloading the built-in exceptions module failed prior to Py2.2, while it
        # should act the same as reloading built-in sys.
        try:
            from imp import reload
            import exceptions
            reload(exceptions)
        except ImportError, e:
            self.fail("reloading exceptions: %s" % e)

    def raise_catch(self, exc, excname):
        # Raise via both statement form and call form; the resulting
        # messages must agree and the class name must match.
        try:
            raise exc, "spam"
        except exc, err:
            buf1 = str(err)
        try:
            raise exc("spam")
        except exc, err:
            buf2 = str(err)
        self.assertEqual(buf1, buf2)
        self.assertEqual(exc.__name__, excname)

    def testRaising(self):
        # Trigger each built-in exception the "natural" way.
        self.raise_catch(AttributeError, "AttributeError")
        self.assertRaises(AttributeError, getattr, sys, "undefined_attribute")

        self.raise_catch(EOFError, "EOFError")
        fp = open(TESTFN, 'w')
        fp.close()
        fp = open(TESTFN, 'r')
        savestdin = sys.stdin
        try:
            try:
                sys.stdin = fp
                x = raw_input()
            except EOFError:
                pass
        finally:
            sys.stdin = savestdin
            fp.close()
            unlink(TESTFN)

        self.raise_catch(IOError, "IOError")
        self.assertRaises(IOError, open, 'this file does not exist', 'r')

        self.raise_catch(ImportError, "ImportError")
        self.assertRaises(ImportError, __import__, "undefined_module")

        self.raise_catch(IndexError, "IndexError")
        x = []
        self.assertRaises(IndexError, x.__getitem__, 10)

        self.raise_catch(KeyError, "KeyError")
        x = {}
        self.assertRaises(KeyError, x.__getitem__, 'key')

        self.raise_catch(KeyboardInterrupt, "KeyboardInterrupt")

        self.raise_catch(MemoryError, "MemoryError")

        self.raise_catch(NameError, "NameError")
        try: x = undefined_variable
        except NameError: pass

        self.raise_catch(OverflowError, "OverflowError")
        x = 1
        for dummy in range(128):
            x += x  # this simply shouldn't blow up

        self.raise_catch(RuntimeError, "RuntimeError")

        self.raise_catch(SyntaxError, "SyntaxError")
        try: exec '/\n'
        except SyntaxError: pass

        self.raise_catch(IndentationError, "IndentationError")

        self.raise_catch(TabError, "TabError")
        # can only be tested under -tt, and is the only test for -tt
        #try: compile("try:\n\t1/0\n \t1/0\nfinally:\n pass\n", '<string>', 'exec')
        #except TabError: pass
        #else: self.fail("TabError not raised")

        self.raise_catch(SystemError, "SystemError")

        self.raise_catch(SystemExit, "SystemExit")
        self.assertRaises(SystemExit, sys.exit, 0)

        self.raise_catch(TypeError, "TypeError")
        try: [] + ()
        except TypeError: pass

        self.raise_catch(ValueError, "ValueError")
        self.assertRaises(ValueError, chr, 10000)

        self.raise_catch(ZeroDivisionError, "ZeroDivisionError")
        try: x = 1 // 0
        except ZeroDivisionError: pass

        self.raise_catch(Exception, "Exception")
        try: x = 1 // 0
        except Exception, e: pass

    def testSyntaxErrorMessage(self):
        # make sure the right exception message is raised for each of
        # these code fragments

        def ckmsg(src, msg):
            try:
                compile(src, '<fragment>', 'exec')
            except SyntaxError, e:
                if e.msg != msg:
                    self.fail("expected %s, got %s" % (msg, e.msg))
            else:
                self.fail("failed to get expected SyntaxError")

        s = '''while 1:
            try:
                pass
            finally:
                continue'''

        if not sys.platform.startswith('java'):
            ckmsg(s, "'continue' not supported inside 'finally' clause")

        s = '''if 1:
        try:
            continue
        except:
            pass'''

        ckmsg(s, "'continue' not properly in loop")
        ckmsg("continue\n", "'continue' not properly in loop")

    @cpython_only
    def testSettingException(self):
        # test that setting an exception at the C level works even if the
        # exception object can't be constructed.

        class BadException:
            def __init__(self_):
                raise RuntimeError, "can't instantiate BadException"

        def test_capi1():
            import _testcapi
            try:
                _testcapi.raise_exception(BadException, 1)
            except TypeError, err:
                exc, err, tb = sys.exc_info()
                co = tb.tb_frame.f_code
                self.assertEqual(co.co_name, "test_capi1")
                self.assertTrue(co.co_filename.endswith('test_exceptions'+os.extsep+'py'))
            else:
                self.fail("Expected exception")

        def test_capi2():
            import _testcapi
            try:
                _testcapi.raise_exception(BadException, 0)
            except RuntimeError, err:
                exc, err, tb = sys.exc_info()
                co = tb.tb_frame.f_code
                self.assertEqual(co.co_name, "__init__")
                self.assertTrue(co.co_filename.endswith('test_exceptions'+os.extsep+'py'))
                co2 = tb.tb_frame.f_back.f_code
                self.assertEqual(co2.co_name, "test_capi2")
            else:
                self.fail("Expected exception")

        if not sys.platform.startswith('java'):
            test_capi1()
            test_capi2()

    def test_WindowsError(self):
        # Only meaningful on Windows builds, where WindowsError exists.
        try:
            WindowsError
        except NameError:
            pass
        else:
            self.assertEqual(str(WindowsError(1001)),
                             "1001")
            self.assertEqual(str(WindowsError(1001, "message")),
                             "[Error 1001] message")
            self.assertEqual(WindowsError(1001, "message").errno, 22)
            self.assertEqual(WindowsError(1001, "message").winerror, 1001)

    @ignore_deprecation_warnings
    def testAttributes(self):
        # test that exception attributes are happy

        exceptionList = [
            (BaseException, (), {'message' : '', 'args' : ()}),
            (BaseException, (1, ), {'message' : 1, 'args' : (1,)}),
            (BaseException, ('foo',),
                {'message' : 'foo', 'args' : ('foo',)}),
            (BaseException, ('foo', 1),
                {'message' : '', 'args' : ('foo', 1)}),
            (SystemExit, ('foo',),
                {'message' : 'foo', 'args' : ('foo',), 'code' : 'foo'}),
            (IOError, ('foo',),
                {'message' : 'foo', 'args' : ('foo',), 'filename' : None,
                 'errno' : None, 'strerror' : None}),
            (IOError, ('foo', 'bar'),
                {'message' : '', 'args' : ('foo', 'bar'), 'filename' : None,
                 'errno' : 'foo', 'strerror' : 'bar'}),
            (IOError, ('foo', 'bar', 'baz'),
                {'message' : '', 'args' : ('foo', 'bar'), 'filename' : 'baz',
                 'errno' : 'foo', 'strerror' : 'bar'}),
            (IOError, ('foo', 'bar', 'baz', 'quux'),
                {'message' : '', 'args' : ('foo', 'bar', 'baz', 'quux')}),
            (EnvironmentError, ('errnoStr', 'strErrorStr', 'filenameStr'),
                {'message' : '', 'args' : ('errnoStr', 'strErrorStr'),
                 'strerror' : 'strErrorStr', 'errno' : 'errnoStr',
                 'filename' : 'filenameStr'}),
            (EnvironmentError, (1, 'strErrorStr', 'filenameStr'),
                {'message' : '', 'args' : (1, 'strErrorStr'), 'errno' : 1,
                 'strerror' : 'strErrorStr', 'filename' : 'filenameStr'}),
            (SyntaxError, (), {'message' : '', 'msg' : None, 'text' : None,
                'filename' : None, 'lineno' : None, 'offset' : None,
                'print_file_and_line' : None}),
            (SyntaxError, ('msgStr',),
                {'message' : 'msgStr', 'args' : ('msgStr',), 'text' : None,
                 'print_file_and_line' : None, 'msg' : 'msgStr',
                 'filename' : None, 'lineno' : None, 'offset' : None}),
            (SyntaxError, ('msgStr', ('filenameStr', 'linenoStr', 'offsetStr',
                           'textStr')),
                {'message' : '', 'offset' : 'offsetStr', 'text' : 'textStr',
                 'args' : ('msgStr', ('filenameStr', 'linenoStr',
                                      'offsetStr', 'textStr')),
                 'print_file_and_line' : None, 'msg' : 'msgStr',
                 'filename' : 'filenameStr', 'lineno' : 'linenoStr'}),
            (SyntaxError, ('msgStr', 'filenameStr', 'linenoStr', 'offsetStr',
                           'textStr', 'print_file_and_lineStr'),
                {'message' : '', 'text' : None,
                 'args' : ('msgStr', 'filenameStr', 'linenoStr', 'offsetStr',
                           'textStr', 'print_file_and_lineStr'),
                 'print_file_and_line' : None, 'msg' : 'msgStr',
                 'filename' : None, 'lineno' : None, 'offset' : None}),
            (UnicodeError, (), {'message' : '', 'args' : (),}),
            (UnicodeEncodeError, ('ascii', u'a', 0, 1, 'ordinal not in range'),
                {'message' : '', 'args' : ('ascii', u'a', 0, 1,
                                           'ordinal not in range'),
                 'encoding' : 'ascii', 'object' : u'a',
                 'start' : 0, 'reason' : 'ordinal not in range'}),
            (UnicodeDecodeError, ('ascii', '\xff', 0, 1, 'ordinal not in range'),
                {'message' : '', 'args' : ('ascii', '\xff', 0, 1,
                                           'ordinal not in range'),
                 'encoding' : 'ascii', 'object' : '\xff',
                 'start' : 0, 'reason' : 'ordinal not in range'}),
            (UnicodeTranslateError, (u"\u3042", 0, 1, "ouch"),
                {'message' : '', 'args' : (u'\u3042', 0, 1, 'ouch'),
                 'object' : u'\u3042', 'reason' : 'ouch',
                 'start' : 0, 'end' : 1}),
        ]
        try:
            # WindowsError only exists on Windows; append its case there.
            exceptionList.append(
                (WindowsError, (1, 'strErrorStr', 'filenameStr'),
                    {'message' : '', 'args' : (1, 'strErrorStr'),
                     'strerror' : 'strErrorStr', 'winerror' : 1,
                     'errno' : 22, 'filename' : 'filenameStr'})
            )
        except NameError:
            pass

        for exc, args, expected in exceptionList:
            try:
                raise exc(*args)
            except BaseException, e:
                if type(e) is not exc:
                    raise
                # Verify module name
                self.assertEqual(type(e).__module__, 'exceptions')
                # Verify no ref leaks in Exc_str()
                s = str(e)
                for checkArgName in expected:
                    self.assertEqual(repr(getattr(e, checkArgName)),
                                     repr(expected[checkArgName]),
                                     'exception "%s", attribute "%s"' %
                                     (repr(e), checkArgName))

                # test for pickling support
                for p in pickle, cPickle:
                    for protocol in range(p.HIGHEST_PROTOCOL + 1):
                        new = p.loads(p.dumps(e, protocol))
                        for checkArgName in expected:
                            got = repr(getattr(new, checkArgName))
                            want = repr(expected[checkArgName])
                            self.assertEqual(got, want,
                                             'pickled "%r", attribute "%s"' %
                                             (e, checkArgName))

    def testDeprecatedMessageAttribute(self):
        # Accessing BaseException.message and relying on its value set by
        # BaseException.__init__ triggers a deprecation warning.
        exc = BaseException("foo")
        with check_warnings(("BaseException.message has been deprecated "
                             "as of Python 2.6", DeprecationWarning)) as w:
            self.assertEqual(exc.message, "foo")
        self.assertEqual(len(w.warnings), 1)

    def testRegularMessageAttribute(self):
        # Accessing BaseException.message after explicitly setting a value
        # for it does not trigger a deprecation warning.
        exc = BaseException("foo")
        exc.message = "bar"
        with check_warnings(quiet=True) as w:
            self.assertEqual(exc.message, "bar")
        self.assertEqual(len(w.warnings), 0)
        # Deleting the message is supported, too.
        del exc.message
        with self.assertRaises(AttributeError):
            exc.message

    @ignore_deprecation_warnings
    def testPickleMessageAttribute(self):
        # Pickling with message attribute must work, as well.
        e = Exception("foo")
        f = Exception("foo")
        f.message = "bar"
        for p in pickle, cPickle:
            ep = p.loads(p.dumps(e))
            self.assertEqual(ep.message, "foo")
            fp = p.loads(p.dumps(f))
            self.assertEqual(fp.message, "bar")

    @ignore_deprecation_warnings
    def testSlicing(self):
        # Test that you can slice an exception directly instead of requiring
        # going through the 'args' attribute.
        args = (1, 2, 3)
        exc = BaseException(*args)
        self.assertEqual(exc[:], args)
        self.assertEqual(exc.args[:], args)

    def testKeywordArgs(self):
        # test that builtin exception don't take keyword args,
        # but user-defined subclasses can if they want
        self.assertRaises(TypeError, BaseException, a=1)

        class DerivedException(BaseException):
            def __init__(self, fancy_arg):
                BaseException.__init__(self)
                self.fancy_arg = fancy_arg

        x = DerivedException(fancy_arg=42)
        self.assertEqual(x.fancy_arg, 42)

    def testInfiniteRecursion(self):
        def f():
            return f()
        self.assertRaises(RuntimeError, f)

        def g():
            try:
                return g()
            except ValueError:
                return -1

        # The test prints an unraisable recursion error when
        # doing "except ValueError", this is because subclass
        # checking has recursion checking too.
        with captured_output("stderr"):
            try:
                g()
            except RuntimeError:
                pass
            except:
                self.fail("Should have raised KeyError")
            else:
                self.fail("Should have raised KeyError")

    def testUnicodeStrUsage(self):
        # Make sure both instances and classes have a str and unicode
        # representation.
        self.assertTrue(str(Exception))
        self.assertTrue(unicode(Exception))
        self.assertTrue(str(Exception('a')))
        self.assertTrue(unicode(Exception(u'a')))
        self.assertTrue(unicode(Exception(u'\xe1')))

    def testUnicodeChangeAttributes(self):
        # See issue 7309. This was a crasher.

        u = UnicodeEncodeError('baz', u'xxxxx', 1, 5, 'foo')
        self.assertEqual(str(u), "'baz' codec can't encode characters in position 1-4: foo")
        u.end = 2
        self.assertEqual(str(u), "'baz' codec can't encode character u'\\x78' in position 1: foo")
        u.end = 5
        u.reason = 0x345345345345345345
        self.assertEqual(str(u), "'baz' codec can't encode characters in position 1-4: 965230951443685724997")
        u.encoding = 4000
        self.assertEqual(str(u), "'4000' codec can't encode characters in position 1-4: 965230951443685724997")
        u.start = 1000
        self.assertEqual(str(u), "'4000' codec can't encode characters in position 1000-4: 965230951443685724997")

        u = UnicodeDecodeError('baz', 'xxxxx', 1, 5, 'foo')
        self.assertEqual(str(u), "'baz' codec can't decode bytes in position 1-4: foo")
        u.end = 2
        self.assertEqual(str(u), "'baz' codec can't decode byte 0x78 in position 1: foo")
        u.end = 5
        u.reason = 0x345345345345345345
        self.assertEqual(str(u), "'baz' codec can't decode bytes in position 1-4: 965230951443685724997")
        u.encoding = 4000
        self.assertEqual(str(u), "'4000' codec can't decode bytes in position 1-4: 965230951443685724997")
        u.start = 1000
        self.assertEqual(str(u), "'4000' codec can't decode bytes in position 1000-4: 965230951443685724997")

        u = UnicodeTranslateError(u'xxxx', 1, 5, 'foo')
        self.assertEqual(str(u), "can't translate characters in position 1-4: foo")
        u.end = 2
        self.assertEqual(str(u), "can't translate character u'\\x78' in position 1: foo")
        u.end = 5
        u.reason = 0x345345345345345345
        self.assertEqual(str(u), "can't translate characters in position 1-4: 965230951443685724997")
        u.start = 1000
        self.assertEqual(str(u), "can't translate characters in position 1000-4: 965230951443685724997")

    def test_badisinstance(self):
        # Bug #2542: if issubclass(e, MyException) raises an exception,
        # it should be ignored
        class Meta(type):
            def __subclasscheck__(cls, subclass):
                raise ValueError()

        class MyException(Exception):
            __metaclass__ = Meta
            pass

        with captured_output("stderr") as stderr:
            try:
                raise KeyError()
            except MyException, e:
                self.fail("exception should not be a MyException")
            except KeyError:
                pass
            except:
                self.fail("Should have raised KeyError")
            else:
                self.fail("Should have raised KeyError")

        with captured_output("stderr") as stderr:
            def g():
                try:
                    return g()
                except RuntimeError:
                    return sys.exc_info()
            e, v, tb = g()
            self.assertTrue(e is RuntimeError, e)
            self.assertIn("maximum recursion depth exceeded", str(v))

    def test_new_returns_invalid_instance(self):
        # See issue #11627.
        class MyException(Exception):
            def __new__(cls, *args):
                return object()

        with self.assertRaises(TypeError):
            raise MyException

    def test_assert_with_tuple_arg(self):
        # A tuple payload must be rendered via repr, not unpacked.
        try:
            assert False, (3,)
        except AssertionError as e:
            self.assertEqual(str(e), "(3,)")

    def test_bad_exception_clearing(self):
        # See issue 16445: use of Py_XDECREF instead of Py_CLEAR in
        # BaseException_set_message gave a possible way to segfault the
        # interpreter.
        class Nasty(str):
            def __del__(message):
                del e.message

        e = ValueError(Nasty("msg"))
        e.args = ()
        del e.message
# Helper class used by TestSameStrAndUnicodeMsg
class ExcWithOverriddenStr(Exception):
    """Exception whose __str__ returns the mandatory keyword argument
    'msg'; 'msg' is deliberately kept out of self.args."""

    def __init__(self, *args, **kwargs):
        msg = kwargs.pop('msg')  # 'msg' is required
        super(ExcWithOverriddenStr, self).__init__(*args, **kwargs)
        self.msg = msg

    def __str__(self):
        return self.msg
class TestSameStrAndUnicodeMsg(unittest.TestCase):
    """unicode(err) should return the same message of str(err). See #6108"""

    def check_same_msg(self, exc, msg):
        """Helper function that checks if str(exc) == unicode(exc) == msg"""
        self.assertEqual(str(exc), msg)
        self.assertEqual(str(exc), unicode(exc))

    def test_builtin_exceptions(self):
        """Check same msg for built-in exceptions"""
        # These exceptions implement a __str__ method that uses the args
        # to create a better error message. unicode(e) should return the same
        # message.
        exceptions = [
            SyntaxError('invalid syntax', ('<string>', 1, 3, '2+*3')),
            IOError(2, 'No such file or directory'),
            KeyError('both should have the same quotes'),
            UnicodeDecodeError('ascii', '\xc3\xa0', 0, 1,
                               'ordinal not in range(128)'),
            UnicodeEncodeError('ascii', u'\u1234', 0, 1,
                               'ordinal not in range(128)')
        ]
        for exception in exceptions:
            self.assertEqual(str(exception), unicode(exception))

    def test_0_args(self):
        """Check same msg for Exception with 0 args"""
        # str() and unicode() on an Exception with no args should return an
        # empty string
        self.check_same_msg(Exception(), '')

    def test_0_args_with_overridden___str__(self):
        """Check same msg for exceptions with 0 args and overridden __str__"""
        # str() and unicode() on an exception with overridden __str__ that
        # returns an ascii-only string should return the same string
        for msg in ('foo', u'foo'):
            self.check_same_msg(ExcWithOverriddenStr(msg=msg), msg)

        # if __str__ returns a non-ascii unicode string str() should fail
        # but unicode() should return the unicode string
        e = ExcWithOverriddenStr(msg=u'f\xf6\xf6')  # no args
        self.assertRaises(UnicodeEncodeError, str, e)
        self.assertEqual(unicode(e), u'f\xf6\xf6')

    def test_1_arg(self):
        """Check same msg for Exceptions with 1 arg"""
        for arg in ('foo', u'foo'):
            self.check_same_msg(Exception(arg), arg)

        # if __str__ is not overridden and self.args[0] is a non-ascii unicode
        # string, str() should try to return str(self.args[0]) and fail.
        # unicode() should return unicode(self.args[0]) and succeed.
        e = Exception(u'f\xf6\xf6')
        self.assertRaises(UnicodeEncodeError, str, e)
        self.assertEqual(unicode(e), u'f\xf6\xf6')

    def test_1_arg_with_overridden___str__(self):
        """Check same msg for exceptions with overridden __str__ and 1 arg"""
        # when __str__ is overridden and __unicode__ is not implemented
        # unicode(e) returns the same as unicode(e.__str__()).
        for msg in ('foo', u'foo'):
            self.check_same_msg(ExcWithOverriddenStr('arg', msg=msg), msg)

        # if __str__ returns a non-ascii unicode string, str() should fail
        # but unicode() should succeed.
        e = ExcWithOverriddenStr('arg', msg=u'f\xf6\xf6')  # 1 arg
        self.assertRaises(UnicodeEncodeError, str, e)
        self.assertEqual(unicode(e), u'f\xf6\xf6')

    def test_many_args(self):
        """Check same msg for Exceptions with many args"""
        argslist = [
            (3, 'foo'),
            (1, u'foo', 'bar'),
            (4, u'f\xf6\xf6', u'bar', 'baz')
        ]
        # both str() and unicode() should return a repr() of the args
        for args in argslist:
            self.check_same_msg(Exception(*args), repr(args))

    def test_many_args_with_overridden___str__(self):
        """Check same msg for exceptions with overridden __str__ and many args"""
        # if __str__ returns an ascii string / ascii unicode string
        # both str() and unicode() should succeed
        for msg in ('foo', u'foo'):
            e = ExcWithOverriddenStr('arg1', u'arg2', u'f\xf6\xf6', msg=msg)
            self.check_same_msg(e, msg)

        # if __str__ returns a non-ascii unicode string, str() should fail
        # but unicode() should succeed
        e = ExcWithOverriddenStr('arg1', u'f\xf6\xf6', u'arg3',  # 3 args
                                 msg=u'f\xf6\xf6')
        self.assertRaises(UnicodeEncodeError, str, e)
        self.assertEqual(unicode(e), u'f\xf6\xf6')

    @cpython_only
    def test_exception_with_doc(self):
        # Exercise _testcapi.make_exception_with_doc (PyErr_NewExceptionWithDoc).
        import _testcapi
        doc2 = "This is a test docstring."
        doc4 = "This is another test docstring."

        self.assertRaises(SystemError, _testcapi.make_exception_with_doc,
                          "error1")

        # test basic usage of PyErr_NewException
        error1 = _testcapi.make_exception_with_doc("_testcapi.error1")
        self.assertIs(type(error1), type)
        self.assertTrue(issubclass(error1, Exception))
        self.assertIsNone(error1.__doc__)

        # test with given docstring
        error2 = _testcapi.make_exception_with_doc("_testcapi.error2", doc2)
        self.assertEqual(error2.__doc__, doc2)

        # test with explicit base (without docstring)
        error3 = _testcapi.make_exception_with_doc("_testcapi.error3",
                                                   base=error2)
        self.assertTrue(issubclass(error3, error2))

        # test with explicit base tuple
        class C(object):
            pass
        error4 = _testcapi.make_exception_with_doc("_testcapi.error4", doc4,
                                                   (error3, C))
        self.assertTrue(issubclass(error4, error3))
        self.assertTrue(issubclass(error4, C))
        self.assertEqual(error4.__doc__, doc4)

        # test with explicit dictionary
        error5 = _testcapi.make_exception_with_doc("_testcapi.error5", "",
                                                   error4, {'a': 1})
        self.assertTrue(issubclass(error5, error4))
        self.assertEqual(error5.a, 1)
        self.assertEqual(error5.__doc__, "")
def test_main():
    # Entry point used by regrtest: run both suites in this file.
    run_unittest(ExceptionTests, TestSameStrAndUnicodeMsg)
# Allow running this test file directly.
if __name__ == '__main__':
    test_main()
| gpl-3.0 |
njase/numpy | numpy/linalg/tests/test_build.py | 131 | 1822 | from __future__ import division, absolute_import, print_function
from subprocess import PIPE, Popen
import sys
import re
from numpy.linalg import lapack_lite
from numpy.testing import TestCase, dec, run_module_suite
from numpy.compat import asbytes_nested
class FindDependenciesLdd(object):
    """Query shared-library dependencies of a binary via the 'ldd' tool."""

    def __init__(self):
        self.cmd = ['ldd']

        # Probe once so a missing/broken ldd fails early and clearly.
        try:
            p = Popen(self.cmd, stdout=PIPE, stderr=PIPE)
            stdout, stderr = p.communicate()
        except OSError:
            raise RuntimeError("command %s cannot be run" % self.cmd)

    def get_dependencies(self, lfile):
        # Return raw ldd output (bytes) for *lfile*; raise on a non-zero exit.
        p = Popen(self.cmd + [lfile], stdout=PIPE, stderr=PIPE)
        stdout, stderr = p.communicate()
        if not (p.returncode == 0):
            raise RuntimeError("failed dependencies check for %s" % lfile)

        return stdout

    def grep_dependencies(self, lfile, deps):
        # Return the subset of *deps* (regex fragments) that appear in the
        # ldd output for *lfile*.
        stdout = self.get_dependencies(lfile)

        rdeps = dict([(dep, re.compile(dep)) for dep in deps])
        founds = []
        for l in stdout.splitlines():
            for k, v in rdeps.items():
                if v.search(l):
                    founds.append(k)

        return founds
class TestF77Mismatch(TestCase):
    """Guard against linking two incompatible Fortran runtimes at once."""

    @dec.skipif(not(sys.platform[:5] == 'linux'),
                "Skipping fortran compiler mismatch on non Linux platform")
    def test_lapack(self):
        # lapack_lite must not link both the g77 (libg2c) and gfortran
        # runtimes; mixing them is known to corrupt results.
        f = FindDependenciesLdd()
        deps = f.grep_dependencies(lapack_lite.__file__,
                                   asbytes_nested(['libg2c', 'libgfortran']))
        self.assertFalse(len(deps) > 1,
"""Both g77 and gfortran runtimes linked in lapack_lite ! This is likely to
cause random crashes and wrong results. See numpy INSTALL.txt for more
information.""")
# Allow running this test file directly.
if __name__ == "__main__":
    run_module_suite()
| bsd-3-clause |
chainer/chainer | tests/chainer_tests/test_function_node.py | 3 | 43917 | from __future__ import print_function
import threading
import unittest
import mock
import numpy
import pytest
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
# TODO(hvy): Remove the following import once testing.backend is imported
# in testing/__init__.py
import chainer.testing.backend
from chainer import utils
from chainer.utils import type_check
import chainerx
if chainerx.is_available():
import chainerx.testing
def make_array(start, shape, dtype, device):
    """Return an arange-filled ndarray of *shape*/*dtype* beginning at
    *start*, transferred to *device* via ``device.send``.

    Uses ``numpy.prod`` instead of the deprecated ``numpy.product``
    (removed in NumPy 2.0); the two are otherwise identical.
    """
    size = numpy.prod(shape, dtype='i')
    a = numpy.arange(start, start + size)
    a = a.reshape(shape)
    a = a.astype(dtype, copy=False)
    return device.send(a)
@testing.parameterize(*testing.product({
'y_shape': [(4,), (0,), (2, 3), ()],
'x_shape': [(3,), (0,), (4, 1), ()],
}))
class TestFunctionNode(unittest.TestCase):
def _get_method(self, prefix, gpu):
suffix = 'gpu' if gpu else 'cpu'
return getattr(self.f, prefix + '_' + suffix)
    def _setup(self, device):
        # Build a mocked FunctionNode with two inputs/outputs on *device*,
        # plus the arrays and gradient variables the checks below compare
        # against (gx1_accum pre-computes gx1 + gx1_orig accumulation).
        y_shape = self.y_shape
        x_shape = self.x_shape
        y1 = make_array(1, y_shape, numpy.float32, device)
        y2 = make_array(2, y_shape, numpy.float32, device)
        gx1 = chainer.Variable(
            make_array(1, x_shape, numpy.float32, device))
        gx2 = None
        gy1 = make_array(1, y_shape, numpy.float32, device)
        gy2 = make_array(1, y_shape, numpy.float32, device)

        f = chainer.FunctionNode()
        f.check_type_forward = mock.MagicMock()
        f.forward_cpu = mock.MagicMock()
        f.forward_gpu = mock.MagicMock()
        f.backward = mock.MagicMock(return_value=(gx1, gx2))
        self.f = f

        self.x1 = make_array(0, x_shape, numpy.float32, device)
        self.x2 = make_array(0, x_shape, numpy.int32, device)
        self.y1 = y1
        self.y2 = y2
        self.gx1 = gx1
        self.gx2 = gx2
        self.gx1_orig = chainer.Variable(
            make_array(3, x_shape, numpy.float32, device))
        self.gx2_orig = chainer.Variable(
            make_array(2, x_shape, numpy.float32, device))
        self.gx1_accum = gx1 + self.gx1_orig
        self.gy1 = gy1
        self.gy2 = gy2
def tearDown(self):
# Set None to delete cuda array
self.f = None
self.y1 = None
self.y2 = None
self.gx1 = None
def setup_cpu(self):
self._setup(backend.CpuDevice())
self.f.forward_cpu = mock.MagicMock(return_value=(self.y1, self.y2))
def setup_gpu(self):
self._setup(backend.GpuDevice.from_device_id(0))
self.f.forward_gpu = mock.MagicMock(return_value=(self.y1, self.y2))
def setup_chainerx(self, device_name='native:0'):
self._setup(chainer.get_device(device_name))
self.f.forward = mock.MagicMock(side_effect=lambda inputs: (
utils.force_array(inputs[0] * inputs[1]),
utils.force_array(inputs[0] + inputs[1])))
def check_forward(self, gpu):
y1, y2 = self.f.forward((self.x1, self.x2))
self.assertEqual(self.f.check_type_forward.call_count, 0)
self.assertEqual(self._get_method('forward', not gpu).call_count, 0)
self._get_method('forward', gpu).assert_called_once_with(
(self.x1, self.x2))
self.assertTrue((cuda.to_cpu(y1) == cuda.to_cpu(self.y1)).all())
self.assertTrue((cuda.to_cpu(y2) == cuda.to_cpu(self.y2)).all())
def test_forward_cpu(self):
self.setup_cpu()
self.check_forward(False)
@attr.gpu
def test_forward_gpu(self):
self.setup_gpu()
self.check_forward(True)
def check_check_type_forward(self):
self.assertEqual(self.f.check_type_forward.call_count, 1)
ts = self.f.check_type_forward.call_args[0][0]
self.assertIsInstance(ts, type_check.LightTypeInfoTuple)
self.assertEqual(len(ts), 2)
t1 = ts[0]
assert t1.shape == self.x_shape
assert t1.dtype == numpy.float32
t2 = ts[1]
assert t2.shape == self.x_shape
assert t2.dtype == numpy.int32
def check_apply(self):
x1 = chainer.Variable(self.x1)
x2 = chainer.Variable(self.x2)
x1._node._rank = 1
x2._node._rank = 3
ys = self.f.apply((x1, x2))
self.assertEqual(len(ys), 2)
self.check_check_type_forward()
for y in ys:
self.assertIsInstance(y, chainer.Variable)
# rank is (maximum rank in xs) + 1
self.assertEqual(y.rank, 4)
self.assertIs(y.creator_node, self.f)
self.assertTrue(y.requires_grad)
self.assertIsInstance(y.creator_node.outputs, tuple)
def check_apply_chainerx(self):
x1 = chainer.Variable(self.x1)
# TODO(sonots): ChainerX does not support computing gradients for int32
x2 = chainer.Variable(self.x2, requires_grad=False)
ys = self.f.apply((x1, x2))
self.assertEqual(len(ys), 2)
self.check_check_type_forward()
for y in ys:
self.assertIsInstance(y, chainer.Variable)
self.assertIsInstance(y.data, chainerx.ndarray)
self.assertIs(y.data.device, self.x1.device)
self.assertTrue(y.requires_grad)
def test_apply_cpu(self):
self.setup_cpu()
self.check_apply()
@attr.gpu
def test_apply_gpu(self):
self.setup_gpu()
self.check_apply()
@attr.chainerx
def test_apply_chainerx_cpu(self):
self.setup_chainerx()
self.check_apply_chainerx()
@attr.chainerx
@attr.gpu
def test_apply_chainerx_gpu(self):
self.setup_chainerx('cuda:0')
self.check_apply_chainerx()
@attr.chainerx
@attr.multi_gpu(2)
def test_apply_chainerx_multi_gpu(self):
self.setup_chainerx('cuda:1')
self.check_apply_chainerx()
def check_apply_all_ndarray(self):
x1 = self.x1
x2 = self.x2
ys = self.f.apply((x1, x2))
self.assertEqual(len(ys), 2)
self.check_check_type_forward()
xp = backend.get_array_module(x1)
for y in ys:
self.assertIsInstance(y, chainer.Variable)
self.assertIsInstance(y.data, xp.ndarray)
self.assertFalse(y.requires_grad)
def test_apply_all_ndarray_cpu(self):
self.setup_cpu()
self.check_apply_all_ndarray()
@attr.gpu
def test_apply_all_ndarray_gpu(self):
self.setup_gpu()
self.check_apply_all_ndarray()
@attr.chainerx
def test_apply_all_ndarray_chainerx_cpu(self):
self.setup_chainerx()
self.check_apply_all_ndarray()
@attr.chainerx
@attr.gpu
def test_apply_all_ndarray_chainerx_gpu(self):
self.setup_chainerx('cuda:0')
self.check_apply_all_ndarray()
def check_apply_ndarray(self):
x1 = chainer.Variable(self.x1)
x2 = self.x2
x1._node._rank = 1
ys = self.f.apply((x1, x2))
self.assertEqual(len(ys), 2)
self.check_check_type_forward()
for y in ys:
self.assertIsInstance(y, chainer.Variable)
# rank is (maximum rank in xs) + 1
self.assertEqual(y.rank, 2)
self.assertIs(y.creator_node, self.f)
self.assertTrue(y.requires_grad)
self.assertIsInstance(y.creator_node.outputs, tuple)
def check_apply_ndarray_chainerx(self):
x1 = chainer.Variable(self.x1)
x2 = self.x2
ys = self.f.apply((x1, x2))
self.assertEqual(len(ys), 2)
self.check_check_type_forward()
for y in ys:
self.assertIsInstance(y, chainer.Variable)
self.assertIsInstance(y.data, chainerx.ndarray)
self.assertIs(y.data.device, self.x1.device)
self.assertTrue(y.requires_grad)
def test_apply_ndarray_cpu(self):
self.setup_cpu()
self.check_apply_ndarray()
@attr.gpu
def test_apply_ndarray_gpu(self):
self.setup_gpu()
self.check_apply_ndarray()
@attr.chainerx
def test_apply_ndarray_chainerx_cpu(self):
self.setup_chainerx()
self.check_apply_ndarray_chainerx()
@attr.chainerx
@attr.gpu
def test_apply_ndarray_chainerx_gpu(self):
self.setup_chainerx('cuda:0')
self.check_apply_ndarray_chainerx()
def check_apply_single_return_value(self):
x1 = chainer.Variable(self.x1)
x2 = chainer.Variable(self.x2)
ret, = self.f.apply((x1, x2))
self.assertIsInstance(ret, chainer.Variable)
def check_apply_single_return_value_chainerx(self):
x1 = chainer.Variable(self.x1)
# TODO(sonots): ChainerX does not support computing gradients for int32
x2 = chainer.Variable(self.x2, requires_grad=False)
ret, = self.f.apply((x1, x2))
self.assertIsInstance(ret, chainer.Variable)
self.assertIsInstance(ret.data, chainerx.ndarray)
self.assertIs(ret.data.device, self.x1.device)
def test_apply_single_return_value_cpu(self):
self.setup_cpu()
self.f.forward_cpu.return_value = (self.y1,)
self.check_apply_single_return_value()
@attr.gpu
def test_apply_single_return_value_gpu(self):
self.setup_gpu()
self.f.forward_gpu.return_value = (self.y1,)
self.check_apply_single_return_value()
@attr.chainerx
def test_apply_single_return_value_chainerx_cpu(self):
self.setup_chainerx()
self.f.forward.side_effect = lambda inputs: (
utils.force_array(inputs[0] * inputs[1]),)
self.check_apply_single_return_value_chainerx()
@attr.chainerx
@attr.gpu
def test_apply_single_return_value_chainerx_gpu(self):
self.setup_chainerx('cuda:0')
self.f.forward.side_effect = lambda inputs: (
utils.force_array(inputs[0] * inputs[1]),)
self.check_apply_single_return_value_chainerx()
def _get_f(self):
x1 = chainer.Variable(self.x1)
x2 = chainer.Variable(self.x2)
y1, y2 = self.f.apply((x1, x2))
f = y1.creator_node
# To test weak refernece, return only x1 and y1.
# x2 and y2 are deleted by the garbage collector
return f, x1, y1
def test_unchain(self):
self.setup_cpu()
f, _x1, _y1 = self._get_f()
y1, y2 = f.outputs
f.unchain()
# As _y1 is alive, this weak ref is also alive
y1_ref = y1()
self.assertIsNotNone(y1_ref)
self.assertIsNone(y1_ref.creator)
# This weak ref is dead by unchain
y2_ref = y2()
self.assertIsNone(y2_ref)
self.assertIsNone(f.inputs)
def test_label(self):
self.setup_cpu()
self.assertEqual(self.f.label, 'FunctionNode')
class TestFunctionNodeMixChainerxAndXpArrays(unittest.TestCase):
    """Mixing ChainerX and numpy/cupy arrays in one apply() must raise."""
    class SimpleFunctionNode(chainer.FunctionNode):
        def __init__(self, xp):
            # xp: array module the inputs are expected to belong to.
            self.xp = xp
        def forward(self, inputs):
            x1, x2 = inputs
            assert isinstance(x1, self.xp.ndarray)
            assert isinstance(x2, self.xp.ndarray)
            return x1 * x2,
    def check_mix_xp(self, xp):
        xp_x1 = xp.random.randn(2, 3).astype(numpy.float32)
        xp_x2 = xp.random.randn(2, 3).astype(numpy.float32)
        # Convert only the second input to ChainerX to create the mix.
        x2 = backend.to_chx(xp_x2)
        fnode = self.SimpleFunctionNode(xp)
        with self.assertRaises(TypeError):
            fnode.apply((xp_x1, x2))
    @attr.chainerx
    def test_mix_numpy(self):
        self.check_mix_xp(numpy)
    @attr.chainerx
    @attr.gpu
    def test_mix_cupy(self):
        self.check_mix_xp(cuda.cupy)
class TestFunctionNodeInvalidType(unittest.TestCase):
    """check_type_forward failures must raise InvalidType with a clear message."""
    def test_forward_invalid1(self):
        class FunctionNode(chainer.FunctionNode):
            def check_type_forward(self, in_types):
                x_type, = in_types
                type_check.expect(
                    x_type.dtype == numpy.float32,
                    x_type.ndim >= 2,
                )
            def forward(self, inputs):
                return inputs
        f = FunctionNode()
        # OK
        v = chainer.Variable(numpy.random.randn(1, 5).astype(numpy.float32))
        result, = f.apply((v,))
        assert isinstance(result, chainer.Variable)
        # Incorrect dtype
        # in py3, numpy dtypes are represented as class
        msg = """\
Invalid operation is performed in: FunctionNode \\(Forward\\)
Expect: in_types\\[0\\]\\.dtype == <(type|class) 'numpy\\.float32'>
Actual: float64 \\!= <(type|class) 'numpy\\.float32'>"""
        v = chainer.Variable(numpy.random.randn(1, 5))
        with six.assertRaisesRegex(self, chainer.utils.type_check.InvalidType,
                                   msg):
            f.apply((v,))
        # Incorrect dim
        msg = """\
Invalid operation is performed in: FunctionNode \\(Forward\\)
Expect: in_types\\[0\\]\\.ndim >= 2
Actual: 1 < 2"""
        v = chainer.Variable(numpy.random.randn(5).astype(numpy.float32))
        with six.assertRaisesRegex(self, chainer.utils.type_check.InvalidType,
                                   msg):
            f.apply((v,))
class TestFunctionNodeForwardTypeCheck(unittest.TestCase):
    """apply() must reject invalid forward() outputs and mixed backends."""
    def setUp(self):
        self.x1 = numpy.random.rand(2, 3).astype(numpy.float32)
        self.x2 = numpy.random.rand(2, 3).astype(numpy.float32)
    def test_invalid_output_type(self):
        class FunctionNode(chainer.FunctionNode):
            def forward(self, inputs):
                # Not an ndarray: must be rejected by apply().
                return object(),
        f = FunctionNode()
        x1 = chainer.Variable(self.x1)
        with six.assertRaisesRegex(
                self,
                TypeError,
                'forward output must be a tuple of ndarrays'):
            f.apply((x1,))
    @attr.gpu
    def test_inconsistent_input_backends(self):
        class FunctionNode(chainer.FunctionNode):
            def forward(self, inputs):
                return inputs
        f = FunctionNode()
        # Cause inconsistency between inputs
        x1 = cuda.to_gpu(self.x1)
        x1 = chainer.Variable(x1)
        x2 = chainer.Variable(self.x2)
        with self.assertRaises(TypeError):
            f.apply((x1, x2))
    @attr.gpu
    def test_inconsistent_output_backends(self):
        class FunctionNode(chainer.FunctionNode):
            def forward(self, inputs):
                # Cause inconsistency between outputs
                return inputs[0], cuda.to_gpu(inputs[1])
        f = FunctionNode()
        x1 = chainer.Variable(self.x1)
        x2 = chainer.Variable(self.x2)
        with self.assertRaises(TypeError):
            f.apply((x1, x2))
@testing.parameterize(
    {'return_value': (numpy.array([float('nan')], numpy.float32),),
     'valid': False},
    {'return_value': (numpy.array([1], numpy.int32),), 'valid': True},
)
class TestFunctionNodeForwardDebug(unittest.TestCase):
    """In debug mode, NaN in a float forward output must raise RuntimeError."""
    def setUp(self):
        # Force debug mode for the duration of the test; restored in
        # tearDown so other tests are unaffected.
        self.original_debug = chainer.is_debug()
        chainer.set_debug(True)
        self.one = numpy.array([1], numpy.float32)
        self.f = chainer.FunctionNode()
    def tearDown(self):
        chainer.set_debug(self.original_debug)
    def check_debug_forward(self, x_data):
        x = chainer.Variable(x_data)
        if self.valid:
            # check if forward throws nothing
            self.f.apply((x,))
        else:
            with self.assertRaises(RuntimeError):
                self.f.apply((x,))
    def test_debug_forward_cpu(self):
        self.f.forward_cpu = mock.MagicMock(return_value=self.return_value)
        self.check_debug_forward(self.one)
    @attr.gpu
    def test_debug_forward_gpu(self):
        return_value = tuple(None if x is None else cuda.to_gpu(x)
                             for x in self.return_value)
        self.f.forward_gpu = mock.MagicMock(return_value=return_value)
        self.check_debug_forward(cuda.to_gpu(self.one))
@testing.backend.inject_backend_tests(
    None,
    testing.product({'use_cuda': [True, False]}))
class TestFunctionNodeInvalidBackwardChecks(unittest.TestCase):
    """Tests FunctionNode.backward correctness checks"""
    def setUp(self):
        self.f = chainer.FunctionNode()
    def _dummy_func(self, bwd_return_data):
        # Create a dummy func that returns `bwd_return_data` in the
        # `backward` method.
        def one(xp):
            return xp.array(1, numpy.float32)
        class DummyFunc(chainer.FunctionNode):
            def forward_cpu(self, inputs):
                return one(numpy),
            def forward_gpu(self, inputs):
                return one(cuda.cupy),
            def backward(self, indexes, grad_outputs):
                return bwd_return_data
        return DummyFunc()
    def check_debug_backward_accumulate(
            self, backend_config, f, xs_data, errors, initial_gxs=None):
        # `errors` is a dict, where keys are True or False indicating the
        # debug mode to run the test, and values are tuple of expected
        # exception type and error message pattern.
        for debug_mode, error in errors.items():
            def to_xp(arrs):
                if backend_config.use_cuda:
                    return cuda.to_gpu(arrs)
                else:
                    return arrs
            # Convert arrays to GPU
            xs_data = to_xp(xs_data)
            if initial_gxs is not None:
                initial_gxs = to_xp(initial_gxs)
            # Call forward
            xs = [chainer.Variable(x) for x in xs_data]
            y, = f.apply(xs)
            # Set initial input grads, if given
            if initial_gxs is not None:
                assert len(xs) == len(initial_gxs)
                for x, gx in zip(xs, initial_gxs):
                    x.grad = gx
            # Call backward & check error
            with chainer.using_config('debug', debug_mode):
                if error is None:
                    y.backward()  # no error should be raised
                else:
                    error_type, error_regex = error
                    with pytest.raises(error_type, match=error_regex):
                        y.backward()
    def test_ok(self, backend_config):
        # Sanity check: a well-formed backward raises nothing in either mode.
        self.check_debug_backward_accumulate(
            backend_config,
            f=self._dummy_func((
                chainer.Variable(numpy.array([2.0], numpy.float32)),)),
            xs_data=(numpy.array([1], numpy.float32),),
            errors={False: None, True: None})
    def test_gradients_has_nan(self, backend_config):
        # Returns a gradient that has NaN value
        self.check_debug_backward_accumulate(
            backend_config,
            f=self._dummy_func((chainer.Variable(numpy.array(
                [float('nan')], numpy.float32)),)),
            xs_data=(numpy.array([1], numpy.float32),),
            errors={True: (RuntimeError,
                           'NaN is detected on backward computation')})
    def test_invalid_number_of_gradients(self, backend_config):
        # Returns more gradients than expected
        self.check_debug_backward_accumulate(
            backend_config,
            f=self._dummy_func((
                chainer.Variable(numpy.array([2.0], numpy.float32)),
                chainer.Variable(numpy.array([1.0], numpy.float32)))),
            xs_data=(numpy.array([1], numpy.float32),),
            errors={True: (ValueError,
                           'number of gradients returned from backward is '
                           'incorrect')})
    def test_invalid_zero_gradients(self, backend_config):
        # Returns 0 gradients while 1 expected
        self.check_debug_backward_accumulate(
            backend_config,
            f=self._dummy_func(()),
            xs_data=(numpy.array([1], numpy.float32),),
            errors={True: (ValueError,
                           'number of gradients returned from backward is '
                           'incorrect')})
    def test_invalid_gradient_shape(self, backend_config):
        # Returns gradient of incorrect shape
        self.check_debug_backward_accumulate(
            backend_config,
            f=self._dummy_func((
                chainer.Variable(
                    backend_config.xp.array([2, 3], numpy.float32)),)),
            xs_data=(numpy.array([1], numpy.float32),),
            errors={True: (ValueError,
                           'shape of gradients returned from backward is '
                           'incorrect')})
    def test_invalid_gradient_type(self, backend_config):
        # Incorrectly returns a gradient as ndarray instead of variable
        self.check_debug_backward_accumulate(
            backend_config,
            f=self._dummy_func((
                backend_config.xp.array([2.0], numpy.float32))),
            xs_data=(numpy.array([1], numpy.float32),),
            errors={True: (ValueError,
                           'type of gradients returned from backward is '
                           'incorrect')})
    def test_invalid_gradient_dtype(self, backend_config):
        # Incorrectly returns a gradient with incorrect dtype, compared to
        # initially set gradients.
        self.check_debug_backward_accumulate(
            backend_config,
            f=self._dummy_func((
                chainer.Variable(
                    backend_config.xp.array([2.0], numpy.int64)),)),
            xs_data=(numpy.array([1], numpy.float32),),
            initial_gxs=(numpy.array([1], numpy.float32),),
            errors={True: (ValueError,
                           'dtype of gradients returned from backward is '
                           'incorrect')})
class TestNoBackpropMode(unittest.TestCase):
    """no_backprop_mode/force_backprop_mode must toggle graph construction."""
    def setUp(self):
        self.x = chainer.Variable(numpy.array([1.], 'f'))
    def test_no_backprop_mode(self):
        # Outside the context a creator node is recorded; inside it is not.
        y = self.x + 1
        self.assertTrue(y.creator_node is not None)
        with chainer.no_backprop_mode():
            y = self.x + 1
        self.assertTrue(y.creator_node is None)
        y = self.x + 1
        self.assertTrue(y.creator_node is not None)
    def test_force_backprop_mode(self):
        # force_backprop_mode re-enables graph construction even when
        # nested inside no_backprop_mode.
        with chainer.no_backprop_mode():
            with chainer.force_backprop_mode():
                y = self.x + 1
        self.assertTrue(y.creator_node is not None)
        y = self.x + 1
        self.assertTrue(y.creator_node is not None)
        with chainer.force_backprop_mode():
            y = self.x + 1
        self.assertTrue(y.creator_node is not None)
    @attr.chainerx
    def test_backprop_mode_affects_chainerx(self):
        # chainer.{no,force}_backprop_mode should affect chainerx's
        # counterpart.
        assert chainerx.is_backprop_required()
        # nobp
        with chainer.no_backprop_mode():
            assert not chainerx.is_backprop_required()
            # nobp > forcebp
            with chainer.force_backprop_mode():
                assert chainerx.is_backprop_required()
            # nobp > nobp
            with chainer.no_backprop_mode():
                assert not chainerx.is_backprop_required()
        assert chainerx.is_backprop_required()
        # forcebp
        with chainer.force_backprop_mode():
            assert chainerx.is_backprop_required()
        # forcebp > forcebp
        with chainer.force_backprop_mode():
            assert chainerx.is_backprop_required()
            # forcebp > nobp
            with chainer.no_backprop_mode():
                assert not chainerx.is_backprop_required()
        assert chainerx.is_backprop_required()
class MyThread(threading.Thread):
    """Worker thread checking that no_backprop_mode applies per-thread."""

    def run(self):
        v = chainer.Variable(numpy.array([1], dtype='f'))
        with chainer.no_backprop_mode():
            out = v + 1
        # Record the observation so the spawning test can assert on it
        # after join().
        self.creator_is_none = out.creator_node is None
class TestBackpropModeMultiThread(unittest.TestCase):
    """no_backprop_mode entered in a worker thread must work there too."""
    def test_multi_thread(self):
        t = MyThread()
        t.start()
        t.join()
        self.assertTrue(t.creator_is_none)
class FunctionNodeWithRetaining(chainer.FunctionNode):
    """Identity FunctionNode that retains selected inputs/outputs.

    backward() snapshots the retained variables onto attributes so tests
    can inspect them after the backward pass.
    """
    def __init__(self, input_indices, output_indices):
        # Indices passed to retain_inputs/retain_outputs in forward().
        self.input_indices = input_indices
        self.output_indices = output_indices
    def forward(self, inputs):
        self.retain_inputs(self.input_indices)
        self.retain_outputs(self.output_indices)
        return inputs
    def backward(self, _, grad_outputs):
        self.retained_backward_inputs = self.get_retained_inputs()
        self.retained_backward_outputs = self.get_retained_outputs()
        return grad_outputs
@testing.backend.inject_backend_tests(
    None,
    [
        {},
        {'use_cuda': True},
        {'use_chainerx': True, 'chainerx_device': 'native:0'},
    ])
class TestFunctionNodeRetaining(unittest.TestCase):
    """Retained inputs/outputs must survive until backward, and
    get_retained_* must return () when nothing was retained."""
    def test_retain(self, backend_config):
        xp = backend_config.xp
        input_arrs = backend_config.get_array([
            numpy.array([2], dtype=numpy.float32),
            numpy.array([-1], dtype=numpy.float32)])
        inputs = [
            chainer.Variable(input_arrs[0]),
            chainer.Variable(input_arrs[1], requires_grad=False)]
        input_arrays = [x.array for x in inputs]
        if xp is not chainerx:
            # ChainerX variables do not expose .node; skip node identity
            # bookkeeping there.
            input_nodes = [x.node for x in inputs]
        f = FunctionNodeWithRetaining([1], [0, 1])
        outputs = f.apply(inputs)
        outputs[0].grad = backend_config.get_array(
            numpy.array([1], dtype=numpy.float32))
        outputs[0].backward()
        output_arrays = [y.array for y in outputs]
        inputs = None  # release non-retained inputs
        assert len(f.retained_backward_inputs) == 1
        assert len(f.retained_backward_outputs) == 2
        if xp is not chainerx:
            assert f.retained_backward_inputs[0].node is input_nodes[1]
        xp.testing.assert_array_equal(
            f.retained_backward_inputs[0].array, input_arrays[1])
        xp.testing.assert_array_equal(
            f.retained_backward_outputs[0].array, output_arrays[0])
        xp.testing.assert_array_equal(
            f.retained_backward_outputs[1].array, output_arrays[1])
    def check_no_retain(self, backend_config, skip_call):
        # This test ensures get_retained_{in,out}puts returns () if no
        # input/output is retained.
        # skip_call: If False, retain_{in,out}puts() is not called.
        class MyFunc(chainer.FunctionNode):
            backward_called = 0
            def forward(self, inputs):
                x, = inputs
                if not skip_call:
                    self.retain_outputs(())
                    self.retain_inputs(())
                return x * 3,
            def backward(self, input_indices, grad_outputs):
                self.backward_called += 1
                assert self.get_retained_outputs() == ()
                assert self.get_retained_inputs() == ()
                gy, = grad_outputs
                return gy * 3,
        x_arr = backend_config.get_array(numpy.array([1, 2], numpy.float32))
        x = chainer.Variable(x_arr, requires_grad=True)
        func = MyFunc()
        y, = func.apply((x,))
        y.grad = backend_config.get_array(numpy.array([1, 1], numpy.float32))
        y.backward()
        assert func.backward_called == 1
    def test_no_retain(self, backend_config):
        self.check_no_retain(backend_config, False)
        self.check_no_retain(backend_config, True)
def _get_value(x):
    """Unwrap a chainer.Variable to its array; pass other objects through."""
    return x.data if isinstance(x, chainer.Variable) else x
class TestGradTypeCheck(unittest.TestCase):
    """chainer.grad accepts list/tuple sequences only, not bare Variables."""
    def test_type_check(self):
        x = chainer.Variable(numpy.random.uniform(-1, 1, (2, 3)).astype('f'))
        y = x * x
        gx = chainer.Variable(numpy.random.uniform(-1, 1, (2, 3)).astype('f'))
        gy = chainer.Variable(numpy.random.uniform(-1, 1, (2, 3)).astype('f'))
        # Lists and tuples are both fine.
        chainer.grad([y], [x], [gx], [gy])
        chainer.grad((y,), (x,), (gx,), (gy,))
        # Passing a bare Variable in any positional slot must raise.
        with self.assertRaises(TypeError):
            chainer.grad(y, [x], [gx], [gy])
        with self.assertRaises(TypeError):
            chainer.grad([y], x, [gx], [gy])
        with self.assertRaises(TypeError):
            chainer.grad([y], [x], gx, [gy])
        with self.assertRaises(TypeError):
            chainer.grad([y], [x], [gx], gy)
class TestGradValueCheck(unittest.TestCase):
    """chainer.grad must reject grad sequences of mismatched length."""
    def test_length_check(self):
        x = chainer.Variable(numpy.array(3, numpy.float32))
        y = chainer.functions.identity(x)
        with self.assertRaises(ValueError):
            chainer.grad([y], [x], [], [None])
        with self.assertRaises(ValueError):
            chainer.grad([y], [x], [None, None], [None])
        with self.assertRaises(ValueError):
            chainer.grad([y], [x], [None], [])
        with self.assertRaises(ValueError):
            chainer.grad([y], [x], [None], [None, None])
class GradTestBase(object):
    """Mixin driving chainer.grad tests.

    Subclasses declare ``x_names``/``y_names`` and implement ``forward``,
    ``expected_grad`` and ``expected_double_grad``.  This base class builds
    the variables, runs chainer.grad (optionally twice for double
    backprop), and compares against the expected values.
    """
    shape = 3,
    x_names = ()
    y_names = ()
    # When set, gys are replaced by ones and loss scaling is exercised.
    loss_scale = None
    extend_graph_x = False
    extend_graph_y = False
    def _init_attrs(self, names):
        # Create one random integer-valued float Variable per name and
        # attach each as an attribute of the same name.
        ret = []
        for name in names:
            v = chainer.Variable(
                numpy.random.randint(-4, 6, self.shape).astype('f'), name=name)
            ret.append(v)
            setattr(self, name, v)
        return ret
    def _init_ones(self, names):
        ret = []
        for name in names:
            v = chainer.Variable(numpy.ones(self.shape, dtype='f'))
            ret.append(v)
            setattr(self, name, v)
        return ret
    @staticmethod
    def _get_value(x):
        if isinstance(x, chainer.Variable):
            return x.data
        return x
    @staticmethod
    def _to_grad_names(names):
        return ['g%s' % name for name in names]
    def setUp(self):
        self.xs = self._init_attrs(self.x_names)
        self.gxs = self._init_attrs(self._to_grad_names(self.x_names))
        self.gys = self._init_attrs(self._to_grad_names(self.y_names))
        if self.loss_scale is not None:
            # With loss scaling, output grads are implicit ones.
            self._init_ones(self._to_grad_names(self.y_names))
            self.gys = None
    def use_device(self, device):
        # Transfer every Variable attribute to the target backend device.
        for value in six.itervalues(self.__dict__):
            if isinstance(value, chainer.Variable):
                value.to_device(device)
    def forward(self):
        raise NotImplementedError
    def expected_grad(self):
        raise NotImplementedError
    def expected_double_grad(self):
        raise NotImplementedError
    def _print_variables(self, name, vs):
        # Debug helper used when an allclose comparison fails.
        print('{}: '.format(name), end='')
        print(*(self._get_value(v) for v in vs), sep=', ')
    def _print_inputs(self):
        self._print_variables('xs  ', self.xs)
        self._print_variables('gxs ', self.gxs)
        self._print_variables('gys ', self.gys)
    def check_grad(self):
        self.forward()
        ys = [getattr(self, name) for name in self.y_names]
        if self.extend_graph_y:
            self._ys = [v * 1. for v in ys]
        # graph_x extension should be done here
        # to avoid chainer/chainerx mixed graph
        if self.extend_graph_x:
            for v in self.xs:
                v *= 1.
        gxs = chainer.grad(ys, self.xs, self.gys, self.gxs,
                           loss_scale=self.loss_scale)
        expected = self.expected_grad()
        # Initial input grads are accumulated into the result.
        for i, gx in enumerate(self.gxs):
            expected[i] += gx
        self.assertEqual(len(gxs), len(expected))
        try:
            for a, e in zip(gxs, expected):
                testing.assert_allclose(self._get_value(a), self._get_value(e))
        except Exception:
            self._print_inputs()
            self._print_variables('gxs (actual)  ', gxs)
            self._print_variables('gxs (expected)', expected)
            raise
    def test_grad(self, backend_config):
        self.use_device(backend_config.device)
        self.check_grad()
    def check_double_grad(self):
        self.forward()
        ys = [getattr(self, name) for name in self.y_names]
        gxs = chainer.grad(ys, self.xs, self.gys, self.gxs,
                           enable_double_backprop=True,
                           loss_scale=self.loss_scale)
        # Differentiate the sum of first-order grads once more.
        y = sum(gxs)
        ggxs = chainer.grad([y], self.xs)
        expected = self.expected_double_grad()
        self.assertEqual(len(ggxs), len(expected))
        try:
            for a, e in zip(ggxs, expected):
                testing.assert_allclose(self._get_value(a), self._get_value(e))
        except Exception:
            self._print_inputs()
            self._print_variables('gxs            ', gxs)
            self._print_variables('ggxs (actual)  ', ggxs)
            self._print_variables('ggxs (expected)', expected)
            raise
    def test_double_grad(self, backend_config):
        self.use_device(backend_config.device)
        self.check_double_grad()
@testing.parameterize(*testing.product({
    'loss_scale': [None, 1, 10],
}))
@testing.backend.inject_backend_tests(
    None,
    [
        {},
        {'use_ideep': 'always'},
        {'use_cuda': True, 'cuda_device': 0},
        {'use_cuda': True, 'cuda_device': 1},
    ]
)
class TestGradSimple(GradTestBase, unittest.TestCase):
    """chainer.grad on y = x * x, with optional loss scaling."""
    x_names = 'x',
    y_names = 'y',
    def forward(self):
        self.y = self.x * self.x
    def expected_grad(self):
        # dy/dx = 2x, scaled by gy and (optionally) the loss scale.
        grad = 2 * self.x * self.gy
        if self.loss_scale is not None:
            grad *= self.loss_scale
        return [grad]
    def expected_double_grad(self):
        # d2y/dx2 = 2, times gy (and the loss scale when active).
        ggrad = 2 * self.gy
        if self.loss_scale is not None:
            ggrad *= self.loss_scale
        return [ggrad]
@testing.parameterize(*testing.product({
    'loss_scale': [None, 1, 1.5, 2.5, 10],
}))
@testing.backend.inject_backend_tests(
    None,
    [
        {'use_chainerx': True, 'chainerx_device': 'native:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:1'},
    ]
)
class TestGradSimpleChainerX(GradTestBase, unittest.TestCase):
    """Same as TestGradSimple but on ChainerX devices.

    NOTE(review): expected values here ignore loss_scale — presumably the
    ChainerX path does not apply it; confirm against chainer.grad docs.
    """
    x_names = 'x',
    y_names = 'y',
    def forward(self):
        self.y = self.x * self.x
    def expected_grad(self):
        grad = 2 * self.x * self.gy
        return [grad]
    def expected_double_grad(self):
        ggrad = 2 * self.gy
        return [ggrad]
@testing.parameterize(*testing.product({
    'extend_graph_x': [False, True],
    'extend_graph_y': [False, True],
}))
@testing.backend.inject_backend_tests(
    None,
    [
        {},
        {'use_ideep': 'always'},
        {'use_cuda': True, 'cuda_device': 0},
        {'use_cuda': True, 'cuda_device': 1},
        {'use_chainerx': True, 'chainerx_device': 'native:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:1'},
    ]
)
class TestGradComplex(GradTestBase, unittest.TestCase):
    """chainer.grad on a graph with shared intermediate z = x1^2."""
    x_names = 'x1', 'x2'
    y_names = 'y1', 'y2'
    def forward(self):
        self.z = self.x1 * self.x1
        self.y1 = self.z + self.x1 * self.x2 + self.x2
        self.y2 = self.z + self.y1
    def expected_grad(self):
        dz_dx = 2 * self.x1
        # y1 contributes through both outputs since y2 = z + y1.
        dy1_dx = self.gy1 + self.gy2
        return [dy1_dx * (dz_dx + self.x2) + self.gy2 * dz_dx,
                dy1_dx * (self.x1 + 1)]
    def expected_double_grad(self):
        dy1_dx = self.gy1 + self.gy2
        return [3 * dy1_dx + 2 * self.gy2, dy1_dx]
class ExpPair(chainer.FunctionNode):
    """FunctionNode returning (exp(x), exp(x)), retaining both outputs."""
    def forward(self, inputs):
        x, = inputs
        xp = backend.get_array_module(x)
        self.retain_outputs((0, 1))
        return xp.exp(x), xp.exp(x)
    def backward(self, target_input_indexes, grad_outputs):
        # Sum g_i * y_i over outputs whose gradient is provided; each
        # retained output equals exp(x), so this is the exp chain rule.
        return sum([
            g * exp
            for g, exp in zip(grad_outputs, self.get_retained_outputs())
            if g is not None
        ]),
def exp_pair(x):
    """Apply ExpPair to ``x``; returns a tuple of two output Variables."""
    node = ExpPair()
    return node.apply((x,))
@testing.parameterize(*testing.product({
    'keep_y2': [False, True],
}))
@testing.backend.inject_backend_tests(
    None,
    [
        {},
        {'use_ideep': 'always'},
        {'use_cuda': True, 'cuda_device': 0},
        {'use_cuda': True, 'cuda_device': 1},
        {'use_chainerx': True, 'chainerx_device': 'native:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:1'},
    ]
)
class TestGradDelRetainedOutput(GradTestBase, unittest.TestCase):
    """Gradients must be correct whether or not the second retained
    output Variable is kept alive by user code."""
    x_names = 'x1',
    y_names = 'y1',
    def forward(self):
        self.y1, y2 = exp_pair(self.x1)
        if self.keep_y2:
            # Keep a reference so y2 is not garbage collected.
            self.y2 = y2
    def expected_grad(self):
        # d exp(x)/dx = exp(x) = y1.
        return [self.gy1 * self.y1]
    def expected_double_grad(self):
        return [self.gy1 * self.y1]
class ExpAndExpm1(chainer.FunctionNode):
    """FunctionNode computing (exp(x), expm1(x)), retaining only exp(x).

    Used to check that a retained output remains usable in backward even
    after the corresponding output Variable was deleted by user code.
    """
    def forward(self, inputs):
        x, = inputs
        # Bug fix: get_array_module must receive the input array; called
        # with no arguments it always returns numpy (compare ExpPair.forward
        # above), which is wrong when x lives on a GPU.
        xp = backend.get_array_module(x)
        y0 = xp.exp(x)
        y1 = xp.expm1(x)
        self.retain_outputs((0,))
        return y0, y1
    def backward(self, target_input_indexes, grad_outputs):
        g0, g1 = grad_outputs
        y0, = self.get_retained_outputs()
        gx = []
        # Both outputs have derivative exp(x) = y0, so each available
        # output gradient is scaled by the single retained output.
        if g0 is not None:
            gx.append(g0 * y0)
        if g1 is not None:
            gx.append(g1 * y0)
        return chainer.functions.add(*gx),
def exp_and_expm1(x):
    """Apply ExpAndExpm1 to ``x``; returns the (exp, expm1) Variables."""
    node = ExpAndExpm1()
    return node.apply((x,))
class TestGradDelRetainedOutput2(unittest.TestCase):
    """Double backprop through a retained output whose Variable was deleted."""
    def test_retain_output(self):
        xp = numpy
        x_array = xp.random.randn(3)
        y1_grad = xp.random.randn(3)
        x_grad_grad = xp.random.randn(3)
        x = chainer.Variable(x_array, name='x')
        y0, y1 = exp_and_expm1(x)
        # Drop the retained output's Variable; the FunctionNode must still
        # be able to use the retained array in backward.
        del y0
        # (x: Variable) requires grad
        # (y1_grad: ndarray) does not require grad
        gx, = chainer.grad([y1], [x], [y1_grad], enable_double_backprop=True)
        # assert gx == exp(x) * y1_grad
        xp.testing.assert_allclose(
            gx.array,
            xp.exp(x.array) * y1_grad)
        gx_, = chainer.grad([gx], [x], [x_grad_grad])
        xp.testing.assert_allclose(
            gx_.array,
            gx.array * x_grad_grad)
class TestUnchainSplitGrad(unittest.TestCase):
    """Unchaining one branch of a split must cut only that branch's grads."""
    def test_unchain_split(self):
        x = chainer.Variable(numpy.arange(4).astype('f').reshape(2, 2))
        h0, h1 = chainer.functions.split_axis(x, [1], axis=0)
        y = chainer.functions.sum(h0)
        z = chainer.functions.sum(h1)
        w = y + z
        # Disconnect h0 from x; gradients flowing through h0 stop there.
        h0.unchain()
        dy_dh0 = numpy.array([[1., 1.]])
        dz_dh1 = numpy.array([[1., 1.]])
        dy_dx = None
        dz_dx = numpy.array([[0., 0.], [1., 1.]])
        dw_dx = numpy.array([[0., 0.], [1., 1.]])
        testing.assert_allclose(chainer.grad([y], [h0])[0].array, dy_dh0)
        testing.assert_allclose(chainer.grad([z], [h1])[0].array, dz_dh1)
        assert chainer.grad([y], [x])[0] is dy_dx
        testing.assert_allclose(chainer.grad([z], [x])[0].array, dz_dx)
        testing.assert_allclose(chainer.grad([w], [x])[0].array, dw_dx)
class TestGradV3Compat1(unittest.TestCase):
    """Backward-compatibility of chainer.grad's set_grad/retain_grad options."""
    def _var(self, val):
        return chainer.Variable(numpy.array(val, numpy.float32))
    def check(self, option, grads_before, grads_after):
        # Build a chain v -> 2v -> 4v -> 8v -> 16v (vs holds every node),
        # then call chainer.grad with `option` and verify which nodes end
        # up holding .grad afterwards.
        vs = []
        v = self._var(0.5)
        for _ in range(4):
            vs.append(v)
            v += v
            vs.append(v)
            v *= 1.
        _, x1, _, x2, _, y1, _, y2 = vs
        gx1 = self._var(1000.)
        gx2 = self._var(100.)
        gy1 = self._var(10.)
        gy2 = self._var(1.)
        for v, g in zip(vs, grads_before):
            if g is not None:
                v.grad_var = self._var(g)
        grads = chainer.grad(
            [y1, y2], [x1, x2], [gy1, gy2], [gx1, gx2], **option)
        numpy.testing.assert_allclose(grads[0].array, 1248.)
        numpy.testing.assert_allclose(grads[1].array, 124.)
        for v, ans in zip(vs, grads_after):
            if ans is None:
                self.assertIsNone(v.grad)
            else:
                numpy.testing.assert_allclose(v.grad, ans)
    def test_no_option(self):
        # Without options, chainer.grad leaves every .grad untouched.
        self.check({}, [None] * 8, [None] * 8)
        self.check({}, [-1.] * 8, [-1.] * 8)
    def test_set_grad(self):
        self.check(
            {'set_grad': True},
            [None] * 8,
            [None, 1248., None, 124., None, None, None, None])
        self.check(
            {'set_grad': True},
            [-1.] * 8,
            [-1., 1248., -1., 124., -1., -1., -1., -1.])
    def test_retain_grad(self):
        self.check(
            {'retain_grad': True},
            [None] * 8,
            [None, 1248., 248., 124., 24., 12., 2., 1.]
            # Before v5, the result was
            # [None, 1248., 248., 124., 24., 12., 2., None]
        )
        self.check(
            {'retain_grad': True},
            [-1.] * 8,
            [-1., 1248., 248., 124., 24., 12., 2., 1.]
            # Before v5, the result was
            # [-1., 1248., 248., 124., 24., 12., 2., -1.]
        )
@attr.chainerx
class TestFunctionNodeBackwardChainerx(unittest.TestCase):
    """backward() behavior when inputs are raw chainerx arrays."""
    class SimpleFunctionNode(chainer.FunctionNode):
        def __init__(self, backward_call_callback):
            # Callback receives {'indexes': ..., 'grad_outputs': ...} on
            # every backward invocation so tests can inspect the call.
            self.backward_call_callback = backward_call_callback
        def forward(self, inputs):
            return tuple([2 * x for x in inputs])
        def backward(self, indexes, grad_outputs):
            self.backward_call_callback({
                'indexes': indexes, 'grad_outputs': grad_outputs})
            gxs = []
            for i_in in indexes:
                gx = 2 * grad_outputs[i_in]
                gxs.append(gx)
            return gxs
    def test_backward(self):
        shape = (2, 3)
        dtype = numpy.float32
        x1 = chainerx.full(shape, 3, dtype)
        x2 = chainerx.full(shape, 5, dtype).require_grad()
        gx2_expected = numpy.full(shape, 2, dtype)
        backward_call_args = []
        def backward_call_callback(call_arg):
            backward_call_args.append(call_arg)
        # forward
        func = self.SimpleFunctionNode(backward_call_callback)
        y1, y2 = func.apply((x1, x2))
        del func
        assert y1.requires_grad
        assert y2.requires_grad
        # backward
        y2.backward()
        # check backward call arguments
        assert len(backward_call_args) == 1
        call_arg, = backward_call_args
        assert isinstance(call_arg['indexes'], tuple)
        assert call_arg['indexes'] == (1,)
        assert isinstance(call_arg['grad_outputs'], tuple)
        assert len(call_arg['grad_outputs']) == 2
        assert call_arg['grad_outputs'][0] is None
        chainerx.testing.assert_array_equal_ex(
            call_arg['grad_outputs'][1].array, numpy.full(shape, 1, dtype),
            strides_check=False)
        # check grads
        chainerx.testing.assert_array_equal_ex(
            x2.grad, gx2_expected, strides_check=False)
        assert not x2.grad.is_backprop_required()
        # x1 never required grad, so accessing .grad must raise.
        with pytest.raises(chainerx.ChainerxError):
            x1.grad
    @attr.gpu
    def test_backward_default_device(self):
        # Default device in backward should be determined by arrays,
        # otherwise, creation routines in backward do not create new arrays
        # on the proper device.
        device = chainerx.get_device('cuda:0')
        shape = (2, 3)
        dtype = numpy.float32
        x1 = chainerx.full(shape, 3, dtype, device=device)
        x2 = chainerx.full(shape, 5, dtype, device=device).require_grad()
        backward_call_new_array = []
        def backward_call_callback(call_arg):
            # Allocate with the implicit default device; it must match the
            # inputs' device even though forward ran under 'native:0'.
            backward_call_new_array.append(chainerx.empty(shape, dtype))
        with chainerx.using_device('native:0'):
            # forward
            func = self.SimpleFunctionNode(backward_call_callback)
            y1, y2 = func.apply((x1, x2))
            # backward
            y2.backward()
        assert backward_call_new_array[0].device is device
testing.run_module(__name__, __file__)
| mit |
lhillber/qops | spectrum_statistics.py | 1 | 5109 | import numpy as np
from numpy.linalg import eigvalsh, eigh, matrix_power
import matplotlib.pyplot as plt
from scipy.special import gamma
from scipy.optimize import curve_fit
from qca import multipage
from figure3 import select, moving_average
# plotting defaults
import matplotlib as mpl
# STIX math fonts for a consistent look with the LaTeX labels below;
# pdf.fonttype 42 embeds TrueType (rather than Type 3) fonts in saved PDFs.
mpl.rcParams["text.latex.preamble"] = [r"\usepackage{amsmath}"]
font = {"size": 9, "weight": "normal"}
mpl.rcParams["mathtext.fontset"] = "stix"
mpl.rcParams["font.family"] = "STIXGeneral"
mpl.rcParams["pdf.fonttype"] = 42
mpl.rc("font", **font)
def brody_fit(x, n):
    """Fit a Brody distribution to binned level-spacing data.

    Parameters
    ----------
    x : array_like
        Bin-center locations of the normalized spacings.
    n : array_like
        Histogram densities at each location in ``x``.

    Returns
    -------
    func : callable
        The fitted Brody curve as a function of spacing.
    popt : ndarray
        Optimal ``(eta, A)`` parameters, each constrained to [0, 1].
    pcov : ndarray
        Covariance matrix of the fitted parameters.
    """
    def _brody(s, eta, A):
        # Brody surmise: eta=0 is Poisson, eta=1 is Wigner-Dyson.
        beta = gamma((eta + 2) / (eta + 1)) ** (eta + 1.0)
        return A * beta * (eta + 1.0) * s ** eta * np.exp(-beta * s ** (eta + 1.0))

    popt, pcov = curve_fit(_brody, x, n, p0=[0.0, 1.0], bounds=[0, 1])

    def fitted(s):
        return _brody(s, *popt)

    return fitted, popt, pcov
from measures import renyi_entropy
# Script-level demo: for each rule in Skey, plot the spectrum and the
# eigenvector magnitudes of the half-chain reduced density matrix at a
# single time step.
# NOTE(review): this runs at import time, before the __main__ guard below.
L = 18
IC = "c1_f0"
Skey = [13, 14, 1, 6]
cs = ["darkturquoise", "darkorange", "limegreen", "crimson"]
for j, (c, S) in enumerate(zip(cs, Skey)):
    sim = select(L, S, IC, V="H", BC="0")
    h5file = sim["h5file"]
    d = h5file["cut_half"][:]
    # Slice of length one: only time step 100 is plotted.
    for ti, rho in enumerate(d[100:101]):
        spec, vecs = eigh(rho)
        fig, axs = plt.subplots(1, 2, figsize=(6, 3))
        print(renyi_entropy(rho))
        axs[0].set_title("spectrum")
        axs[0].semilogy(spec, color=c, marker="o")
        axs[1].set_title("column vector magnitude")
        axs[1].imshow(np.abs(vecs), cmap="gist_gray_r")
        fig.suptitle("$T_{%d}$"%S)
multipage("figures/figure4/eigenRDM.pdf")
plt.close("all")
print("done")
if __name__ == "__main__":
    # For each rule in Skey: build (or load) the time series of half-chain
    # entanglement spectra, fit a Brody parameter eta to the level-spacing
    # distribution at every time step, and collect all figures into one PDF.
    L = 18
    IC = "c1_f0"
    Skey = [13, 14, 1, 6]
    cs = ["darkturquoise", "darkorange", "limegreen", "crimson"]
    fig, axs = plt.subplots(1, 1, figsize=(4, 3), sharex=False)
    fig2, axs2 = plt.subplots(1, 1, figsize=(3, 2), sharex=True)
    fig2.subplots_adjust(left=0.2, bottom=0.2, hspace=0.1)
    fig3s = []
    for j, (c, S) in enumerate(zip(cs, Skey)):
        sim = select(L, S, IC, V="H", BC="0")
        h5file = sim["h5file"]
        try:
            # Use the spectrum cached in the HDF5 file when available.
            espec = h5file["espec"]
        except:
            # NOTE(review): bare except; the expected miss is a KeyError.
            d = h5file["cut_half"]
            espec = np.zeros((d.shape[0], d.shape[1]))
            for ti, rho in enumerate(d):
                espec[ti, :] = eigvalsh(rho)
            # Cache the computed spectrum back into the file.
            h5file["espec"] = espec
        etas = []
        detas = []
        svns = []
        ii = 0
        t0 = 10
        fig3, axs3 = plt.subplots(3, 3, figsize=(4, 4), sharex=True, sharey=True)
        for ti, es in enumerate(espec[t0:1000]):
            # Keep the middle third of the sorted nonzero spectrum (avoids
            # tail effects), then form nearest-neighbor spacings normalized
            # to unit mean.
            es = es[es > 1e-6]
            NN = len(es)
            es = np.sort(es)
            es = es[NN // 3 : 2 * NN // 3]
            ns = range(len(es))
            s = es[1:] - es[:-1]
            s /= np.mean(s)
            # NOTE(review): "bin" shadows the builtin of the same name.
            n, bin, _ = axs.hist(
                s, density=True, alpha=1, histtype="step", bins=10, log=False
            )
            x = (bin[1:] + bin[:-1]) / 2.0
            xs = np.linspace(x[0], x[-1], 100)
            xs = xs[xs > 0]
            func, popt, pcov = brody_fit(x, n)
            # Record eta and its 1-sigma uncertainty from the covariance.
            detas.append(np.sqrt(np.diag(pcov)[0]))
            etas.append(popt[0])
            if (ti+t0) % 100 == 0:
                # Every 100 steps, draw this histogram + fit into the 3x3 grid.
                row, col = ii // 3, ii % 3
                ax3 = axs3[row, col]
                dx = x[1] - x[0]
                # Pad the step plot with zero-height bins at both ends.
                n = np.insert(n, 0, 0)
                n = np.insert(n, len(n), 0)
                x = np.insert(x, 0, x[0] - dx / 2)
                x = np.insert(x, len(x), x[-1] + dx / 2)
                ax3.step(x, n, where="mid")
                ax3.plot(xs, func(xs))
                ax3.set_title(f"t={t0+ti}", pad=-13)
                fig3.suptitle("$R = %d$" % S)
                ii += 1
                if col == 1 and row == 2:
                    ax3.set_xlabel("$\delta E/\overline{\delta E}$")
                if col == 0 and row == 1:
                    ax3.set_ylabel("density")
                ax3.tick_params(direction="inout")
        fig3.subplots_adjust(hspace=0, wspace=0)
        fig3s.append(fig3)
        etas = np.array(etas)
        detas = np.array(detas)
        ts = np.arange(2, len(etas) + 2)
        # Drop poorly constrained fits (eta uncertainty >= 1).
        mask = detas < 1
        etas = etas[mask]
        detas = detas[mask]
        ts = ts[mask]
        if S == 6:
            pass
        else:
            if S == 13:
                label = r"$R = %s$" % S
            else:
                label = str(S)
            # Moving average over a window of L steps, plus the raw trace.
            aetas = moving_average(etas, n=L)
            axs2.plot(aetas, marker=None, color=c, label=label, lw=1)
            avgerr = np.mean(detas)
            axs2.plot(ts, etas, c=c, alpha=0.3)
            #axs2.errorbar(ts, etas, yerr=detas, color=c)
        axs2.set_xticks([0, 250, 500, 750, 1000])
        axs2.legend(loc="lower right")
        axs.set_xlabel("$\delta E / \overline{\delta E}$")
        axs2.set_xlabel("$t$")
        axs2.set_ylabel("$\eta$")
    fig.tight_layout()
    fig2.tight_layout()
    multipage(
        "figures/figure4/spectrum_statistics_fixed-10bins.pdf",
        figs=[fig2] + fig3s,
        clip=True,
        dpi=10 * fig.dpi,
    )
| mit |
William-Byrne/appinventor-sources | appinventor/misc/componentcreator/componentcreator.py | 77 | 6421 | from Tkinter import Tk, BOTH, END, IntVar, StringVar, LEFT
from ttk import Button, Checkbutton, Frame, Style, Combobox, Label, Entry
from tkFileDialog import askopenfilename
import tkMessageBox
from helpers import *
import os
# Run relative to this script's directory so the ../../ resource paths used
# below resolve regardless of the caller's working directory.
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
class Application(Frame):
def __init__(self, master=None):
Frame.__init__(self, master)
self.pack(fill=BOTH, expand=1)
self.initUI()
self.setGeometry()
self.component = NewComponent()
def setGeometry(self):
x = 300
y = 100
self.master.geometry("400x300+%d+%d" % (x, y))
self.master.update()
def initUI(self):
#setup title
self.master.title("Component Creator")
self.style = Style()
self.style.theme_use("clam")
#indicator label
self.labelName = Label(self, text="Component Name:")
self.labelName.place(x=10, y=10)
self.master.update()
# create variable and namefield for input of component name
sv = StringVar()
sv.trace("w", lambda name, index, mode, sv=sv: self.nameChanged(sv))
self.nameField = Entry(self, textvariable=sv)
self.nameField.place(x=10+self.labelName.winfo_width() + 10, y=10)
self.master.update()
# label for image name that will show img name for a given component name
self.imgNameVar = StringVar()
self.imgNameVar.set('imageName:')
self.labelImageName = Label(self, textvariable=self.imgNameVar)
self.labelImageName.place(x=10+self.labelName.winfo_width()+10,y=40)
# checkbox for visible component or not
self.cbVar = IntVar()
self.cb = Checkbutton(self, text="Visible Component", variable=self.cbVar)
self.cb.place(x=10, y=70)
# dropdown list for category
self.labelCategory = Label(self, text="Category:")
self.labelCategory.place(x=10, y=110)
self.master.update()
acts = ['UserInterface', 'Layout', 'Media', 'Animation', 'Sensors', 'Social', 'Storage',
'Connectivity', 'LegoMindStorms', 'Experimental', 'Internal', 'Uninitialized']
self.catBox = Combobox(self, values=acts)
self.catBox.place(x=10+self.labelCategory.winfo_width()+10, y=110)
# button to select icon image
self.getImageButton = Button(self, text="Select icon", command=self.getImage)
self.getImageButton.place(x=10, y=150)
self.master.update()
# explanation for resizing
self.resizeVar = IntVar()
self.resizeCB = Checkbutton(self,
text="ON=Resize Image (Requires PIL)\nOFF=Provide 16x16 Image", variable=self.resizeVar)
self.resizeCB.place(x=10+self.getImageButton.winfo_width()+10, y=150)
# create button
self.createButton = Button(self, text="Create", command=self.create)
self.createButton.place(x=10, y=230)
#cancel button
self.cancelButton = Button(self, text="Cancel", command=self.quit)
self.cancelButton.place(x=200, y=230)
# open file picker for selecting an icon
def getImage(self):
ftypes = [('All Picture Files', ('*.jpg', '*.png', '*.jpeg', '*.bmp')), ('All files', '*')]
self.component.imgFile = askopenfilename(filetypes=ftypes, title="Select an Icon file")
# update component name and image name for component by lowercasing first letter
def nameChanged(self, sv):
s = sv.get()
self.component.compName = s
self.component.compImgName = s[:1].lower() + s[1:] if s else ''
self.imgNameVar.set('imageName: %s' % self.component.compImgName)
# tries to create component
def create(self):
# sets parameters for new component based on input values
self.component.visibleComponent = bool(self.cbVar.get())
self.component.resizeImage = bool(self.resizeVar.get())
self.component.category = self.catBox.get().upper()
self.component.compName = self.nameField.get()
try:
# check if component already exists
try:
open('../../components/src/com/google/appinentor/components/runtime/%s.java', 'r')
tkMessageBox.showerror("Duplicate Component","%s already exists" % self.component.compName)
# if doesnt exist will raise error
except IOError:
# check for name input
if not self.component.compImgName:
tkMessageBox.showerror("Missing Name","Please enter component name")
return
#check for category selection
if not self.component.category:
tkMessageBox.showerror("Missing Category","Please select a category")
return
# check if selected an icon
if not self.component.imgFile:
tkMessageBox.showerror("Missing Icon","Please select an icon image")
return
# copy image file to folder, can get error if user checked resize and doest have PIL installed
try:
self.component.copyImageToFolder()
except ImportError, e:
tkMessageBox.showerror("Unable to import PIL","Please install PIL or unselect checkbox")
return
# add references to the image file, can get error if component already exists
try:
self.component.addImageReference()
except DuplicateError, e:
tkMessageBox.showerror("Duplicate Component","%s already exists" % self.component.compName)
return
# will create mock component if is visible and add references to SimpleComponentDescriptor
self.component.createMockComponent()
# will create the actual component file
self.component.createComponent()
tkMessageBox.showinfo('Success', 'Component created successfully')
# if could not open some file for writing
except Exception, e:
tkMessageBox.showerror("Exception",str(e))
def main():
    """Create the Tk root window, attach the Application frame, and run the
    event loop until the user closes the window."""
    root_window = Tk()
    # The Application constructor packs itself and sets the window geometry.
    Application(master=root_window)
    root_window.mainloop()


if __name__ == '__main__':
    main()
| apache-2.0 |
Zhongqilong/mykbengineer | kbe/src/lib/python/Lib/xml/dom/expatbuilder.py | 91 | 35755 | """Facility to use the Expat parser to load a minidom instance
from a string or file.
This avoids all the overhead of SAX and pulldom to gain performance.
"""
# Warning!
#
# This module is tightly bound to the implementation details of the
# minidom DOM and can't be used with other DOM implementations. This
# is due, in part, to a lack of appropriate methods in the DOM (there is
# no way to create Entity and Notation nodes via the DOM Level 2
# interface), and for performance. The later is the cause of some fairly
# cryptic code.
#
# Performance hacks:
#
# - .character_data_handler() has an extra case in which continuing
# data is appended to an existing Text node; this can be a
# speedup since pyexpat can break up character data into multiple
# callbacks even though we set the buffer_text attribute on the
# parser. This also gives us the advantage that we don't need a
# separate normalization pass.
#
# - Determining that a node exists is done using an identity comparison
# with None rather than a truth test; this avoids searching for and
# calling any methods on the node object if it exists. (A rather
# nice speedup is achieved this way as well!)
from xml.dom import xmlbuilder, minidom, Node
from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE
from xml.parsers import expat
from xml.dom.minidom import _append_child, _set_attribute_node
from xml.dom.NodeFilter import NodeFilter
# Local aliases for frequently-tested node type codes.
TEXT_NODE = Node.TEXT_NODE
CDATA_SECTION_NODE = Node.CDATA_SECTION_NODE
DOCUMENT_NODE = Node.DOCUMENT_NODE
# DOMBuilderFilter return codes, hoisted to module level for fast lookup.
FILTER_ACCEPT = xmlbuilder.DOMBuilderFilter.FILTER_ACCEPT
FILTER_REJECT = xmlbuilder.DOMBuilderFilter.FILTER_REJECT
FILTER_SKIP = xmlbuilder.DOMBuilderFilter.FILTER_SKIP
FILTER_INTERRUPT = xmlbuilder.DOMBuilderFilter.FILTER_INTERRUPT
# Shared DOMImplementation used to create every document built here.
theDOMImplementation = minidom.getDOMImplementation()
# Expat typename -> TypeInfo
# Maps the DTD attribute-type keywords reported by expat's AttlistDeclHandler
# to shared TypeInfo instances (no namespace component).
_typeinfo_map = {
    "CDATA": minidom.TypeInfo(None, "cdata"),
    "ENUM": minidom.TypeInfo(None, "enumeration"),
    "ENTITY": minidom.TypeInfo(None, "entity"),
    "ENTITIES": minidom.TypeInfo(None, "entities"),
    "ID": minidom.TypeInfo(None, "id"),
    "IDREF": minidom.TypeInfo(None, "idref"),
    "IDREFS": minidom.TypeInfo(None, "idrefs"),
    "NMTOKEN": minidom.TypeInfo(None, "nmtoken"),
    "NMTOKENS": minidom.TypeInfo(None, "nmtokens"),
    }
class ElementInfo(object):
__slots__ = '_attr_info', '_model', 'tagName'
def __init__(self, tagName, model=None):
self.tagName = tagName
self._attr_info = []
self._model = model
def __getstate__(self):
return self._attr_info, self._model, self.tagName
def __setstate__(self, state):
self._attr_info, self._model, self.tagName = state
def getAttributeType(self, aname):
for info in self._attr_info:
if info[1] == aname:
t = info[-2]
if t[0] == "(":
return _typeinfo_map["ENUM"]
else:
return _typeinfo_map[info[-2]]
return minidom._no_type
def getAttributeTypeNS(self, namespaceURI, localName):
return minidom._no_type
def isElementContent(self):
if self._model:
type = self._model[0]
return type not in (expat.model.XML_CTYPE_ANY,
expat.model.XML_CTYPE_MIXED)
else:
return False
def isEmpty(self):
if self._model:
return self._model[0] == expat.model.XML_CTYPE_EMPTY
else:
return False
def isId(self, aname):
for info in self._attr_info:
if info[1] == aname:
return info[-2] == "ID"
return False
def isIdNS(self, euri, ename, auri, aname):
# not sure this is meaningful
return self.isId((auri, aname))
def _intern(builder, s):
return builder._intern_setdefault(s, s)
def _parse_ns_name(builder, name):
assert ' ' in name
parts = name.split(' ')
intern = builder._intern_setdefault
if len(parts) == 3:
uri, localname, prefix = parts
prefix = intern(prefix, prefix)
qname = "%s:%s" % (prefix, localname)
qname = intern(qname, qname)
localname = intern(localname, localname)
elif len(parts) == 2:
uri, localname = parts
prefix = EMPTY_PREFIX
qname = localname = intern(localname, localname)
else:
raise ValueError("Unsupported syntax: spaces in URIs not supported: %r" % name)
return intern(uri, uri), localname, prefix, qname
class ExpatBuilder:
    """Document builder that uses Expat to build a ParsedXML.DOM document
    instance."""

    def __init__(self, options=None):
        if options is None:
            options = xmlbuilder.Options()
        self._options = options
        if self._options.filter is not None:
            self._filter = FilterVisibilityController(self._options.filter)
        else:
            self._filter = None
            # This *really* doesn't do anything in this case, so
            # override it with something fast & minimal.
            self._finish_start_element = id
        self._parser = None
        self.reset()

    def createParser(self):
        """Create a new parser object."""
        return expat.ParserCreate()

    def getParser(self):
        """Return the parser object, creating a new one if needed."""
        if not self._parser:
            self._parser = self.createParser()
            self._intern_setdefault = self._parser.intern.setdefault
            self._parser.buffer_text = True
            self._parser.ordered_attributes = True
            self._parser.specified_attributes = True
            self.install(self._parser)
        return self._parser

    def reset(self):
        """Free all data structures used during DOM construction."""
        self.document = theDOMImplementation.createDocument(
            EMPTY_NAMESPACE, None, None)
        self.curNode = self.document
        self._elem_info = self.document._elem_info
        self._cdata = False

    def install(self, parser):
        """Install the callbacks needed to build the DOM into the parser."""
        # This creates circular references!
        parser.StartDoctypeDeclHandler = self.start_doctype_decl_handler
        parser.StartElementHandler = self.first_element_handler
        parser.EndElementHandler = self.end_element_handler
        parser.ProcessingInstructionHandler = self.pi_handler
        if self._options.entities:
            parser.EntityDeclHandler = self.entity_decl_handler
            parser.NotationDeclHandler = self.notation_decl_handler
        if self._options.comments:
            parser.CommentHandler = self.comment_handler
        if self._options.cdata_sections:
            parser.StartCdataSectionHandler = self.start_cdata_section_handler
            parser.EndCdataSectionHandler = self.end_cdata_section_handler
            parser.CharacterDataHandler = self.character_data_handler_cdata
        else:
            parser.CharacterDataHandler = self.character_data_handler
        parser.ExternalEntityRefHandler = self.external_entity_ref_handler
        parser.XmlDeclHandler = self.xml_decl_handler
        parser.ElementDeclHandler = self.element_decl_handler
        parser.AttlistDeclHandler = self.attlist_decl_handler

    def parseFile(self, file):
        """Parse a document from a file object, returning the document
        node."""
        parser = self.getParser()
        first_buffer = True
        try:
            while 1:
                buffer = file.read(16*1024)
                if not buffer:
                    break
                parser.Parse(buffer, 0)
                if first_buffer and self.document.documentElement:
                    self._setup_subset(buffer)
                first_buffer = False
            parser.Parse("", True)
        except ParseEscape:
            pass
        doc = self.document
        self.reset()
        self._parser = None
        return doc

    def parseString(self, string):
        """Parse a document from a string, returning the document node."""
        parser = self.getParser()
        try:
            parser.Parse(string, True)
            self._setup_subset(string)
        except ParseEscape:
            pass
        doc = self.document
        self.reset()
        self._parser = None
        return doc

    def _setup_subset(self, buffer):
        """Load the internal subset if there might be one."""
        if self.document.doctype:
            extractor = InternalSubsetExtractor()
            extractor.parseString(buffer)
            subset = extractor.getSubset()
            self.document.doctype.internalSubset = subset

    def start_doctype_decl_handler(self, doctypeName, systemId, publicId,
                                   has_internal_subset):
        doctype = self.document.implementation.createDocumentType(
            doctypeName, publicId, systemId)
        doctype.ownerDocument = self.document
        _append_child(self.document, doctype)
        self.document.doctype = doctype
        if self._filter and self._filter.acceptNode(doctype) == FILTER_REJECT:
            self.document.doctype = None
            del self.document.childNodes[-1]
            doctype = None
            self._parser.EntityDeclHandler = None
            self._parser.NotationDeclHandler = None
        if has_internal_subset:
            if doctype is not None:
                doctype.entities._seq = []
                doctype.notations._seq = []
            self._parser.CommentHandler = None
            self._parser.ProcessingInstructionHandler = None
            self._parser.EndDoctypeDeclHandler = self.end_doctype_decl_handler

    def end_doctype_decl_handler(self):
        if self._options.comments:
            self._parser.CommentHandler = self.comment_handler
        self._parser.ProcessingInstructionHandler = self.pi_handler
        if not (self._elem_info or self._filter):
            self._finish_end_element = id

    def pi_handler(self, target, data):
        node = self.document.createProcessingInstruction(target, data)
        _append_child(self.curNode, node)
        if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
            self.curNode.removeChild(node)

    def character_data_handler_cdata(self, data):
        childNodes = self.curNode.childNodes
        if self._cdata:
            if (  self._cdata_continue
                  and childNodes[-1].nodeType == CDATA_SECTION_NODE):
                childNodes[-1].appendData(data)
                return
            node = self.document.createCDATASection(data)
            self._cdata_continue = True
        elif childNodes and childNodes[-1].nodeType == TEXT_NODE:
            # Merge adjacent character-data callbacks into one Text node so
            # no separate normalization pass is needed.
            node = childNodes[-1]
            value = node.data + data
            node.data = value
            return
        else:
            node = minidom.Text()
            node.data = data
            node.ownerDocument = self.document
        _append_child(self.curNode, node)

    def character_data_handler(self, data):
        childNodes = self.curNode.childNodes
        if childNodes and childNodes[-1].nodeType == TEXT_NODE:
            node = childNodes[-1]
            node.data = node.data + data
            return
        node = minidom.Text()
        node.data = node.data + data
        node.ownerDocument = self.document
        _append_child(self.curNode, node)

    def entity_decl_handler(self, entityName, is_parameter_entity, value,
                            base, systemId, publicId, notationName):
        if is_parameter_entity:
            # we don't care about parameter entities for the DOM
            return
        if not self._options.entities:
            return
        node = self.document._create_entity(entityName, publicId,
                                            systemId, notationName)
        if value is not None:
            # internal entity
            # node *should* be readonly, but we'll cheat
            child = self.document.createTextNode(value)
            node.childNodes.append(child)
        self.document.doctype.entities._seq.append(node)
        if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
            del self.document.doctype.entities._seq[-1]

    def notation_decl_handler(self, notationName, base, systemId, publicId):
        node = self.document._create_notation(notationName, publicId, systemId)
        self.document.doctype.notations._seq.append(node)
        # Bug fix: remove the notation only when the filter REJECTS it.  The
        # original tested FILTER_ACCEPT, which inverted the filter's meaning
        # and disagreed with entity_decl_handler above.
        if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
            del self.document.doctype.notations._seq[-1]

    def comment_handler(self, data):
        node = self.document.createComment(data)
        _append_child(self.curNode, node)
        if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
            self.curNode.removeChild(node)

    def start_cdata_section_handler(self):
        self._cdata = True
        self._cdata_continue = False

    def end_cdata_section_handler(self):
        self._cdata = False
        self._cdata_continue = False

    def external_entity_ref_handler(self, context, base, systemId, publicId):
        return 1

    def first_element_handler(self, name, attributes):
        # One-shot handler: with no filter and no element info we can skip
        # the per-element finish work entirely for the rest of the parse.
        if self._filter is None and not self._elem_info:
            self._finish_end_element = id
        self.getParser().StartElementHandler = self.start_element_handler
        self.start_element_handler(name, attributes)

    def start_element_handler(self, name, attributes):
        node = self.document.createElement(name)
        _append_child(self.curNode, node)
        self.curNode = node
        if attributes:
            # ordered_attributes=True: attributes is a flat [name, value, ...]
            for i in range(0, len(attributes), 2):
                a = minidom.Attr(attributes[i], EMPTY_NAMESPACE,
                                 None, EMPTY_PREFIX)
                value = attributes[i+1]
                a.value = value
                a.ownerDocument = self.document
                _set_attribute_node(node, a)
        if node is not self.document.documentElement:
            self._finish_start_element(node)

    def _finish_start_element(self, node):
        if self._filter:
            # To be general, we'd have to call isSameNode(), but this
            # is sufficient for minidom:
            if node is self.document.documentElement:
                return
            filt = self._filter.startContainer(node)
            if filt == FILTER_REJECT:
                # ignore this node & all descendents
                Rejecter(self)
            elif filt == FILTER_SKIP:
                # ignore this node, but make it's children become
                # children of the parent node
                Skipper(self)
            else:
                return
            self.curNode = node.parentNode
            node.parentNode.removeChild(node)
            node.unlink()

    # If this ever changes, Namespaces.end_element_handler() needs to
    # be changed to match.
    #
    def end_element_handler(self, name):
        curNode = self.curNode
        self.curNode = curNode.parentNode
        self._finish_end_element(curNode)

    def _finish_end_element(self, curNode):
        info = self._elem_info.get(curNode.tagName)
        if info:
            self._handle_white_text_nodes(curNode, info)
        if self._filter:
            if curNode is self.document.documentElement:
                return
            if self._filter.acceptNode(curNode) == FILTER_REJECT:
                self.curNode.removeChild(curNode)
                curNode.unlink()

    def _handle_white_text_nodes(self, node, info):
        if (self._options.whitespace_in_element_content
            or not info.isElementContent()):
            return

        # We have element type information and should remove ignorable
        # whitespace; identify for text nodes which contain only
        # whitespace.
        L = []
        for child in node.childNodes:
            if child.nodeType == TEXT_NODE and not child.data.strip():
                L.append(child)

        # Remove ignorable whitespace from the tree.
        for child in L:
            node.removeChild(child)

    def element_decl_handler(self, name, model):
        info = self._elem_info.get(name)
        if info is None:
            self._elem_info[name] = ElementInfo(name, model)
        else:
            assert info._model is None
            info._model = model

    def attlist_decl_handler(self, elem, name, type, default, required):
        info = self._elem_info.get(elem)
        if info is None:
            info = ElementInfo(elem)
            self._elem_info[elem] = info
        info._attr_info.append(
            [None, name, None, None, default, 0, type, required])

    def xml_decl_handler(self, version, encoding, standalone):
        self.document.version = version
        self.document.encoding = encoding
        # This is still a little ugly, thanks to the pyexpat API. ;-(
        if standalone >= 0:
            if standalone:
                self.document.standalone = True
            else:
                self.document.standalone = False
# Don't include FILTER_INTERRUPT, since that's checked separately
# where allowed.
# Legal return values from a DOMBuilderFilter callback.
_ALLOWED_FILTER_RETURNS = (FILTER_ACCEPT, FILTER_REJECT, FILTER_SKIP)
class FilterVisibilityController(object):
    """Wrapper around a DOMBuilderFilter which implements the checks
    to make the whatToShow filter attribute work."""

    __slots__ = 'filter',

    def __init__(self, filter):
        self.filter = filter

    def startContainer(self, node):
        # Node types masked out by whatToShow are implicitly accepted
        # without consulting the wrapped filter.
        if not (self.filter.whatToShow & self._nodetype_mask[node.nodeType]):
            return FILTER_ACCEPT
        result = self.filter.startContainer(node)
        if result == FILTER_INTERRUPT:
            raise ParseEscape
        if result not in _ALLOWED_FILTER_RETURNS:
            raise ValueError(
                "startContainer() returned illegal value: " + repr(result))
        return result

    def acceptNode(self, node):
        if not (self.filter.whatToShow & self._nodetype_mask[node.nodeType]):
            return FILTER_ACCEPT
        result = self.filter.acceptNode(node)
        if result == FILTER_INTERRUPT:
            raise ParseEscape
        if result == FILTER_SKIP:
            # Splice the node's children into its parent; the node itself is
            # removed by the caller, so report it as rejected.
            parent = node.parentNode
            for child in node.childNodes[:]:
                parent.appendChild(child)
            return FILTER_REJECT
        if result not in _ALLOWED_FILTER_RETURNS:
            raise ValueError(
                "acceptNode() returned illegal value: " + repr(result))
        return result

    # nodeType code -> whatToShow bit for that node type.
    _nodetype_mask = {
        Node.ELEMENT_NODE: NodeFilter.SHOW_ELEMENT,
        Node.ATTRIBUTE_NODE: NodeFilter.SHOW_ATTRIBUTE,
        Node.TEXT_NODE: NodeFilter.SHOW_TEXT,
        Node.CDATA_SECTION_NODE: NodeFilter.SHOW_CDATA_SECTION,
        Node.ENTITY_REFERENCE_NODE: NodeFilter.SHOW_ENTITY_REFERENCE,
        Node.ENTITY_NODE: NodeFilter.SHOW_ENTITY,
        Node.PROCESSING_INSTRUCTION_NODE: NodeFilter.SHOW_PROCESSING_INSTRUCTION,
        Node.COMMENT_NODE: NodeFilter.SHOW_COMMENT,
        Node.DOCUMENT_NODE: NodeFilter.SHOW_DOCUMENT,
        Node.DOCUMENT_TYPE_NODE: NodeFilter.SHOW_DOCUMENT_TYPE,
        Node.DOCUMENT_FRAGMENT_NODE: NodeFilter.SHOW_DOCUMENT_FRAGMENT,
        Node.NOTATION_NODE: NodeFilter.SHOW_NOTATION,
        }
class FilterCrutch(object):
    """Base helper that temporarily hijacks a builder's element handlers
    while a filtered subtree is consumed; subclasses supply the handlers."""

    __slots__ = '_builder', '_level', '_old_start', '_old_end'

    def __init__(self, builder):
        self._builder = builder
        self._level = 0
        parser = builder._parser
        # Remember the real handlers so subclasses can restore them later.
        self._old_start = parser.StartElementHandler
        self._old_end = parser.EndElementHandler
        parser.StartElementHandler = self.start_element_handler
        parser.EndElementHandler = self.end_element_handler
class Rejecter(FilterCrutch):
    """Discard an entire rejected subtree: all content handlers are silenced
    until the end tag matching the rejected element is seen."""

    __slots__ = ()

    def __init__(self, builder):
        FilterCrutch.__init__(self, builder)
        parser = builder._parser
        # Nothing inside the rejected subtree may reach the DOM.
        silenced = ("ProcessingInstructionHandler",
                    "CommentHandler",
                    "CharacterDataHandler",
                    "StartCdataSectionHandler",
                    "EndCdataSectionHandler",
                    "ExternalEntityRefHandler",
                    )
        for handler_name in silenced:
            setattr(parser, handler_name, None)

    def start_element_handler(self, *args):
        # Track nesting depth inside the rejected subtree.
        self._level += 1

    def end_element_handler(self, *args):
        if self._level:
            self._level -= 1
        else:
            # Leaving the rejected element: reinstall the normal handlers.
            parser = self._builder._parser
            self._builder.install(parser)
            parser.StartElementHandler = self._old_start
            parser.EndElementHandler = self._old_end
class Skipper(FilterCrutch):
    """Drop a skipped element but keep its children, which are handed to the
    original handlers and so become children of the skipped node's parent."""

    __slots__ = ()

    def start_element_handler(self, *args):
        before = self._builder.curNode
        self._old_start(*args)
        # The original handler pushed a new element: one level deeper.
        if self._builder.curNode is not before:
            self._level += 1

    def end_element_handler(self, *args):
        if self._level:
            self._level -= 1
            self._old_end(*args)
        else:
            # We're popping back out of the node we're skipping, so we
            # shouldn't need to do anything but reset the handlers.
            parser = self._builder._parser
            parser.StartElementHandler = self._old_start
            parser.EndElementHandler = self._old_end
            self._builder = None
# framework document used by the fragment builder.
# Takes a string for the doctype, subset string, and namespace attrs string.
_FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID = \
"http://xml.python.org/entities/fragment-builder/internal"
_FRAGMENT_BUILDER_TEMPLATE = (
'''\
<!DOCTYPE wrapper
%%s [
<!ENTITY fragment-builder-internal
SYSTEM "%s">
%%s
]>
<wrapper %%s
>&fragment-builder-internal;</wrapper>'''
% _FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID)
class FragmentBuilder(ExpatBuilder):
    """Builder which constructs document fragments given XML source
    text and a context node.

    The context node is expected to provide information about the
    namespace declarations which are in scope at the start of the
    fragment.
    """

    def __init__(self, context, options=None):
        if context.nodeType == DOCUMENT_NODE:
            self.originalDocument = context
            self.context = context
        else:
            self.originalDocument = context.ownerDocument
            self.context = context
        ExpatBuilder.__init__(self, options)

    def reset(self):
        ExpatBuilder.reset(self)
        self.fragment = None

    def parseFile(self, file):
        """Parse a document fragment from a file object, returning the
        fragment node."""
        return self.parseString(file.read())

    def parseString(self, string):
        """Parse a document fragment from a string, returning the
        fragment node."""
        self._source = string
        parser = self.getParser()
        doctype = self.originalDocument.doctype
        ident = ""
        if doctype:
            subset = doctype.internalSubset or self._getDeclarations()
            if doctype.publicId:
                ident = ('PUBLIC "%s" "%s"'
                         % (doctype.publicId, doctype.systemId))
            elif doctype.systemId:
                ident = 'SYSTEM "%s"' % doctype.systemId
        else:
            subset = ""
        nsattrs = self._getNSattrs() # get ns decls from node's ancestors
        # Wrap the fragment source in a framework document; the fragment
        # itself is pulled in via external_entity_ref_handler below.
        document = _FRAGMENT_BUILDER_TEMPLATE % (ident, subset, nsattrs)
        try:
            parser.Parse(document, 1)
        except:
            self.reset()
            raise
        fragment = self.fragment
        self.reset()
##         self._parser = None
        return fragment

    def _getDeclarations(self):
        """Re-create the internal subset from the DocumentType node.

        This is only needed if we don't already have the
        internalSubset as a string.
        """
        doctype = self.context.ownerDocument.doctype
        s = ""
        if doctype:
            for i in range(doctype.notations.length):
                notation = doctype.notations.item(i)
                if s:
                    s = s + "\n  "
                s = "%s<!NOTATION %s" % (s, notation.nodeName)
                if notation.publicId:
                    s = '%s PUBLIC "%s"\n             "%s">' \
                        % (s, notation.publicId, notation.systemId)
                else:
                    s = '%s SYSTEM "%s">' % (s, notation.systemId)
            for i in range(doctype.entities.length):
                entity = doctype.entities.item(i)
                if s:
                    s = s + "\n  "
                s = "%s<!ENTITY %s" % (s, entity.nodeName)
                if entity.publicId:
                    s = '%s PUBLIC "%s"\n             "%s"' \
                        % (s, entity.publicId, entity.systemId)
                elif entity.systemId:
                    s = '%s SYSTEM "%s"' % (s, entity.systemId)
                else:
                    s = '%s "%s"' % (s, entity.firstChild.data)
                if entity.notationName:
                    s = "%s NOTATION %s" % (s, entity.notationName)
                s = s + ">"
        return s

    def _getNSattrs(self):
        # Hook for namespace-aware subclasses; no declarations by default.
        return ""

    def external_entity_ref_handler(self, context, base, systemId, publicId):
        if systemId == _FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID:
            # this entref is the one that we made to put the subtree
            # in; all of our given input is parsed in here.
            old_document = self.document
            old_cur_node = self.curNode
            parser = self._parser.ExternalEntityParserCreate(context)
            # put the real document back, parse into the fragment to return
            self.document = self.originalDocument
            self.fragment = self.document.createDocumentFragment()
            self.curNode = self.fragment
            try:
                parser.Parse(self._source, 1)
            finally:
                self.curNode = old_cur_node
                self.document = old_document
                self._source = None
            return -1
        else:
            return ExpatBuilder.external_entity_ref_handler(
                self, context, base, systemId, publicId)
class Namespaces:
    """Mix-in class for builders; adds support for namespaces."""

    def _initNamespaces(self):
        # list of (prefix, uri) ns declarations.  Namespace attrs are
        # constructed from this and added to the element's attrs.
        self._ns_ordered_prefixes = []

    def createParser(self):
        """Create a new namespace-handling parser."""
        parser = expat.ParserCreate(namespace_separator=" ")
        parser.namespace_prefixes = True
        return parser

    def install(self, parser):
        """Insert the namespace-handlers onto the parser."""
        ExpatBuilder.install(self, parser)
        if self._options.namespace_declarations:
            parser.StartNamespaceDeclHandler = (
                self.start_namespace_decl_handler)

    def start_namespace_decl_handler(self, prefix, uri):
        """Push this namespace declaration on our storage."""
        self._ns_ordered_prefixes.append((prefix, uri))

    def start_element_handler(self, name, attributes):
        # Expat reports namespaced names as "uri localname [prefix]".
        if ' ' in name:
            uri, localname, prefix, qname = _parse_ns_name(self, name)
        else:
            uri = EMPTY_NAMESPACE
            qname = name
            localname = None
            prefix = EMPTY_PREFIX
        node = minidom.Element(qname, uri, prefix, localname)
        node.ownerDocument = self.document
        _append_child(self.curNode, node)
        self.curNode = node

        if self._ns_ordered_prefixes:
            # Materialize pending namespace declarations as xmlns attributes.
            for prefix, uri in self._ns_ordered_prefixes:
                if prefix:
                    a = minidom.Attr(_intern(self, 'xmlns:' + prefix),
                                     XMLNS_NAMESPACE, prefix, "xmlns")
                else:
                    a = minidom.Attr("xmlns", XMLNS_NAMESPACE,
                                     "xmlns", EMPTY_PREFIX)
                a.value = uri
                a.ownerDocument = self.document
                _set_attribute_node(node, a)
            del self._ns_ordered_prefixes[:]

        if attributes:
            node._ensure_attributes()
            _attrs = node._attrs
            _attrsNS = node._attrsNS
            # ordered_attributes=True: flat [name, value, ...] list.
            for i in range(0, len(attributes), 2):
                aname = attributes[i]
                value = attributes[i+1]
                if ' ' in aname:
                    uri, localname, prefix, qname = _parse_ns_name(self, aname)
                    a = minidom.Attr(qname, uri, localname, prefix)
                    _attrs[qname] = a
                    _attrsNS[(uri, localname)] = a
                else:
                    a = minidom.Attr(aname, EMPTY_NAMESPACE,
                                     aname, EMPTY_PREFIX)
                    _attrs[aname] = a
                    _attrsNS[(EMPTY_NAMESPACE, aname)] = a
                a.ownerDocument = self.document
                a.value = value
                a.ownerElement = node

    if __debug__:
        # This only adds some asserts to the original
        # end_element_handler(), so we only define this when -O is not
        # used.  If changing one, be sure to check the other to see if
        # it needs to be changed as well.
        #
        def end_element_handler(self, name):
            curNode = self.curNode
            if ' ' in name:
                uri, localname, prefix, qname = _parse_ns_name(self, name)
                assert (curNode.namespaceURI == uri
                        and curNode.localName == localname
                        and curNode.prefix == prefix), \
                        "element stack messed up! (namespace)"
            else:
                assert curNode.nodeName == name, \
                       "element stack messed up - bad nodeName"
                assert curNode.namespaceURI == EMPTY_NAMESPACE, \
                       "element stack messed up - bad namespaceURI"
            self.curNode = curNode.parentNode
            self._finish_end_element(curNode)
class ExpatBuilderNS(Namespaces, ExpatBuilder):
    """Document builder that supports namespaces."""
    def reset(self):
        # Reset base builder state first, then the namespace bookkeeping.
        ExpatBuilder.reset(self)
        self._initNamespaces()
class FragmentBuilderNS(Namespaces, FragmentBuilder):
    """Fragment builder that supports namespaces."""
    def reset(self):
        FragmentBuilder.reset(self)
        self._initNamespaces()
    def _getNSattrs(self):
        """Return string of namespace attributes from this element and
        ancestors."""
        # XXX This needs to be re-written to walk the ancestors of the
        # context to build up the namespace information from
        # declarations, elements, and attributes found in context.
        # Otherwise we have to store a bunch more data on the DOM
        # (though that *might* be more reliable -- not clear).
        attrs = ""
        context = self.context
        L = []
        # Walk up the ancestor chain; the nearest declaration of a prefix
        # wins, so already-seen prefixes are skipped.
        # NOTE(review): relies on a '_ns_prefix_uri' mapping being stored
        # on DOM nodes elsewhere -- confirm it is actually maintained.
        while context:
            if hasattr(context, '_ns_prefix_uri'):
                for prefix, uri in context._ns_prefix_uri.items():
                    # add every new NS decl from context to L and attrs string
                    if prefix in L:
                        continue
                    L.append(prefix)
                    if prefix:
                        declname = "xmlns:" + prefix
                    else:
                        declname = "xmlns"
                    if attrs:
                        attrs = "%s\n %s='%s'" % (attrs, declname, uri)
                    else:
                        attrs = " %s='%s'" % (declname, uri)
            context = context.parentNode
        return attrs
class ParseEscape(Exception):
    """Exception raised to short-circuit parsing in InternalSubsetExtractor."""
    # Purely internal control flow: always raised and caught within this
    # module, never surfaced to callers.
    pass
class InternalSubsetExtractor(ExpatBuilder):
    """XML processor which can rip out the internal document type subset."""
    # Filled in by the handlers below: a list while accumulating, then the
    # final string after end_doctype_decl_handler runs.
    subset = None
    def getSubset(self):
        """Return the internal subset as a string."""
        return self.subset
    def parseFile(self, file):
        # ParseEscape signals "done early", not an error.
        try:
            ExpatBuilder.parseFile(self, file)
        except ParseEscape:
            pass
    def parseString(self, string):
        try:
            ExpatBuilder.parseString(self, string)
        except ParseEscape:
            pass
    def install(self, parser):
        # Only two handlers are needed: everything before/after the
        # DOCTYPE internal subset aborts the parse.
        parser.StartDoctypeDeclHandler = self.start_doctype_decl_handler
        parser.StartElementHandler = self.start_element_handler
    def start_doctype_decl_handler(self, name, publicId, systemId,
                                   has_internal_subset):
        if has_internal_subset:
            # Route all raw character data of the subset into self.subset.
            parser = self.getParser()
            self.subset = []
            parser.DefaultHandler = self.subset.append
            parser.EndDoctypeDeclHandler = self.end_doctype_decl_handler
        else:
            # No internal subset: nothing to extract, stop parsing.
            raise ParseEscape()
    def end_doctype_decl_handler(self):
        # Join the accumulated chunks and normalize line endings to '\n'.
        s = ''.join(self.subset).replace('\r\n', '\n').replace('\r', '\n')
        self.subset = s
        raise ParseEscape()
    def start_element_handler(self, name, attrs):
        # Reached the document element, so the DOCTYPE (if any) is over.
        raise ParseEscape()
def parse(file, namespaces=True):
    """Parse a document, returning the resulting Document node.
    'file' may be either a file name or an open file object.
    """
    builder = ExpatBuilderNS() if namespaces else ExpatBuilder()
    if isinstance(file, str):
        # Treat a string argument as a file name; open in binary mode so
        # expat handles the encoding declaration itself.
        with open(file, 'rb') as fp:
            return builder.parseFile(fp)
    # Already an open file-like object: parse it directly (not closed here).
    return builder.parseFile(file)
def parseString(string, namespaces=True):
    """Parse a document from a string, returning the resulting
    Document node.
    """
    # Namespace-aware builder by default; plain builder on request.
    builder = ExpatBuilderNS() if namespaces else ExpatBuilder()
    return builder.parseString(string)
def parseFragment(file, context, namespaces=True):
    """Parse a fragment of a document, given the context from which it
    was originally extracted. context should be the parent of the
    node(s) which are in the fragment.
    'file' may be either a file name or an open file object.
    """
    builder = (FragmentBuilderNS(context) if namespaces
               else FragmentBuilder(context))
    if isinstance(file, str):
        # String argument is a file name; open in binary mode for expat.
        with open(file, 'rb') as fp:
            return builder.parseFile(fp)
    return builder.parseFile(file)
def parseFragmentString(string, context, namespaces=True):
    """Parse a fragment of a document from a string, given the context
    from which it was originally extracted. context should be the
    parent of the node(s) which are in the fragment.
    """
    builder = (FragmentBuilderNS(context) if namespaces
               else FragmentBuilder(context))
    return builder.parseString(string)
def makeBuilder(options):
    """Create a builder based on an Options object."""
    # Select the builder class by the namespaces flag, then instantiate.
    builder_class = ExpatBuilderNS if options.namespaces else ExpatBuilder
    return builder_class(options)
| lgpl-3.0 |
foobarbazblarg/stayclean | stayclean-2018-september/participant.py | 60 | 1524 | import datetime
class Participant:
    """One challenge participant, (de)serialized to one line of
    participants.txt."""
    def __init__(self):
        self.name = ""
        self.isStillIn = True
        self.hasCheckedIn = False
        self.relapseDate = None  # datetime.date, or None if no relapse
    @property
    def hasRelapsed(self):
        """True once a relapse date has been recorded."""
        return self.relapseDate is not None
    def setFromLine(self, lineString):
        """Populate this participant from one whitespace-separated line.

        Format of a participants.txt line:
            name hasCheckedIn isStillIn [relapseDate]
        e.g.:
            foobarbazblarg True True 2018.09.03
        """
        words = lineString.split()
        self.name = words[0]
        self.hasCheckedIn = words[1] == 'True'
        self.isStillIn = words[2] == 'True'
        if len(words) >= 4:
            self.relapseDate = datetime.datetime.strptime(words[3], "%Y.%m.%d").date()
    def relapseNowIfNotAlready(self):
        """Record a relapse today unless one is already recorded."""
        if self.isStillIn:
            self.isStillIn = False
            self.relapseDate = datetime.date.today()
    def relapseDayOfWeekIndex(self):
        """Weekday index of the relapse (0=Monday .. 6=Sunday), or None."""
        if self.relapseDate:
            return self.relapseDate.weekday()
        else:
            return None
    def relapseDayOfWeekName(self):
        """English weekday name of the relapse, or None if no relapse.

        Bug fix: the index was previously tested for truthiness, so a
        Monday relapse (index 0) incorrectly returned None; test against
        None explicitly instead.
        """
        index = self.relapseDayOfWeekIndex()
        if index is not None:
            return {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday'}[index]
        else:
            return None
    def asLine(self):
        """Serialize back to the participants.txt line format."""
        answer = self.name + " " + str(self.hasCheckedIn) + " " + str(self.isStillIn)
        if self.relapseDate:
            answer += " "
            answer += self.relapseDate.strftime("%Y.%m.%d")
        return answer
| mit |
amitdo/ocropy | OLD/mlp.py | 15 | 22381 | ################################################################
### Native code neural network with backpropagation training.
################################################################
from __future__ import with_statement
__all__ = "MLP".split()
import os,os.path
import random as pyrandom
from random import sample as selection, uniform
from numpy import *
from pylab import *
from scipy import *
import common as ocrolib
from native import *
class Record:
    """Lightweight attribute bag: Record(a=1, b=2) stores each keyword
    argument as an instance attribute."""
    def __init__(self, **attributes):
        self.__dict__.update(attributes)
    def __str__(self):
        # Render as the underlying attribute dictionary.
        return str(self.__dict__)
def c_order(a):
    """Check whether the elements of the array are in C order."""
    # C (row-major) order means strides are non-increasing from the
    # outermost axis inward, i.e. already sorted in descending order.
    strides = tuple(a.strides)
    return strides == tuple(sorted(strides, reverse=1))
def error(net,data,cls,subset=None):
    """Compute the error rate of the classifier on the given data."""
    # NOTE(review): despite the docstring, this returns the absolute COUNT
    # of misclassified samples, not a rate -- callers in this module divide
    # by len(data) themselves.
    predicted = net.classify(data,subset=subset)
    if subset is not None:
        # Restrict the reference labels to the same subset the classifier
        # was evaluated on.
        cls = take(cls,subset)
    assert predicted.shape==cls.shape,\
        "wrong shape (predicted vs cls): %s != %s"%(predicted.shape,cls.shape)
    return sum(cls!=predicted)
def finite(x):
    "Make sure that all entries of x are finite."
    # Equivalent by De Morgan: no NaNs and no infinities anywhere in x.
    return not (isnan(x).any() or isinf(x).any())
nnet_native_c = r'''
#include <math.h>
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <omp.h>
int verbose = 0;
int maxthreads = 1;
int maxthreads_train = 4;
double sigmoid(double x);
double max(double x,double y);
#define MIN(x,y) ((x)<(y)?(x):(y))
inline double sigmoid(double x) {
if(x<-200) x = -200;
else if(x>200) x = 200;
return 1.0/(1.0+exp(-x));
}
inline double max(double x,double y) {
if(x>y) return x; else return y;
}
void forward(int n,int m,int l,float w1[m][n],float b1[m],float w2[l][m],float b2[l],
int k,float data[k][n],float outputs[k][l]) {
if(verbose) printf("forward %d:%d:%d (%d)\n",n,m,l,k);
#pragma omp parallel for num_threads (maxthreads)
for(int row=0;row<k;row++) {
float *x = data[row];
float y[m];
float *z = outputs[row];
for(int i=0;i<m;i++) {
double total = b1[i];
for(int j=0;j<n;j++) total += w1[i][j]*x[j];
y[i] = sigmoid(total);
}
for(int i=0;i<l;i++) {
double total = b2[i];
for(int j=0;j<m;j++) total += w2[i][j]*y[j];
z[i] = sigmoid(total);
}
}
}
int argmax(int k,float z[k]) {
int mi = 0;
float mv = z[0];
for(int i=1;i<k;i++) {
if(z[i]<mv) continue;
mv = z[i];
mi = i;
}
return mi;
}
void classify(int n,int m,int l,float w1[m][n],float b1[m],float w2[l][m],float b2[l],
int k,float data[k][n],int classes[k]) {
if(verbose) printf("classify %d:%d:%d (%d)\n",n,m,l,k);
#pragma omp parallel for num_threads (maxthreads)
for(int row=0;row<k;row++) {
float *x = data[row];
float y[m];
float z[l];
for(int i=0;i<m;i++) {
double total = b1[i];
for(int j=0;j<n;j++) total += w1[i][j]*x[j];
y[i] = sigmoid(total);
}
for(int i=0;i<l;i++) {
double total = b2[i];
for(int j=0;j<m;j++) total += w2[i][j]*y[j];
z[i] = sigmoid(total);
}
classes[row] = argmax(l,z);
}
}
void backward(int n,int m,int l,float w1[m][n],float b1[m],float w2[l][m],float b2[l],
int k,float data[k][n],int classes[k],float eta,int ntrain,
int nsamples,int samples[nsamples]) {
if(verbose) printf("backward %d:%d:%d (%d)\n",n,m,l,k);
assert(eta>0.0);
assert(eta<10.0);
/* NB: these are lock-free parallel updates */
#pragma omp parallel for num_threads (maxthreads)
for(int trial=0;trial<ntrain;trial++) {
int row;
if(nsamples>0) row = samples[(unsigned)(19.73*k*sin(trial))%nsamples];
else row = (unsigned)(19.73*k*sin(trial))%k;
// forward pass
float *x = data[row];
float y[m],z[l],delta2[l],delta1[m];
for(int i=0;i<m;i++) {
double total = b1[i];
for(int j=0;j<n;j++) total += w1[i][j]*x[j];
y[i] = sigmoid(total);
assert(!isnan(y[i]));
}
for(int i=0;i<l;i++) {
double total = b2[i];
for(int j=0;j<m;j++) total += w2[i][j]*y[j];
z[i] = sigmoid(total);
assert(!isnan(z[i]));
}
// backward pass
int cls = classes[row];
for(int i=0;i<l;i++) {
double total = (z[i]-(i==cls));
delta2[i] = total * z[i] * (1-z[i]);
}
for(int i=0;i<m;i++) {
double total = 0.0;
for(int j=0;j<l;j++)
total += delta2[j] * w2[j][i];
delta1[i] = total * y[i] * (1-y[i]);
}
for(int i=0;i<l;i++) {
for(int j=0;j<m;j++) {
w2[i][j] -= eta*delta2[i]*y[j];
}
}
for(int i=0;i<m;i++) {
for(int j=0;j<n;j++) {
w1[i][j] -= eta*delta1[i]*x[j];
}
}
}
}
typedef signed char byte;
#define BSCALE 100.0
void forward_b(int n,int m,int l,float w1[m][n],float b1[m],float w2[l][m],float b2[l],
int k,byte data[k][n],float outputs[k][l]) {
if(verbose) printf("forward %d:%d:%d (%d)\n",n,m,l,k);
#pragma omp parallel for num_threads (maxthreads)
for(int row=0;row<k;row++) {
byte *x = data[row];
float y[m];
float *z = outputs[row];
for(int i=0;i<m;i++) {
double total = b1[i];
for(int j=0;j<n;j++) total += w1[i][j]*x[j]/BSCALE;
y[i] = sigmoid(total);
}
for(int i=0;i<l;i++) {
double total = b2[i];
for(int j=0;j<m;j++) total += w2[i][j]*y[j];
z[i] = sigmoid(total);
}
}
}
void classify_b(int n,int m,int l,float w1[m][n],float b1[m],float w2[l][m],float b2[l],
int k,byte data[k][n],int classes[k]) {
if(verbose) printf("classify %d:%d:%d (%d)\n",n,m,l,k);
#pragma omp parallel for num_threads (maxthreads)
for(int row=0;row<k;row++) {
byte *x = data[row];
float y[m];
float z[l];
for(int i=0;i<m;i++) {
double total = b1[i];
for(int j=0;j<n;j++) total += w1[i][j]*x[j]/BSCALE;
y[i] = sigmoid(total);
}
for(int i=0;i<l;i++) {
double total = b2[i];
for(int j=0;j<m;j++) total += w2[i][j]*y[j];
z[i] = sigmoid(total);
}
classes[row] = argmax(l,z);
}
}
void backward_b(int n,int m,int l,float w1[m][n],float b1[m],float w2[l][m],float b2[l],
int k,byte data[k][n],int classes[k],float eta,int ntrain,
int nsamples,int samples[nsamples]) {
if(verbose) printf("backward %d:%d:%d (%d)\n",n,m,l,k);
assert(eta>0.0);
assert(eta<10.0);
/* NB: these are lock-free parallel updates */
#pragma omp parallel for num_threads (maxthreads_train)
for(int trial=0;trial<ntrain;trial++) {
int row;
if(nsamples>0) row = samples[(unsigned)(19.73*k*sin(trial))%nsamples];
else row = (unsigned)(19.73*k*sin(trial))%k;
// forward pass
byte *x = data[row];
float y[m],z[l],delta2[l],delta1[m];
for(int i=0;i<m;i++) {
double total = b1[i];
for(int j=0;j<n;j++) total += w1[i][j]*x[j]/BSCALE;
y[i] = sigmoid(total);
assert(!isnan(y[i]));
}
for(int i=0;i<l;i++) {
double total = b2[i];
for(int j=0;j<m;j++) total += w2[i][j]*y[j];
z[i] = sigmoid(total);
assert(!isnan(z[i]));
}
// backward pass
int cls = classes[row];
for(int i=0;i<l;i++) {
double total = (z[i]-(i==cls));
delta2[i] = total * z[i] * (1-z[i]);
}
for(int i=0;i<m;i++) {
double total = 0.0;
for(int j=0;j<l;j++)
total += delta2[j] * w2[j][i];
delta1[i] = total * y[i] * (1-y[i]);
}
for(int i=0;i<l;i++) {
for(int j=0;j<m;j++) {
w2[i][j] -= eta*delta2[i]*y[j];
}
}
for(int i=0;i<m;i++) {
for(int j=0;j<n;j++) {
w1[i][j] -= eta*delta1[i]*x[j]/BSCALE;
}
}
}
}
'''
# Handle to the compiled native library; populated lazily by
# nnet_native_load() on first use.
nnet_native = None
def nnet_native_load():
    """Compile and load the native MLP code (idempotent)."""
    global nnet_native
    if nnet_native is not None: return
    nnet_native = compile_and_load(nnet_native_c)
    # ctypes signatures; the I/F/A2F/A1I/... aliases come from the
    # 'native' module's star import and must match the C prototypes above.
    nnet_native.forward.argtypes = [I,I,I,A2F,A1F,A2F,A1F, I,A2F,A2F]
    nnet_native.classify.argtypes = [I,I,I,A2F,A1F,A2F,A1F, I,A2F,A1I]
    nnet_native.backward.argtypes = [I,I,I,A2F,A1F,A2F,A1F, I,A2F,A1I,F,I,I,A1I]
    nnet_native.forward_b.argtypes = [I,I,I,A2F,A1F,A2F,A1F, I,A2B,A2F]
    nnet_native.classify_b.argtypes = [I,I,I,A2F,A1F,A2F,A1F, I,A2B,A1I]
    nnet_native.backward_b.argtypes = [I,I,I,A2F,A1F,A2F,A1F, I,A2B,A1I,F,I,I,A1I]
    # Bind the library's global ints so Python can read/write them.
    global nverbose,maxthreads,maxthreads_train
    nverbose = c_int.in_dll(nnet_native,"verbose")
    maxthreads = c_int.in_dll(nnet_native,"maxthreads")
    maxthreads_train = c_int.in_dll(nnet_native,"maxthreads_train")
    # Environment overrides for the OpenMP thread counts.
    if os.getenv("mlp_maxthreads") is not None:
        maxthreads.value = int(os.getenv("mlp_maxthreads"))
    if os.getenv("mlp_maxthreads_train") is not None:
        maxthreads_train.value = int(os.getenv("mlp_maxthreads_train"))
class MLP:
def __init__(self,**kw):
self.w1 = None
self.verbose = 0
self.etas = [(0.1,100000)]*30
ocrolib.set_params(self,kw,warn=0)
self.eta = 0.1
self.error_rate = 0
self.training_log = []
def copy(self):
mlp = MLP()
mlp.w1 = self.w1.copy()
mlp.w2 = self.w2.copy()
mlp.b1 = self.b1.copy()
mlp.b2 = self.b2.copy()
mlp.verbose = self.verbose
mlp.err = -1
return mlp
def checkWeightShape(self):
"""Ensure that the internal weights have the right shape and
alignment for passing them to native code."""
assert c_order(self.w1)
assert c_order(self.b1)
assert c_order(self.w2)
assert c_order(self.b2)
assert self.w1.flags["ALIGNED"]
assert self.b1.flags["ALIGNED"]
assert self.w2.flags["ALIGNED"]
assert self.b2.flags["ALIGNED"]
assert self.w1.shape[0]==self.w2.shape[1]
def init(self,data,cls,nhidden=None,eps=1e-2):
"""Initialize the network but perform no training yet. The network units
are initialized using the data, and the classes are used to determine the number
of output units and (if no number of hidden units is given) the number of
hidden units."""
data = data.reshape(len(data),prod(data.shape[1:]))
scale = max(abs(amax(data)),abs(amin(data)))
# ninput = data.shape[1]
if nhidden is None: nhidden = len(set(cls))
noutput = amax(cls)+1
self.w1 = array(data[selection(xrange(len(data)),nhidden)] * eps/scale,'f')
self.b1 = array(uniform(-eps,eps,(nhidden,)),'f')
self.w2 = array(uniform(-eps,eps,(noutput,nhidden)),'f')
self.b2 = array(uniform(-eps,eps,(noutput,)),'f')
def decreaseHidden(self,data,cls,new_nhidden):
"""Decrease the number of hidden units. Data and cls might be used to
pick which hidden units to delete (but currently are unused)."""
ninput,nhidden,noutput = self.shape()
keep = array([True]*nhidden)
for i in selection(xrange(nhidden),nhidden-new_nhidden):
keep[i] = False
self.w1 = array(self.w1[keep,:],dtype='f',order="C")
self.b1 = array(self.b1[keep],dtype='f',order="C")
self.w2 = array(self.w2[:,keep],dtype='f',order="C")
def increaseHidden(self,data,cls,new_nhidden):
"""Increase the number of hidden units. Data and cls are used to pick
initial values for new hidden units."""
nhidden = self.nhidden()
vs = []
bs = []
delta = new_nhidden-nhidden
for i in range(delta):
a,b = selection(xrange(nhidden),2)
l = 0.8*rand(1)[0]+0.1
v = l*self.w1[a] + (1-l)*self.w1[b]
vs.append(v)
b = l*self.b1[a] + (1-l)*self.b1[b]
bs.append(b)
self.w1 = array(1.0*vstack([self.w1,array(vs)]),dtype='f',order="C")
self.b1 = array(1.0*hstack([self.b1,array(bs)]),dtype='f',order="C")
scale = 0.01*mean(abs(self.w2))
vecs = [self.w2,scale*randn(len(self.w2),delta)]
self.w2 = array(1.0*hstack(vecs),dtype='f',order="C")
def changeHidden(self,data,cls,new_nhidden,subset=None):
"""Increase or decrease the number of hidden units. Uses data and cls
to initialize new units."""
if self.nhidden()==new_nhidden: return
elif self.nhidden()>new_nhidden: self.decreaseHidden(data,cls,new_nhidden)
else: self.increaseHidden(data,cls,new_nhidden)
self.checkWeightShape()
def nhidden(self):
"""Return the number of hidden units."""
return self.w1.shape[0]
def shape(self):
"""Return the shape of the network (a tuple consisting of the number of input units,\
hidden units, and output units."""
assert self.w1.shape[0]==self.w2.shape[1]
return self.w1.shape[1],self.w1.shape[0],self.w2.shape[0]
def train(self,data,cls,etas=None,
nhidden=None,eps=1e-2,subset=None,verbose=0,samples=None):
"""Train the network on the given data with the given learning rate.
Data is a 2D array with the rows representing input samples.
Cls is a 1D array of integers indicating the desired output class.
Initializes the network first. Can train on subsets. Etas is a list of pairs of
learning rates and update steps."""
nnet_native_load()
if etas is None: etas = self.etas
data = data.reshape(len(data),prod(data.shape[1:]))
if subset is not None:
data = take(data,subset,axis=0)
cls = take(cls,subset)
cls = array(cls,'i')
if self.w1==None:
self.init(data,cls,nhidden=nhidden,eps=eps)
if verbose:
err = error(self,data,cls)
rate = err*1.0/len(data)
print "starting",data.shape,data.dtype
print "error",rate,err,len(data)
print "ranges",amin(self.w1),amax(self.w1),amin(self.w2),amax(self.w2)
n,m,l = self.shape()
for i in range(len(etas)):
eta,batchsize = etas[i]
if verbose: print "native batch",i,eta,batchsize
assert cls.dtype==dtype('i')
assert amin(cls)>=0 and amax(cls)<10000
assert eta>0.0 and eta<10.0
assert type(batchsize)==int
self.checkWeightShape()
if samples is None: samples = array([],'i')
assert samples.dtype==dtype('i')
if data.dtype==dtype('f'):
assert amin(data)>-100.0 and amax(data)<100
nnet_native.backward(n,m,l,self.w1,self.b1,self.w2,self.b2,
len(data),data,cls,eta,batchsize,
len(samples),samples)
elif data.dtype==dtype('int8'):
nnet_native.backward_b(n,m,l,self.w1,self.b1,self.w2,self.b2,
len(data),data,cls,eta,batchsize,
len(samples),samples)
else:
raise Internal("data has unknown type (%s)"%data.dtype)
err = error(self,data,cls)
rate = err*1.0/len(data)
if verbose:
print "error",rate,err,len(data)
print "ranges",amin(self.w1),amax(self.w1),amin(self.w2),amax(self.w2)
self.error_rate = rate
self.training_log.append((eta,batchsize,self.error_rate))
def outputs(self,data,subset=None):
"""Given a 2D array of input vectors, with the rows corresponding to each
input, computs the corresponding output vector; this approximates posterior
probability for each class in classification problems."""
nnet_native_load()
data = data.reshape(len(data),prod(data.shape[1:]))
assert data.shape[1]==self.w1.shape[1],\
"input shape: %s w1: %s"%(data.shape,self.w1.shape)
if subset is not None:
data = take(data,subset,axis=0)
# cls = take(cls,subset)
result = zeros((len(data),self.w2.shape[0]),dtype='f')
n,m,l = self.shape()
if data.dtype==dtype('f'):
# if amin(data)<-100 or amax(data)>100: print data
assert amin(data)>-100.0 and amax(data)<100,\
"mlp input out of range: %g %g"%(amin(data),amax(data))
nnet_native.forward(n,m,l,self.w1,self.b1,self.w2,self.b2,
len(data),data,result)
elif data.dtype==dtype('int8'):
nnet_native.forward_b(n,m,l,self.w1,self.b1,self.w2,self.b2,
len(data),data,result)
else:
raise Internal("data has unknown type: %s"%data.dtype)
return result
def classify(self,data,subset=None):
data = data.reshape(len(data),prod(data.shape[1:]))
assert data.shape[1]==self.w1.shape[1]
if subset is not None:
data = take(data,subset,axis=0)
result = zeros(len(data),dtype='i')
n,m,l = self.shape()
if data.dtype==dtype('f'):
assert amin(data)>-100.0 and amax(data)<100
nnet_native.classify(n,m,l,self.w1,self.b1,self.w2,self.b2,
len(data),data,result)
elif data.dtype==dtype('int8'):
nnet_native.classify_b(n,m,l,self.w1,self.b1,self.w2,self.b2,
len(data),data,result)
else:
raise Internal("data has unknown type")
return result
def log_uniform(lo, hi):
    """Draw a random value in [lo, hi] that is uniformly distributed in
    log space."""
    log_lo = log(lo)
    log_hi = log(hi)
    return exp(pyrandom.uniform(log_lo, log_hi))
class AutoMLP(MLP):
    """MLP wrapper that searches over learning rate and hidden-unit count
    by evolving a small pool of candidate networks (Python 2 code:
    print statements, xrange, integer division)."""
    def __init__(self,**kw):
        # fairly conservative default settings that result
        # in reasonably good performance for many problems
        self.verbose = 0
        self.initial_nhidden = [20,40,60,80,120,160]
        self.initial_eta = (0.1,0.5)
        self.initial_epochs = 5
        self.initial_ntrain = 1000000
        self.log_eta_var = 0.2
        self.log_nh_var = 0.2
        self.min_round = 100000
        self.max_round = 10000000
        self.epochs_per_round = 5
        self.max_rounds = 48
        self.max_pool = 3
        ocrolib.set_params(self,kw,warn=0)
        self.kw = kw
    def train1(self,data,classes,verbose=0):
        """Generator driving the pool search; yields a progress Record per
        round and continually assigns the best model found so far to self."""
        n = len(data)
        # Hold out ~10% of the data as a test set (py2 integer division).
        testing = array(selection(xrange(n),n/10),'i')
        training = setdiff1d(array(xrange(n),'i'),testing)
        testset = data[testing,:]
        testclasses = classes[testing]
        ntrain = min(self.initial_epochs*n,self.initial_ntrain)
        pool = []
        # Seed the pool: one candidate per initial hidden-layer size, each
        # with a random learning rate.
        for nh in self.initial_nhidden:
            mlp = MLP(**self.kw)
            mlp.eta = log_uniform(*self.initial_eta)
            mlp.train(data,classes,etas=[(mlp.eta,ntrain)],
                      nhidden=nh,
                      verbose=0,
                      samples=training)
            mlp.err = error(mlp,testset,testclasses)
            if verbose: print "AutoMLP initial","%.3f"%mlp.eta,nh,\
                mlp.err,"%.4f"%(mlp.err*1.0/len(testset))
            pool.append(mlp)
        for i in range(self.max_rounds):
            # if the pool is too large, pick only the best models
            # (score penalizes larger hidden layers slightly)
            errs = [x.err+0.1*x.nhidden() for x in pool]
            if len(errs)>self.max_pool:
                choice = argsort(errs)
                pool = list(take(pool,choice[:self.max_pool]))
            # pick a random model from the pool
            mlp = selection(pool,1)[0]
            mlp = mlp.copy()
            # compute random learning rates and number of hidden units
            # (log-normal perturbation around the current values)
            new_eta = exp(log(mlp.eta)+randn()*self.log_eta_var)
            new_nh = max(2,int(exp(log(mlp.nhidden())+randn()*self.log_nh_var)))
            # train with the new parameters
            mlp.eta = new_eta
            mlp.changeHidden(data,classes,new_nh)
            mlp.train(data,classes,etas=[(mlp.eta,ntrain)],
                      verbose=(self.verbose>1),samples=training)
            # determine error on test set
            mlp.err = error(mlp,testset,testclasses)
            if verbose:
                print "AutoMLP pool",mlp.err,"%.4f"%(mlp.err*1.0/len(testset)),\
                    "(%.3f,%d)"%(mlp.eta,mlp.nhidden()),\
                    [x.err for x in pool]
            pool += [mlp]
            # to allow partial training, update this with the best model so far
            best = argmin([x.err+0.1*x.nhidden() for x in pool])
            mlp = pool[best]
            self.assign(mlp)
            yield Record(round=i,rounds=self.max_rounds,testerr=mlp.err*1.0/len(testset))
    def train(self,data,classes,verbose=1):
        """Train the network on the given data with the given learning rate.
        Data is a 2D array with the rows representing input samples.
        This automatically adapts learning rates and numbers of hidden units.
        There are still some metaparameters that can be set (see the __init__ method),
        but for most problems, that's not necessary."""
        for progress in self.train1(data,classes,verbose=verbose):
            if verbose: print "progress",progress
    def assign(self,mlp):
        """Copy the given model's public attributes onto self (names with
        leading/trailing underscores are skipped)."""
        for k,v in mlp.__dict__.items():
            if k[0]=="_" or k[-1]=="_": continue
            setattr(self,k,v)
def test():
    """Smoke test: train an AutoMLP on a synthetic 2D XOR-like problem
    and print the held-out misclassification count (Python 2 code)."""
    global data,classes,mlp
    # Four noisy clusters at (+-1,+-1), shifted by 0.4.
    data = array(randn(10000,2),'f')
    data = array(2*(data>0)-1,'f')
    data += 0.1*randn(10000,2)
    # XOR labeling: class 1 when both coordinates share a sign.
    classes = array(data[:,0]*data[:,1]>0,'i')
    data += 0.4
    # int8 fixed-point variant of the inputs (scaled by 100, clipped).
    bdata = array(100.0*clip(data,-1.1,1.1),'int8')
    mlp = AutoMLP()
    mlp.max_rounds = 32
    # Train on int8 data, evaluate on the float data.
    mlp.train(bdata[:9000,:],classes[:9000],verbose=1)
    pred = mlp.classify(data[9000:])
    print sum(pred!=classes[9000:])
    print mlp.w1.shape,mlp.w2.shape
| apache-2.0 |
xme1226/horizon | openstack_dashboard/dashboards/admin/metering/forms.py | 17 | 2858 | # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from django.forms import ValidationError # noqa
from django.utils.translation import ugettext_lazy as _
from horizon import forms
class UsageReportForm(forms.SelfHandlingForm):
    """Form selecting the reporting period for the admin metering usage
    report; handle() stores the validated period in the session."""
    # NOTE(review): evaluated once at import time, so the "Month to date"
    # entry captures the day-of-month when the process started and goes
    # stale in a long-running server -- confirm whether that is intended.
    PERIOD_CHOICES = (("1", _("Last day")),
                      ("7", _("Last week")),
                      (str(datetime.date.today().day), _("Month to date")),
                      ("15", _("Last 15 days")),
                      ("30", _("Last 30 days")),
                      ("365", _("Last year")),
                      ("other", _("Other")),
                      )
    period = forms.ChoiceField(label=_("Period"),
                               required=True,
                               choices=PERIOD_CHOICES)
    # The data-line-chart-command attribute hooks the fields into the
    # dashboard's line-chart JavaScript date picker.
    date_from = forms.DateField(label=_("From"), required=False,
                                widget=forms.TextInput(
                                    attrs={'data-line-chart-command':
                                           'date_picker_change'}))
    date_to = forms.DateField(label=_("To"), required=False,
                              widget=forms.TextInput(
                                  attrs={'data-line-chart-command':
                                         'date_picker_change'}))
    def clean_date_from(self):
        # A custom period requires an explicit start date.
        period = self.cleaned_data['period']
        date_from = self.cleaned_data['date_from']
        if period == 'other' and date_from is None:
            raise ValidationError(_('Must specify start of period'))
        return date_from
    def clean_date_to(self):
        # NOTE(review): calls the full form clean() from a field cleaner
        # (unusual -- typically cross-field checks live in clean() itself).
        data = super(UsageReportForm, self).clean()
        date_from = data.get('date_from')
        date_to = data.get('date_to')
        period = data.get('period')
        # For a custom period, the end date (if given) must not precede
        # the start date.
        if (period == 'other' and date_to is not None
                and date_from is not None and date_to < date_from):
            raise ValidationError(_("Start must be earlier "
                                    "than end of period."))
        else:
            return date_to
    def handle(self, request, data):
        # Persist the chosen period in the session so the report views
        # can pick it up on subsequent requests.
        if hasattr(request, 'session'):
            request.session['date_from'] = data['date_from']
            request.session['date_to'] = data['date_to']
            request.session['period'] = data['period']
        return data
| apache-2.0 |
microelly2/geodata | geodat/miki.py | 1 | 30042 | # -*- coding: utf-8 -*-
#-------------------------------------------------
#-- miki - my kivy like creation tools
#--
#-- microelly 2016,2018,2019 (py3)
#--
#-- GNU Lesser General Public License (LGPL)
#-------------------------------------------------
''' kivy like creation tool'''
# pylint: disable=W0331
# pylint: disable=unused-import
# pylint: disable=invalid-name
# xpylint: disable=bare-except
# xpylint: disable=exec-used
import FreeCAD
import FreeCADGui
#from transportationwb.say import sayexc, say
#from transportationwb.say import *
import sys
if sys.version_info[0] !=2:
from importlib import reload
from geodat.say import sayexc, say
from geodat.say import *
from PySide import QtGui, QtCore
import re
import pivy
from pivy import coin
#import nurbswb.configuration
#reload (nurbswb.configuration)
#from nurbswb.configuration import getcb
def getMainWindow():
    '''returns the main window'''
    # Scan the application's top-level widgets for the FreeCAD main window.
    for widget in QtGui.qApp.topLevelWidgets():
        if widget.metaObject().className() == "Gui::MainWindow":
            return widget
    raise Exception("No main window found")
def getComboView(mw):
    '''returns the Combo View widget'''
    # Return the tab widget of the first matching dock window.
    for dock in mw.findChildren(QtGui.QDockWidget):
        name = str(dock.objectName())
        if name == "Combo View" or name == "Python Console":
            return dock.findChild(QtGui.QTabWidget)
    raise Exception("No tab widget found")
def ComboViewShowWidget(widget, tabMode=False):
    '''create a tab widget inside the combo view'''
    # Non-tab mode: just show the widget stand-alone and return it.
    if not tabMode:
        widget.show()
        return widget
    mw = getMainWindow()
    tab = getComboView(mw)
    c = tab.count()
    # clear the combo window: remove all tabs beyond the first two
    # (keeping the standard model/tasks tabs at index 0 and 1)
    for i in range(c - 1, 1, -1):
        tab.removeTab(i)
    # start the requested tab
    # NOTE(review): requires the widget to carry a 'tabname' attribute --
    # presumably set by the Miki widget classes; confirm for other callers.
    tab.addTab(widget, widget.tabname)
    tab.setCurrentIndex(2)
    # keep a back-reference so the widget can manipulate its tab host
    widget.tab = tab
    return widget
def creatorFunction(name):
    '''generates a python code string for the creation of a Qt/Part/So-Object'''
    print ("create object",name)
    # FreeCAD Part objects: "Part.Box" -> addObject call string.
    if name.startswith('Part.'):
        _, part_class = name.split('.')
        return "App.activeDocument().addObject('Part::" + part_class + "','test')"
    # Coin/pivy scenegraph nodes: "SoSeparator" -> "coin.SoSeparator()".
    if name.startswith('So'):
        return "coin." + name + '()'
    # Qt widgets (plain or wrapped) are instantiated verbatim.
    if name.startswith('QtGui') or name.startswith('MyQtGui'):
        return name + "()"
    # Animation objects use factory functions: "Animation.X" -> createX().
    if name.startswith('Animation'):
        _, anim_class = name.split('.')
        return 'Animation.create' + anim_class + '()'
    if name in ('Plugger', 'Manager'):
        return 'Animation.create' + name + '()'
    # default method: call the name as a constructor
    return name + '()'
# a test method
# YourSpecialCreator=Animation.createManager
def VerticalLayoutTab(title=''):
    ''' create as a tab widget for the comboView '''
    # NOTE(review): MikiWidget is defined elsewhere in this module/package;
    # it apparently wraps the label and supplies the 'tabname' attribute
    # used by ComboViewShowWidget -- confirm.
    t = QtGui.QLabel("my widget")
    w = MikiWidget(t, "Reconstruction WB")
    # keep a reference on the FreeCAD module so the widget is not
    # garbage-collected when this function returns
    try:
        FreeCAD.w5.append(w)
    except:
        FreeCAD.w5 = [w]
    if title != '':
        w.setWindowTitle(title)
    w.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
    # embed into the combo view as a tab
    ComboViewShowWidget(w, True)
    # store it to FC.w5
    return w
def setSpacer():
    '''special command for QSpacerItem'''
    # Marker token understood by the layout dispatcher (see run_magic),
    # which replaces it with an expanding QSpacerItem.
    magic_token = "__MAGIC__ Spacer"
    return magic_token
def run_magic(p,c):
    ''' special command wrapper'''
    # Adds an expanding vertical spacer to the parent widget p's layout
    # (consumes the "__MAGIC__ Spacer" token produced by setSpacer()).
    # NOTE(review): parameter c (presumably the command string) is unused
    # here -- confirm the dispatcher passes it only for uniformity.
    p.layout.addItem(QtGui.QSpacerItem(
        10, 10, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding))
def DockWidget(title=''):
    '''create a dock widget inside a given dock container window'''
    # NOTE(review): MikiDockWidget and getdockwindowMgr2 are defined
    # elsewhere in this package -- confirm their contracts.
    t = QtGui.QLabel("my widget")
    w = MikiDockWidget(t, "My Dock")
    # w=QtGui.QWidget()
    # w.setStyleSheet("QWidget { font: bold 18px;color:brown;border-style:
    # outset;border-width: 3px;border-radius: 10px;border-color: blue;}")
    if title != '':
        w.setWindowTitle(title)
    # Layout is created but (per the commented lines) not installed here;
    # presumably MikiDockWidget manages its own layout.
    layout = QtGui.QVBoxLayout()
    layout.setAlignment(QtCore.Qt.AlignTop)
    # w.layout=layout
    # w.setLayout(layout)
    w.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
    w.show()
    # ComboViewShowWidget(w,True)
    # keep a reference on the FreeCAD module so the widget survives
    try:
        FreeCAD.w5.append(w)
    except:
        FreeCAD.w5 = [w]
    # attach the widget to the FreeCAD dock window manager
    getdockwindowMgr2(w, "FreeCAD")
    return w
def MainWindow(title=''):
    '''create the dialog as a main window (not a dock widget)'''
    w = QtGui.QWidget()
    # w.setStyleSheet("QWidget { font: bold 18px;color:brown;border-style:
    # outset;border-width: 3px;border-radius: 10px;border-color: blue;}")
    if title != '':
        w.setWindowTitle(title)
    layout = QtGui.QVBoxLayout()
    layout.setAlignment(QtCore.Qt.AlignTop)
    # expose the layout as an attribute so callers can add children
    w.layout = layout
    w.setLayout(layout)
    w.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
    w.show()
    # keep a reference on the FreeCAD module so the window is not
    # garbage-collected when this function returns
    try:
        FreeCAD.w5.append(w)
    except:
        FreeCAD.w5 = [w]
    return w
def HorizontalLayout(title=''):
    '''create a QHBoxLayout'''
    container = QtGui.QWidget()
    box = QtGui.QHBoxLayout()
    box.setAlignment(QtCore.Qt.AlignLeft)
    container.setLayout(box)
    if title != '':
        container.setWindowTitle(title)
    # expose the layout as an attribute so callers can add children
    container.layout = box
    return container
def VerticalLayout(title=''):
    '''create a QVBoxLayout'''
    container = QtGui.QWidget()
    box = QtGui.QVBoxLayout()
    # note: alignment deliberately mirrors HorizontalLayout (AlignLeft)
    box.setAlignment(QtCore.Qt.AlignLeft)
    container.setLayout(box)
    if title != '':
        container.setWindowTitle(title)
    # expose the layout so Miki.addChild can append children later
    container.layout = box
    return container
def HorizontalGroup(title=''):
    '''Create a QGroupBox with a horizontal (QHBoxLayout) layout.

    title -- optional window title for the group widget.
    The layout is exposed as ``w.layout`` for Miki.addChild.
    '''
    w = QtGui.QGroupBox()
    w.setStyleSheet(
        "QGroupBox { border: 2px solid green; border-radius: 5px;"
        "margin: 7px; margin-bottom: 7px; padding: 0px}"
        "QGroupBox::title {top:-7 ex;left: 10px; subcontrol-origin: border}")
    # fixed typo in the user-visible default group title ("horizonmmtal")
    w.setTitle("horizontal layout group")
    layout = QtGui.QHBoxLayout()
    layout.setAlignment(QtCore.Qt.AlignLeft)
    w.setLayout(layout)
    if title != '':
        w.setWindowTitle(title)
    w.layout = layout
    return w
def VerticalGroup(title=''):
    '''create a GroupBox with a QVBoxLayout'''
    group = QtGui.QGroupBox()
    group.setStyleSheet(
        "QGroupBox { border: 3px solid blue; border-radius: "
        "5px; margin: 7px; margin-bottom: 7px; padding: 0px} "
        "QGroupBox::title {top:-7 ex;left: 10px; subcontrol-origin: border}")
    group.setTitle("vertical layout group")
    vbox = QtGui.QVBoxLayout()
    vbox.setAlignment(QtCore.Qt.AlignLeft)
    group.setLayout(vbox)
    if title != '':
        group.setWindowTitle(title)
    # expose the layout so Miki.addChild can append children later
    group.layout = vbox
    return group
##\cond
def ftab2(name="horizontal"):
    '''demo widget: a horizontal row holding a styled label and two label columns'''
    w = QtGui.QWidget()
    layout = QtGui.QHBoxLayout()
    layout.setAlignment(QtCore.Qt.AlignLeft)
    w.setLayout(layout)
    pB = QtGui.QLabel(name)
    pB.setStyleSheet(
        "QWidget { font: bold 18px;color:red;border-style: outset;"
        "border-width: 3px;border-radius: 10px;border-color: blue;}")
    layout.addWidget(pB)
    # display inside the combo view rather than as a free window
    ComboViewShowWidget(w, False)
    # first column with two labels
    w1 = QtGui.QWidget()
    layout1 = QtGui.QVBoxLayout()
    layout1.setAlignment(QtCore.Qt.AlignLeft)
    w1.setLayout(layout1)
    pB1 = QtGui.QLabel("name1")
    layout1.addWidget(pB1)
    pB1 = QtGui.QLabel("name1")
    layout1.addWidget(pB1)
    layout.addWidget(w1)
    # second column with two labels
    w2 = QtGui.QWidget()
    layout2 = QtGui.QVBoxLayout()
    layout2.setAlignment(QtCore.Qt.AlignLeft)
    w2.setLayout(layout2)
    pB2 = QtGui.QLabel("name2")
    layout2.addWidget(pB2)
    pB1 = QtGui.QLabel("name1")
    layout2.addWidget(pB1)
    layout.addWidget(w2)
    # expose the row layout for later additions
    w.layout = layout
    return w
##\endcond
'''
Miki is the parser and executer for miki configuration strings or files
'''
class Miki(object):
''' Miki is the parser and executer for miki configuration strings or files'''
    def __init__(self):
        ## all objects created while building the configuration tree
        self.objects = []
        ## the root widget of the generated Qt structure
        self.widget = None
        ## the ids of the input/output sub-widgets
        self.ids = {}
        ##\cond
        # parser state: anchor name -> line number of the anchor definition
        self.anchors = {}
        self.indents = []
        self.olistref = []
        self.indpos = -1
        # the controller/application object wired to the gui (set by callers)
        self.app = None
        # local classes declared with the "<Name>:" syntax -> defining line
        self.classes = {}
        # parsed line records produced by parse2()
        self.lines = []
        ##\endcond
    def parse2(self, configurationString):
        '''parse the configuration string

        Fills self.lines with one record per relevant input line of the form
        [indent, line number, parent line, kind, ...] where kind is one of
        'cmd', 'local class', 'link', 'anchor', 'anchor attr', 'att val' or
        'obj'.  Anchor names are collected into self.anchors.
        '''
        ls = configurationString.splitlines()
        # pylint: disable=unused-variable
        app = self.app
        line = 0
        depth = 0
        # d maps nesting depth -> indentation width; ln maps indentation
        # width -> line number of the most recent entry at that indent
        d = [0]*30
        ln = [0]*30
        refs = {}
        rs = []
        r = None
        r = [-1, 0, 0, '']
        for l in ls:
            ltxt = l
            # flush the record of the previous line before parsing this one
            if r:
                rs.append(r)
            r = [-1, 0, 0, '']
            line += 1
            # "#:" lines are commands executed verbatim by build()
            if l.startswith('#:'):
                res = re.search(r"#:\s*(\S.*)", l)
                r = [l, line, -1, 'cmd', res.group(1)]
                continue
            # plain comment lines are skipped
            if re.search(r"^\W*#", l):
                continue
            # "<Name>:" declares a local class
            res = re.search(r"\<(\S.*)\>:", l)
            if res:
                parent = 0
                ln[0] = line
                depth = 0
                r = [l, line, parent, "local class", res.group(1)]
                self.classes[res.group(1)] = line
                continue
            res = re.search(r"(\s*)(\S.*)", l)
            if res:
                # NOTE: l is rebound here from the line text to its indent width
                l = len(res.group(1))
                if l == 0:
                    depth = 0
                if d[depth] < l:
                    depth += 1
                elif d[depth] > l:
                    depth -= 1
                try:
                    d[depth] = l
                except:
                    sayexc(str([l, ltxt]))
                # the parent is the latest entry one indent level up
                parent = ln[l - 1]
                ln[l] = line
                r = [l, line, parent, res.group(2), depth, ln]
                st = res.group(2)
                # "key: *anchor" -- link to a previously defined anchor
                res = re.search(r"(\S+):\s*\*(\S+)", st)
                if res:
                    r = [l, line, parent, 'link',
                         res.group(1), res.group(2), refs[res.group(2)]]
                    continue
                # "key: &anchor value" -- anchored attribute
                res = re.search(r"(\S+):\s*&(\S+)\s+(\S.*)", st)
                if res:
                    r = [l, line, parent, "anchor attr",
                         res.group(1), res.group(2), res.group(3)]
                    refs[res.group(2)] = line
                    continue
                # "key: &anchor" -- anchored object
                res = re.search(r"(\S+):\s*&(\S+)", st)
                if res:
                    r = [l, line, parent, "anchor", res.group(1), res.group(2)]
                    refs[res.group(2)] = line
                    continue
                # "key: value" -- attribute; the value is eval'ed
                res = re.search(r"(\S+[^:]):\s*([^:]\S.*)", st)
                if res:
                    r = [l, line, parent, "att val", res.group(1), eval(res.group(2))]
                    if res.group(1) == 'Name':
                        rs[parent].append(res.group(2))
                    continue
                res = re.search(r"\s*(\S):\s*([^:]\S.*)", st)
                if res:
                    r = [l, line, parent, "att val", res.group(1), eval(res.group(2))]
                    if res.group(1) == 'Name':
                        rs[parent].append(res.group(2))
                    continue
                else:
                    # bare "key:" -- plain object without an anchor
                    res = re.search(r"(\S+):", st)
                    if res:
                        r = [l, line, parent, "obj", res.group(1), 'no anchor']
        ##\cond
        self.anchors = refs
        self.lines = rs
        ##\endcond
        debug=0
        if debug:
            say("lines parsed ...")
            for r in rs:
                say (r)
            if len(self.anchors.keys()) > 0:
                say("Anchors ....")
                say(self.anchors)
    def build(self):
        '''execute the parsed data (expected in self.lines)

        Creates one object per 'obj'/'anchor'/'local class' record, wires
        children to parents via addChild, and applies 'att val'/'link'
        entries by calling setters or assigning attributes.  Returns the
        first created object (the root widget).
        '''
        ##\cond
        self.widget = None
        ##\endcond
        for l in self.lines:
            # "#:" command lines are executed verbatim
            if l[3] == 'cmd':
                try:
                    exec(l[4])
                except:
                    sayexc(str(["Error exec:", l[4]]))
                continue
            # instantiate the object for this entry
            if l[3] == 'obj' or l[3] == 'anchor' or l[3] == 'local class':
                name = l[4]
                try:
                    f = name + "()"
                    f2 = name
                except:
                    f = creatorFunction(l[4])
                    f = creatorFunction(l[4])
                if len(l) < 7:  # no name for object
                    l.append('')
                if l[3] == 'local class':
                    exec("class " + name + "(object):pass")
                    h = eval(f2)
                else:
                    h = eval(f)
                if len(l) < 7:
                    l.append(None)
                # the created object is stored at index 7 of the record
                l.append(h)
                self.objects.append(h)
                ##\cond
                # the first created object becomes the root widget
                if self.widget == None:
                    self.widget = h
                ##\endcond
            if l[2] != 0:
                if l[4] == 'Name':
                    continue
                # child objects are attached to their parent's object
                if l[3] == 'obj' or l[3] == 'anchor':
                    parent = self.lines[l[2]][7]
                    self.addChild(parent, l[7])
                if l[3] == 'link':
                    parent = self.lines[l[2]][7]
                    try:
                        # link to another object: attach it as a child
                        child = self.lines[l[6]][7]
                        self.addChild(parent, child)
                    except:
                        # link to an attribute (not a child object)
                        method = l[4]
                        v = self.lines[l[6]][6]
                        kk = eval("parent." + l[4])
                        cnkk = kk.__class__.__name__
                        # coin/openinventor fields ("So...") take setValue
                        if cnkk.startswith('So'):
                            ex = "parent." + method + ".setValue(" + str(v) + ")"
                            exec(ex)
                            continue
                        if cnkk == 'builtin_function_or_method':
                            kk(v)
                            continue
                        cn = v.__class__.__name__
                        if cn == 'int' or cn == 'float':
                            ex = "parent." + l[4] + "=" + str(v)
                        elif cn == 'str':
                            ex = "parent." + l[4] + "='" + v + "'"
                        else:
                            sayW("nicht implementierter typ")
                            sayW([v,cn])
                            sayW(l)
                            ex = ''
                        exec(ex)
                if l[3] == 'att val' or l[3] == 'anchor attr':
                    method = l[4]
                    parent = self.lines[l[2]][7]
                    if l[3] == 'att val':
                        v = l[5]
                    else:
                        v = l[6]
                    # "id" attributes register the widget for later lookup
                    if method == 'id':
                        self.ids[v] = parent
                        continue
                    try:
                        kk = eval("parent." + l[4])
                    except:
                        # attribute does not exist yet: assign it directly
                        cn = v.__class__.__name__
                        if cn == 'int' or cn == 'float':
                            ex = "parent." + l[4] + "=" + str(v)
                        elif cn == 'str':
                            ex = "parent." + l[4] + "='" + v + "'"
                        elif cn=='Vector':
                            sayW("nicht implementierter typ Ax")
                            sayW([v,cn])
                            sayW(l)
                            sayW(parent)
                            ex="parent."+l[4]+"(FreeCAD."+str(v)+")"
                            sayW("*** "+ex)
                        elif l[4]=='setValue':
                            parent.setValue(v)
                            continue
                        else:
                            sayW("nicht implementierter typ Ayy")
                            sayW([v,cn])
                            sayW(l)
                            ex=''
                        exec(ex)
                        continue
                    kk = eval("parent." + l[4])
                    cnkk = kk.__class__.__name__
                    # coin/openinventor fields take setValue
                    if cnkk.startswith('So'):
                        if v.__class__.__name__.startswith('Sb'):
                            aaa=v
                            ex = "parent." + method + ".setValue(aaa)"
                        else:
                            ex = "parent." + method + ".setValue(" + str(v) + ")"
                        exec(ex)
                        continue
                    # bound Qt methods are simply called with the value
                    if cnkk == 'builtin_function_or_method':
                        kk(v)
                        continue
                    cn = v.__class__.__name__
                    if cn == 'int' or cn == 'float':
                        ex = "parent." + l[4] + "=" + str(v)
                    elif cn == 'str':
                        if l[4].startswith("run"):
                            ex = "parent." + l[4] + "('" + v + "')"
                        else:
                            ex = "parent." + l[4] + "='" + v + "'"
                    elif cn=='Vector':
                        ex="parent."+l[4]+"(FreeCAD."+str(v)+")"
                    else:
                        sayW("nicht implementierter typ B")
                        sayW([v,cn])
                        sayW(l)
                        aaa=v
                        ex="parent."+l[4]+"(aaa)"
                    say("//*** " + ex)
                    exec(ex)
        return self.widget
def showSo(self):
''' add the item as openinventor objects to FreeCADGui.ActiveDocument.ActiveView.getSceneGraph()'''
for l in self.lines:
if l[2] == 0 and l[0] != -1:
if len(l) < 7:
continue
r = l[7]
if r.__class__.__name__.startswith('So'):
sg = FreeCADGui.ActiveDocument.ActiveView.getSceneGraph()
sg.addChild(r)
def showSo2(self, dokname):
''' add the item as openinventor objects to ActiveView.getSceneGraph() for a given document '''
for l in self.lines:
if l[2] == 0 and l[0] != -1:
r = l[7]
if r.__class__.__name__.startswith('So'):
dok = FreeCADGui.getDocument(dokname)
sg = dok.ActiveView.getSceneGraph()
sg.addChild(r)
    def addChild(self, parent, child):
        ''' add the child to the parent during the build

        Dispatches on the runtime types of parent and child: Qt widgets go
        into the parent's layout, coin nodes into the scene graph, plain
        python objects into a ``children`` list, and FreeCAD document
        objects via TypeId-specific properties or addObject().
        '''
        p=parent
        c=child
        cc = c.__class__.__name__
        # disabled debug trace
        if 0:
            say (p)
            say (p.__class__)
            say ('--')
            say (c)
            say (c.__class__)
            say (cc)
        # Qt widgets (PySide/PySide2) and miki wrapper widgets are added to
        # the parent's layout (or as a tab for the tab container)
        if str(c.__class__).startswith("<class 'PySide2.QtWidgets.") or \
                str(c.__class__).startswith("<type 'PySide.QtGui.") or str(c.__class__).startswith("<class 'nurbswb.miki"):
            if p.__class__.__name__ == '_MyTabWidget':
                p.addWidget(c)
            else:
                p.layout.addWidget(c)
            return
        # coin/openinventor scene graph nodes
        if cc.startswith('So'):
            p.addChild(c)
            return
        # plain python container objects collect their children in a list
        if p.__class__.__name__ == 'object' or \
                str(p.__class__).startswith("<class 'geodat.miki."):
            try:
                p.children.append(c)
            except:
                p.children = [c]
            return
        # FreeCAD document objects: some types use dedicated list properties
        try:
            if str(p.TypeId) == 'Part::MultiFuse':
                z = p.Shapes
                z.append(c)
                p.Shapes = z
            elif str(p.TypeId) == 'Part::Compound':
                z = p.Links
                z.append(c)
                p.Links = z
            else:
                try:
                    p.addObject(c)
                except:
                    try:
                        # magic marker strings (see setSpacer) insert layout items
                        if c.startswith('__MAGIC_'):
                            run_magic(p,c)
                    except:
                        FreeCAD.Console.PrintError("\naddObject funktioniert nicht A")
                        FreeCAD.Console.PrintError([p, c])
        except:
            # parent has no TypeId: fall back to a generic addObject attempt
            try:
                print ("TRy to add",c)
                p.addObject(c)
            except:
                try:
                    if c.startswith('__MAGIC_'):
                        run_magic(p,c)
                except:
                    FreeCAD.Console.PrintError("\naddObject funktioniert nicht BBB")
                    FreeCAD.Console.PrintError([p, c])
def run(self, string, cmd=None):
''' parse the configuration string and execute the resulting tree'''
debug = False
if debug:
sayW("parse2 ....")
self.parse2(string)
if debug:
sayW("build ...#")
rca = self.build()
if debug:
sayW("showSo ...")
self.showSo()
if cmd != None:
say("CMD ...")
say(cmd)
rca = cmd()
say(("rc run...", rca))
return rca
def roots(self):
'''returns a list of all roots of the configuration tree'''
rl = []
for l in self.lines:
if l[0] == 0:
rl.append(l)
return rl
    def report(self, results=None):
        '''some debug information about objects, anchors, roots

        results -- optional list of extra objects to print; coin nodes
        ("So...") among them are also added to the active scene graph.
        '''
        say("Results ...")
        if results == None:
            results = []
        for r in results:
            say(r)
            if r.__class__.__name__.startswith('So'):
                sg = FreeCADGui.ActiveDocument.ActiveView.getSceneGraph()
                sg.addChild(r)
        say ("Data ...")
        for ob in self.objects:
            say (ob)
        say (self.anchors)
        for r in self.roots():
            say (r)
class MikiWidget(QtGui.QWidget):
    '''the widget for the mikidialog'''
    def __init__(self, title_widget, objectname):
        QtGui.QWidget.__init__(self)
        ## the widget with the title information
        self.title_widget = title_widget
        ## the Tabname of the ComboView Tab for the miki widget
        self.tabname = "MikiTab"
        ##\cond
        self.setWindowTitle(objectname)
        self.setObjectName(objectname)
        layout = QtGui.QVBoxLayout()
        self.setLayout(layout)
        # expose the layout so Miki.addChild can append children later
        self.layout = layout
        # slot for the optional top-level button row (see MikiDockWidget)
        self.dwl = None
        ##\endcond
class MikiDockWidget(QtGui.QDockWidget):
    '''dock-widget variant of the miki dialog container'''
    def __init__(self, title_widget, objectname):
        QtGui.QDockWidget.__init__(self)
        # the widget with the title information (used by toggle_title_widget)
        self.title_widget = title_widget
        self.setWindowTitle(objectname)
        self.setObjectName(objectname)
        # the default qt title bar is used; the custom one stays available
        if 1:
            self.setTitleBarWidget(None)
        else:
            self.setTitleBarWidget(self.title_widget)
        self.setMinimumSize(200, 185)
        self.centralWidget = QtGui.QWidget(self)
        self.setWidget(self.centralWidget)
        layout = QtGui.QVBoxLayout()
        # expose the layout so Miki.addChild can append children later
        self.layout = layout
        self.centralWidget.setLayout(layout)
        self.scroll = QtGui.QScrollArea()
        self.liste = QtGui.QWidget()
        self.lilayout = QtGui.QVBoxLayout()
        self.liste.setLayout(self.lilayout)
        mygroupbox = QtGui.QGroupBox()
        mygroupbox.setStyleSheet(
            "QWidget { background-color: lightblue;margin:0px;padding:0px;}"
            "QPushButton { margin-right:0px;margin-left:0px;margin:0 px;padding:0px;;"
            "background-color: lightblue;text-align:left;"
            "padding:6px;padding-left:4px;color:brown; }")
        self.mygroupbox = mygroupbox
        myform = QtGui.QFormLayout()
        self.myform = myform
        self.myform.setSpacing(0)
        mygroupbox.setLayout(myform)
        # the group box is presented inside a resizable scroll area
        scroll = QtGui.QScrollArea()
        scroll.setWidget(mygroupbox)
        scroll.setWidgetResizable(True)
        self.lilayout.addWidget(scroll)
        # optional top button row (disabled demo code)
        if 0:
            self.pushButton00 = QtGui.QPushButton(
                QtGui.QIcon('icons:freecad.svg'), objectname)
            layout.addWidget(self.pushButton00)
            self.pushButton01 = QtGui.QPushButton(
                QtGui.QIcon(FreeCAD.ConfigGet('UserAppData') + '/Mod/mylib/icons/mars.png'),
                "Mars")
        self.createToplevelButtons()
    def createToplevelButtons(self):
        '''create a row of top level buttons (experimental)'''
        dw = QtGui.QWidget()
        dwl = QtGui.QHBoxLayout()
        dw.setLayout(dwl)
        # pylint: disable=attribute-defined-outside-init
        self.dwl = dwl
        # the button row is only attached to the layout when enabled here
        if 0:
            self.layout.addWidget(dw)
        l = QtGui.QLabel('Label')
        self.add_top(l)
        b = QtGui.QPushButton('Butto')
        self.add_top(b)
        b = QtGui.QPushButton(QtGui.QIcon('icons:freecad.svg'), 'Icon+Button')
        self.add_top(b)
        b = QtGui.QPushButton(QtGui.QIcon('icons:view-refresh.svg'), '')
        self.add_top(b)
        b = QtGui.QPushButton(QtGui.QIcon(
            '/home/microelly2/.FreeCAD/Mod/reconstruction/icons/std_viewscreenshot.svg'), 'Foto Image')
        self.add_top(b)
        b = QtGui.QPushButton(QtGui.QIcon(
            '/home/microelly2/.FreeCAD/Mod/reconstruction/icons/web-home.svg'), 'Foto 3D')
        self.add_top(b)
        self.layout.setSpacing(0)
    def add_top(self, widget):
        '''add a widget to the top level row'''
        self.dwl.addWidget(widget)
    def toggle_title_widget(self, off):
        ''' toggle the display of the TitleBar Widget'''
        # off is forced to False, so the custom title widget is always shown
        off=False
        if off:
            self.setTitleBarWidget(None)
        else:
            self.setTitleBarWidget(self.title_widget)
def getMainWindowByName(name):
    '''returns a main window of a given Title,
    if there is no such main window a new main window is created'''
    # the FreeCAD main window is a special case handled by FreeCADGui
    if name == 'FreeCAD':
        return FreeCADGui.getMainWindow()
    # search all existing top level widgets for a matching title
    toplevel2 = QtGui.qApp.topLevelWidgets()
    for i in toplevel2:
        if name == i.windowTitle():
            i.show()
            return i
    # no match found: create a new top level main window with that title
    r = QtGui.QMainWindow()
    # keep a reference on the FreeCAD module so the window survives
    FreeCAD.r = r
    r.setWindowTitle(name)
    r.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
    r.show()
    return r
class _MyTabWidget(QtGui.QTabWidget):
    '''tab container used by MyTabWidget(); children become tabs'''
    def __init__(self):
        QtGui.QTabWidget.__init__(self)
        self.setMinimumSize(500, 800)
        # default tab label; addWidget() reads it, so it must exist even if
        # the configuration never sets one (was an AttributeError before)
        self.tabname = "MikiTab"
    def addWidget(self, w):
        '''add w as a new tab labelled with the current tabname'''
        self.addTab(w, self.tabname)
        self.show()
class MyWidget(QtGui.QLabel):
    '''label widget that can carry a tab name'''
    def __init__(self):
        QtGui.QLabel.__init__(self)
    def setTabname(self, name):
        '''store the name used as the tab label.

        BUG FIX: the original assigned the undefined identifier ``nae``,
        which raised NameError on every call.
        '''
        self.tabname = name
def MyTabWidget(title=''):
    '''Create the dialog as a tab widget.

    title is currently unused but kept for interface symmetry with
    MainWindow() / DockWidget().  Returns the _MyTabWidget instance.
    '''
    w = _MyTabWidget()
    w.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
    # keep a module-level reference so Qt does not garbage-collect the
    # widget; narrowed from a bare except -- only the first call lacks w5
    try:
        FreeCAD.w5.append(w)
    except AttributeError:
        FreeCAD.w5 = [w]
    return w
def getdockwindowMgr2(dockwindow, winname="FreeCAD"):
    '''add the dock window to the dockwindowManager(main window) winname'''
    d3=dockwindow
    # NOTE(review): the winname parameter is effectively ignored -- the
    # block below always overwrites it with "FreeCAD"
    if 1:
        winname = "OTTO"
        winname = "FreeCAD"
    w = getMainWindowByName(winname)
    w.addDockWidget(QtCore.Qt.LeftDockWidgetArea, d3)
    d3.setAllowedAreas(QtCore.Qt.LeftDockWidgetArea|QtCore.Qt.RightDockWidgetArea)
    # disabled demo code: create two extra dock widgets and tabify them
    if 0:
        t = QtGui.QLabel('Title 1')
        d = MikiDockWidget(t, "huhu")
        w.addDockWidget(QtCore.Qt.LeftDockWidgetArea, d)
        t = QtGui.QLabel('Title 2')
        d2 = MikiDockWidget(t, "haha")
        w.addDockWidget(QtCore.Qt.LeftDockWidgetArea, d2)
        w.tabifyDockWidget(d3, d2)
        w.tabifyDockWidget(d2, d)
    d3.raise_()
    w.show()
    return w
class Miki_Contextmenu(Miki):
    '''the miki as contextmenu entry'''
    def __init__(self, App, layoutstring, obj):
        Miki.__init__(self)
        ## the Application with the logic behind the Gui
        self.app = App()
        self.app.root = self
        self.app.obj = obj
        # wire the dialog into the object's context menu and edit action;
        # both lambdas rebuild the dialog from the same layout string
        obj.ViewObject.Proxy.cmenu.append(["Dialog", lambda: self.run(layoutstring)])
        obj.ViewObject.Proxy.edit = lambda: self.run(layoutstring)
class MikiApp(object):
    '''example for the execution layer of the Gui'''
    def __init__(self):
        ## reference to the miki Gui object
        self.root = None
    def run(self):
        '''example button clicked method'''
        say ("Button clicked")
        say (self.root)
        say (self.root.widget)
    def close2(self):
        '''close the combo view tab'''
        # NOTE(review): relies on getMainWindow()/getComboView() defined
        # elsewhere in this module
        mw = getMainWindow()
        tab = getComboView(mw)
        c = tab.count()
        # clear the combo windows #2 and more
        for i in range(c - 1, 1, -1):
            tab.removeTab(i)
    def itemClicked(self, item):
        '''example for item clicked'''
        say (item)
        say (item.text())
    def close(self):
        '''delete the widget'''
        self.root.widget.deleteLater()
        self.close2()
# https://stackoverflow.com/questions/28282434/how-to-combine-opencv-with-pyqt-to-create-a-simple-gui
class PicWidget(QtGui.QLabel):
    '''label-based widget used by run_display() to show an opencv image'''
    def __init__(self):
        QtGui.QLabel.__init__(self)
        # target size; 0 means "keep the natural image size"
        self.sizeX = 0
        self.sizeY = 0
        # inner QLabel, created lazily by run_display()
        self.label = None
def run_display(frame,pn):
    '''Load the image file pn with OpenCV and show it inside frame (a PicWidget).'''
    import cv2
    # create the inner label once and cache it on the frame
    if frame.label == None:
        label_Image = QtGui.QLabel(frame)
        frame.label = label_Image
    else:
        label_Image = frame.label
    label_Image.setAlignment(QtCore.Qt.AlignCenter)
    im = cv2.imread(pn)
    # bytes per image row (width * channels); assumes a 3-channel colour
    # image -- TODO confirm, a grayscale file would make im.shape[2] fail
    cc = im.shape[1]*im.shape[2]
    # debug marker: paint a small light-green square into the image
    im[10:20,10:20]=[140,240,140]
    image_profile = QtGui.QImage(im.data, im.shape[1], im.shape[0], cc, QtGui.QImage.Format_RGB888)
    if frame.sizeX!=0 and frame.sizeY!=0:
        # scale to the requested size, keeping the aspect ratio
        image_profile = image_profile.scaled(frame.sizeX,frame.sizeY, aspectRatioMode=QtCore.Qt.KeepAspectRatio, transformMode=QtCore.Qt.SmoothTransformation)
    label_Image.setPixmap(QtGui.QPixmap.fromImage(image_profile))
    frame.setMinimumSize(PySide.QtCore.QSize( im.shape[1], im.shape[0]))
    if frame.sizeX!=0 and frame.sizeY!=0:
        frame.setMinimumSize(PySide.QtCore.QSize(frame.sizeX,frame.sizeY))
    label_Image.setAlignment(QtCore.Qt.AlignCenter)
    return frame
##use case
#pn='/home/thomas/Bilder/bp_841.png'
#frame= PicWidget()
#frame.show_frame_in_display(pn)
#frame.show()
class Controller(MikiApp):
    '''default controller: inherits the example behaviour of MikiApp unchanged'''
    pass
def createMikiGui(layout, app):
    '''creates a miki Gui object (widget and logic)
    for the view layout string and the controller app'''
    gui = Miki()
    controller = app()
    # wire view and controller to each other before building
    gui.app = controller
    controller.root = gui
    return gui.run(layout)
def createMikiGui2(layout, app):
    '''creates a miki Gui object (widget and logic)
    for the view layout string and the controller app;
    returns the controller (not the widget)'''
    gui = Miki()
    controller = app()
    # wire view and controller to each other before building
    gui.app = controller
    controller.root = gui
    gui.run(layout)
    return controller
def testme(mode=''):
'''miki Qt example
modes: VerticalLayoutTab MainWindow DockWidget
'''
layout = '''
#VerticalLayoutTab:
MainWindow:
#DockWidget:
# id:'main'
QtGui.QLabel:
setText:"*** N U R B S E D I T O R ***"
VerticalLayout:
HorizontalGroup:
QtGui.QLabel:
setText: "My Label"
QtGui.QPushButton:
setText: "My Button"
QtGui.QLabel:
setText: "My Label 2"
VerticalGroup:
QtGui.QLabel:
setText:"Action "
QtGui.QPushButton:
setText: "Run Action"
clicked.connect: app.run
setIcon: QtGui.QIcon('/home/thomas/.FreeCAD/Mod/freecad-transportation-wb/icons/one_center_curve.svg')
setIconSize: QtCore.QSize(40,40)
QtGui.QPushButton:
setText: "close Tab"
clicked.connect: app.close
HorizontalGroup:
VerticalLayout:
QtGui.QLineEdit:
setText:"edit Axample"
QtGui.QLineEdit:
setText:"edit B"
VerticalGroup:
QtGui.QLineEdit:
setText:"edit B"
QtGui.QLabel:
setText: "My Label"
# QtGui.QLineEdit:
# setText:"horizel "
QtGui.QListWidget:
addItem: "HUHU"
addItem: "HADA"
addItem: "Hooo"
addItem: "Hiii"
itemClicked.connect: app.itemClicked
HorizontalLayout:
QtGui.QLabel:
#setGeometry: PySide.QtCore.QRect(0,0,100,300)
setPixmap: QtGui.QPixmap('/home/thomas/Bilder/freeka.png')
VerticalLayout:
QtGui.QLineEdit:
setText:"AA"
QtGui.QLineEdit:
setText:"BB"
VerticalGroup:
setTitle: "Slider"
HorizontalLayout:
# https://srinikom.github.io/pyside-docs/PySide/QtGui/QSlider.html
# https://github.com/pyside/Examples/blob/master/examples/widgets/sliders.py
QtGui.QSlider:
QtGui.QSlider:
VerticalLayout:
QtGui.QDial:
setFocusPolicy: QtCore.Qt.StrongFocus
QtGui.QDial:
QtGui.QSlider:
setOrientation: PySide.QtCore.Qt.Orientation.Horizontal
HorizontalGroup:
QtGui.QComboBox:
id: 'actionmode'
addItem: "change Height relative"
addItem: "set absolute Height and Weight"
addItem: "Add VLine"
addItem: "Add ULine"
# addItem: "Elevate VLine"
# addItem: "Elevate ULine"
# addItem: "Elevate Rectangle"
# addItem: "Elevate Circle"
# currentIndexChanged.connect: app.setActionMode
QtGui.QCheckBox:
id: 'relativemode'
setText: 'Height relative'
# stateChanged.connect: app.relativeMode
setChecked: True
'''
layoutVT = '''
VerticalLayoutTab:
setAlignment:QtCore.Qt.AlignTop
VerticalGroup:
QtGui.QPushButton:
QtGui.QPushButton:
QtGui.QLabel:
setText:"*** Tab D E M O ***"
VerticalGroup:
setAlignment:QtCore.Qt.AlignTop
HorizontalGroup:
QtGui.QPushButton:
setText: "close"
clicked.connect: app.close
QtGui.QPushButton:
QtGui.QPushButton:
VerticalGroup:
setSpacer:
'''
layoutMW = '''
MainWindow:
QtGui.QLabel:
setText:"*** Main Window D E M O ***"
VerticalLayout:
HorizontalGroup:
QtGui.QPushButton:
setText: "close"
clicked.connect: app.close
'''
layoutDW = '''
DockWidget:
QtGui.QLabel:
setText:"*** Dock Widget D E M O ***"
VerticalLayout:
HorizontalGroup:
QtGui.QPushButton:
setText: "close"
clicked.connect: app.close
setSpacer:
'''
if mode == 'VerticalLayoutTab':
layout = layoutVT
elif mode == 'MainWindow':
layout = layoutMW
elif mode == 'DockWidget':
layout = layoutDW
mikigui = createMikiGui(layout, MikiApp)
return mikigui
if __name__ == '__main__':
say("miki transport ...")
testme()
def testDialogMainWindow():
    '''run the demo dialog as a main window'''
    return testme("MainWindow")


def testDialogTab():
    '''run the demo dialog inside a combo view tab'''
    return testme('VerticalLayoutTab')


def testDialogDockWidget():
    '''run the demo dialog as a dock widget'''
    return testme("DockWidget")


def testDialog():
    '''run the demo dialog with the default layout'''
    return testme()
| lgpl-3.0 |
marymhayes/wats4000-final-project | node_modules/node-gyp/gyp/gyptest.py | 1752 | 8019 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__doc__ = """
gyptest.py -- test runner for GYP tests.
"""
import os
import optparse
import subprocess
import sys
class CommandRunner(object):
  """
  Executor class for commands, including "commands" implemented by
  Python functions.
  """
  verbose = True
  active = True

  def __init__(self, dictionary=None):
    """dictionary maps %(key)s placeholders to substitution values."""
    # BUG FIX: the original used a mutable default ({}), which is shared
    # between all instances constructed without an argument.
    self.subst_dictionary(dictionary if dictionary is not None else {})

  def subst_dictionary(self, dictionary):
    self._subst_dictionary = dictionary

  def subst(self, string, dictionary=None):
    """
    Substitutes (via the format operator) the values in the specified
    dictionary into the specified command.

    The command can be an (action, string) tuple.  In all cases, we
    perform substitution on strings and don't worry if something isn't
    a string.  (It's probably a Python function to be executed.)
    """
    if dictionary is None:
      dictionary = self._subst_dictionary
    if dictionary:
      try:
        string = string % dictionary
      except TypeError:
        pass
    return string

  def display(self, command, stdout=None, stderr=None):
    """Prints the command about to be run (honoring self.verbose)."""
    if not self.verbose:
      return
    if isinstance(command, tuple):
      # (function, arg, ...) pseudo-command.
      func = command[0]
      args = command[1:]
      s = '%s(%s)' % (func.__name__, ', '.join(map(repr, args)))
    elif isinstance(command, list):
      # BUG FIX: this used to be a second independent "if" whose "else"
      # branch clobbered the tuple rendering built above.
      # TODO:  quote arguments containing spaces
      # TODO:  handle meta characters?
      s = ' '.join(command)
    else:
      s = self.subst(command)
    if not s.endswith('\n'):
      s += '\n'
    sys.stdout.write(s)
    sys.stdout.flush()

  def execute(self, command, stdout=None, stderr=None):
    """
    Executes a single command.
    """
    if not self.active:
      return 0
    if isinstance(command, str):
      # Local import: shlex is only needed here and was missing from the
      # module imports, which made this branch raise NameError.
      import shlex
      command = self.subst(command)
      cmdargs = shlex.split(command)
      if cmdargs[0] == 'cd':
        command = (os.chdir,) + tuple(cmdargs[1:])
    if isinstance(command, tuple):
      func = command[0]
      args = command[1:]
      return func(*args)
    else:
      if stdout is sys.stdout:
        # Same as passing sys.stdout, except python2.4 doesn't fail on it.
        subout = None
      else:
        # Open pipe for anything else so Popen works on python2.4.
        subout = subprocess.PIPE
      if stderr is sys.stderr:
        # Same as passing sys.stderr, except python2.4 doesn't fail on it.
        suberr = None
      elif stderr is None:
        # Merge with stdout if stderr isn't specified.
        suberr = subprocess.STDOUT
      else:
        # Open pipe for anything else so Popen works on python2.4.
        suberr = subprocess.PIPE
      p = subprocess.Popen(command,
                           shell=(sys.platform == 'win32'),
                           stdout=subout,
                           stderr=suberr)
      p.wait()
      if stdout is None:
        self.stdout = p.stdout.read()
      elif stdout is not sys.stdout:
        stdout.write(p.stdout.read())
      if stderr not in (None, sys.stderr):
        stderr.write(p.stderr.read())
      return p.returncode

  def run(self, command, display=None, stdout=None, stderr=None):
    """
    Runs a single command, displaying it first.
    """
    if display is None:
      display = command
    self.display(display)
    return self.execute(command, stdout, stderr)
class Unbuffered(object):
  """Wraps a file object so that every write is flushed immediately.

  All other attribute accesses are delegated to the wrapped object.
  """
  def __init__(self, fp):
    self.fp = fp
  def write(self, arg):
    stream = self.fp
    stream.write(arg)
    stream.flush()
  def __getattr__(self, attr):
    return getattr(self.fp, attr)
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
def is_test_name(f):
  """Return True if *f* is named like a gyp test script (gyptest*.py)."""
  looks_like_test = f.startswith('gyptest')
  return looks_like_test and f.endswith('.py')
def find_all_gyptest_files(directory):
  """Recursively collect all gyptest*.py files under directory, sorted."""
  found = []
  for root, dirs, files in os.walk(directory):
    # do not descend into Subversion metadata directories
    if '.svn' in dirs:
      dirs.remove('.svn')
    found.extend(os.path.join(root, f) for f in files if is_test_name(f))
  found.sort()
  return found
def main(argv=None):
if argv is None:
argv = sys.argv
usage = "gyptest.py [-ahlnq] [-f formats] [test ...]"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-a", "--all", action="store_true",
help="run all tests")
parser.add_option("-C", "--chdir", action="store", default=None,
help="chdir to the specified directory")
parser.add_option("-f", "--format", action="store", default='',
help="run tests with the specified formats")
parser.add_option("-G", '--gyp_option', action="append", default=[],
help="Add -G options to the gyp command line")
parser.add_option("-l", "--list", action="store_true",
help="list available tests and exit")
parser.add_option("-n", "--no-exec", action="store_true",
help="no execute, just print the command line")
parser.add_option("--passed", action="store_true",
help="report passed tests")
parser.add_option("--path", action="append", default=[],
help="additional $PATH directory")
parser.add_option("-q", "--quiet", action="store_true",
help="quiet, don't print test command lines")
opts, args = parser.parse_args(argv[1:])
if opts.chdir:
os.chdir(opts.chdir)
if opts.path:
extra_path = [os.path.abspath(p) for p in opts.path]
extra_path = os.pathsep.join(extra_path)
os.environ['PATH'] = extra_path + os.pathsep + os.environ['PATH']
if not args:
if not opts.all:
sys.stderr.write('Specify -a to get all tests.\n')
return 1
args = ['test']
tests = []
for arg in args:
if os.path.isdir(arg):
tests.extend(find_all_gyptest_files(os.path.normpath(arg)))
else:
if not is_test_name(os.path.basename(arg)):
print >>sys.stderr, arg, 'is not a valid gyp test name.'
sys.exit(1)
tests.append(arg)
if opts.list:
for test in tests:
print test
sys.exit(0)
CommandRunner.verbose = not opts.quiet
CommandRunner.active = not opts.no_exec
cr = CommandRunner()
os.environ['PYTHONPATH'] = os.path.abspath('test/lib')
if not opts.quiet:
sys.stdout.write('PYTHONPATH=%s\n' % os.environ['PYTHONPATH'])
passed = []
failed = []
no_result = []
if opts.format:
format_list = opts.format.split(',')
else:
# TODO: not duplicate this mapping from pylib/gyp/__init__.py
format_list = {
'aix5': ['make'],
'freebsd7': ['make'],
'freebsd8': ['make'],
'openbsd5': ['make'],
'cygwin': ['msvs'],
'win32': ['msvs', 'ninja'],
'linux2': ['make', 'ninja'],
'linux3': ['make', 'ninja'],
'darwin': ['make', 'ninja', 'xcode', 'xcode-ninja'],
}[sys.platform]
for format in format_list:
os.environ['TESTGYP_FORMAT'] = format
if not opts.quiet:
sys.stdout.write('TESTGYP_FORMAT=%s\n' % format)
gyp_options = []
for option in opts.gyp_option:
gyp_options += ['-G', option]
if gyp_options and not opts.quiet:
sys.stdout.write('Extra Gyp options: %s\n' % gyp_options)
for test in tests:
status = cr.run([sys.executable, test] + gyp_options,
stdout=sys.stdout,
stderr=sys.stderr)
if status == 2:
no_result.append(test)
elif status:
failed.append(test)
else:
passed.append(test)
if not opts.quiet:
def report(description, tests):
if tests:
if len(tests) == 1:
sys.stdout.write("\n%s the following test:\n" % description)
else:
fmt = "\n%s the following %d tests:\n"
sys.stdout.write(fmt % (description, len(tests)))
sys.stdout.write("\t" + "\n\t".join(tests) + "\n")
if opts.passed:
report("Passed", passed)
report("Failed", failed)
report("No result from", no_result)
if failed:
return 1
else:
return 0
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 |
persandstrom/home-assistant | homeassistant/components/google_assistant/__init__.py | 1 | 3619 | """
Support for Actions on Google Assistant Smart Home Control.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/google_assistant/
"""
import asyncio
import logging
from typing import Dict, Any
import aiohttp
import async_timeout
import voluptuous as vol
# Typing imports
from homeassistant.core import HomeAssistant
from homeassistant.const import CONF_NAME
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.loader import bind_hass
from .const import (
DOMAIN, CONF_PROJECT_ID, CONF_CLIENT_ID, CONF_ACCESS_TOKEN,
CONF_EXPOSE_BY_DEFAULT, DEFAULT_EXPOSE_BY_DEFAULT, CONF_EXPOSED_DOMAINS,
DEFAULT_EXPOSED_DOMAINS, CONF_AGENT_USER_ID, CONF_API_KEY,
SERVICE_REQUEST_SYNC, REQUEST_SYNC_BASE_URL, CONF_ENTITY_CONFIG,
CONF_EXPOSE, CONF_ALIASES, CONF_ROOM_HINT
)
from .auth import GoogleAssistantAuthView
from .http import async_register_http
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['http']
DEFAULT_AGENT_USER_ID = 'home-assistant'
ENTITY_SCHEMA = vol.Schema({
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_EXPOSE): cv.boolean,
vol.Optional(CONF_ALIASES): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_ROOM_HINT): cv.string
})
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: {
vol.Required(CONF_PROJECT_ID): cv.string,
vol.Required(CONF_CLIENT_ID): cv.string,
vol.Required(CONF_ACCESS_TOKEN): cv.string,
vol.Optional(CONF_EXPOSE_BY_DEFAULT,
default=DEFAULT_EXPOSE_BY_DEFAULT): cv.boolean,
vol.Optional(CONF_EXPOSED_DOMAINS,
default=DEFAULT_EXPOSED_DOMAINS): cv.ensure_list,
vol.Optional(CONF_AGENT_USER_ID,
default=DEFAULT_AGENT_USER_ID): cv.string,
vol.Optional(CONF_API_KEY): cv.string,
vol.Optional(CONF_ENTITY_CONFIG): {cv.entity_id: ENTITY_SCHEMA}
}
},
extra=vol.ALLOW_EXTRA)
@bind_hass
def request_sync(hass):
    """Request a Google Assistant sync via the request_sync service."""
    hass.services.call(DOMAIN, SERVICE_REQUEST_SYNC)
async def async_setup(hass: HomeAssistant, yaml_config: Dict[str, Any]) -> bool:
    """Activate Google Actions component."""
    config = yaml_config.get(DOMAIN, {})
    agent_user_id = config.get(CONF_AGENT_USER_ID)
    api_key = config.get(CONF_API_KEY)
    # register the OAuth view and the smart-home HTTP endpoints
    hass.http.register_view(GoogleAssistantAuthView(hass, config))
    async_register_http(hass, config)

    async def request_sync_service_handler(call):
        """Handle request sync service calls."""
        # aiohttp session shared with the rest of Home Assistant
        websession = async_get_clientsession(hass)
        try:
            with async_timeout.timeout(5, loop=hass.loop):
                res = await websession.post(
                    REQUEST_SYNC_BASE_URL,
                    params={'key': api_key},
                    json={'agent_user_id': agent_user_id})
                _LOGGER.info("Submitted request_sync request to Google")
                res.raise_for_status()
        except aiohttp.ClientResponseError:
            # raised by raise_for_status(), so ``res`` is always bound here
            body = await res.read()
            _LOGGER.error(
                'request_sync request failed: %d %s', res.status, body)
        except (asyncio.TimeoutError, aiohttp.ClientError):
            _LOGGER.error("Could not contact Google for request_sync")

    # Register service only if api key is provided
    if api_key is not None:
        hass.services.async_register(
            DOMAIN, SERVICE_REQUEST_SYNC, request_sync_service_handler)
    return True
| apache-2.0 |
vmendez/DIRAC | ResourceStatusSystem/Policy/test/Test_RSS_Policy_DTPolicy.py | 3 | 3965 | """ Test_RSS_Policy_DTPolicy
"""
from mock import MagicMock
import unittest
from DIRAC import gLogger
import DIRAC.ResourceStatusSystem.Policy.DowntimePolicy as moduleTested
__RCSID__ = '$Id: $'
################################################################################
class DTPolicy_TestCase( unittest.TestCase ):
  """Base fixture: wires the module under test and a mock command object."""

  def setUp( self ):
    """ Setup

    Stores the module under test, the DowntimePolicy class and a MagicMock
    standing in for the downtime command on the test case instance.
    """
    gLogger.setLevel( 'DEBUG' )

    self.moduleTested = moduleTested
    self.testClass = self.moduleTested.DowntimePolicy
    self.DTCommand = MagicMock()

  def tearDown( self ):
    """ TearDown

    Drops the references created in setUp so each test starts clean.
    """
    del self.testClass
    del self.moduleTested
################################################################################
# Tests
class DTPolicy_Success( DTPolicy_TestCase ):
  """Success-path tests for DowntimePolicy.evaluate()."""

  def test_instantiate( self ):
    """ tests that we can instantiate one object of the tested class
    """
    policy = self.testClass()
    self.assertEqual( 'DowntimePolicy', policy.__class__.__name__ )

  def test_evaluate( self ):
    """ tests the evaluate method
    """
    policy = self.testClass()

    # command failing
    self.DTCommand.doCommand.return_value = { 'OK' : False, 'Message' : 'Grumpy command' }
    policy.setCommand( self.DTCommand )
    res = policy.evaluate()
    # Fixed: assert_ is deprecated; assertTrue is the supported spelling.
    self.assertTrue( res['OK'] )
    self.assertEqual( 'Grumpy command', res['Value']['Reason'] )
    self.assertEqual( 'Error', res['Value']['Status'] )

    # command failing /2: an unrecognized severity should also map to Error.
    self.DTCommand.doCommand.return_value = { 'OK' : True, 'Value' : {'Severity': 'XYZ',
                                                                      'EndDate' : 'Y',
                                                                      'DowntimeID': '123',
                                                                      'Description': 'blah' } }
    # Fixed: the original asserted res['Value']['Status'] *before* calling
    # evaluate() again, so it only re-checked the stale result of the
    # previous case. Evaluate first, then assert on the fresh result.
    res = policy.evaluate()
    self.assertTrue( res[ 'OK' ] )
    self.assertEqual( 'Error', res[ 'Value' ][ 'Status' ] )

    # command result empty
    self.DTCommand.doCommand.return_value = {'OK': True, 'Value': None}
    res = policy.evaluate()
    self.assertTrue( res[ 'OK' ] )
    self.assertEqual( 'Active', res[ 'Value' ][ 'Status' ] )
    self.assertEqual( 'No DownTime announced', res[ 'Value' ][ 'Reason' ] )

    # command result with a DT (OUTAGE severity maps to Banned)
    self.DTCommand.doCommand.return_value = { 'OK' : True, 'Value' : {'Severity':'OUTAGE',
                                                                      'EndDate':'Y',
                                                                      'DowntimeID': '123',
                                                                      'Description': 'blah' }}
    policy.command = self.DTCommand
    res = policy.evaluate()
    self.assertEqual( True, res[ 'OK' ] )
    self.assertEqual( 'Banned', res[ 'Value' ][ 'Status' ] )
    self.assertEqual( '123 blah', res[ 'Value' ][ 'Reason' ] )

    # command mock (WARNING severity maps to Degraded)
    self.DTCommand.doCommand.return_value = { 'OK' : True, 'Value' : {'Severity': 'WARNING',
                                                                      'EndDate': 'Y',
                                                                      'DowntimeID': '123',
                                                                      'Description': 'blah' }}
    policy.command = self.DTCommand
    res = policy.evaluate()
    self.assertEqual( True, res[ 'OK' ] )
    self.assertEqual( 'Degraded', res[ 'Value' ][ 'Status' ] )
    self.assertEqual( '123 blah', res[ 'Value' ][ 'Reason' ] )
################################################################################
################################################################################
if __name__ == '__main__':
  # Build the full suite explicitly: the (test-less) base fixture first,
  # then the success-path tests, and run it with verbose output.
  loader = unittest.defaultTestLoader
  suite = loader.loadTestsFromTestCase( DTPolicy_TestCase )
  suite.addTest( loader.loadTestsFromTestCase( DTPolicy_Success ) )
  testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
| gpl-3.0 |
NelisVerhoef/scikit-learn | examples/svm/plot_svm_kernels.py | 329 | 1971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets: 16 hand-placed 2-D points, transposed so each
# row of X is one sample.
X = np.c_[(.4, -.7),
          (-1.5, -1),
          (-1.4, -.9),
          (-1.3, -1.2),
          (-1.1, -.2),
          (-1.2, -.4),
          (-.5, 1.2),
          (-1.5, 2.1),
          (1, 1),
          # --
          (1.3, .8),
          (1.2, .5),
          (.2, -2),
          (.5, -2.4),
          (.2, -2.3),
          (0, -2.7),
          (1.3, 2.1)].T
# First 8 samples are class 0, remaining 8 are class 1.
Y = [0] * 8 + [1] * 8

# figure number (one figure per kernel)
fignum = 1

# fit the model once per kernel and draw its decision surface
for kernel in ('linear', 'poly', 'rbf'):
    clf = svm.SVC(kernel=kernel, gamma=2)
    clf.fit(X, Y)

    # plot the line, the points, and the nearest vectors to the plane
    plt.figure(fignum, figsize=(4, 3))
    plt.clf()

    # Support vectors drawn as larger hollow circles behind the data points.
    plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
                facecolors='none', zorder=10)
    plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)

    plt.axis('tight')
    x_min = -3
    x_max = 3
    y_min = -3
    y_max = 3

    # Evaluate the decision function on a 200x200 grid over the plot area.
    XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
    Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(XX.shape)
    plt.figure(fignum, figsize=(4, 3))
    # Two-color background: the sign of the decision function.
    plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
    # Solid line is the decision boundary; dashed lines are the +/-0.5 levels.
    plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
                levels=[-.5, 0, .5])

    plt.xlim(x_min, x_max)
    plt.ylim(y_min, y_max)

    plt.xticks(())
    plt.yticks(())
    fignum = fignum + 1
plt.show()
| bsd-3-clause |
BrandonY/gsutil | gslib/ui_controller.py | 5 | 47872 | # -*- coding: utf-8 -*-
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes for gsutil UI controller, UIThread and MainThreadUIQueue."""
from __future__ import absolute_import
from collections import deque
import Queue
import sys
import threading
import time
from gslib.metrics import LogPerformanceSummaryParams
from gslib.metrics import LogRetryableError
from gslib.parallelism_framework_util import ZERO_TASKS_TO_DO_ARGUMENT
from gslib.thread_message import FileMessage
from gslib.thread_message import FinalMessage
from gslib.thread_message import MetadataMessage
from gslib.thread_message import PerformanceSummaryMessage
from gslib.thread_message import ProducerThreadMessage
from gslib.thread_message import ProgressMessage
from gslib.thread_message import RetryableErrorMessage
from gslib.thread_message import SeekAheadMessage
from gslib.thread_message import StatusMessage
from gslib.util import DecimalShort
from gslib.util import HumanReadableWithDecimalPlaces
from gslib.util import MakeHumanReadable
from gslib.util import PrettyTime
class EstimationSource(object):
  """enum for total size source.

  Lower values have higher priority: a source may only overwrite
  num_objects/total_size when its value is <= the currently stored source.
  """

  # Integer to indicate total size came from the final ProducerThreadMessage.
  # It has priority over all other total_size sources.
  PRODUCER_THREAD_FINAL = 1
  # Integer to indicate total size came from SeekAheadThread.
  # It has priority over self.PRODUCER_THREAD_ESTIMATE and over
  # self.INDIVIDUAL_MESSAGES.
  SEEK_AHEAD_THREAD = 2
  # Integer to indicate total size came from a ProducerThread estimation.
  # It has priority over self.INDIVIDUAL_MESSAGES.
  PRODUCER_THREAD_ESTIMATE = 3
  # Stores the actual source from total_size. We start from FileMessages or
  # MetadataMessages.
  INDIVIDUAL_MESSAGES = 4
  # Note: this priority based model was used in case we add new sources for
  # total_size in the future. It also allows us to search for smaller numbers
  # (larger priorities) rather than having to list those with higher priority.
def BytesToFixedWidthString(num_bytes, decimal_places=1):
  """Adjusts proper width for printing num_bytes in readable format.

  Args:
    num_bytes: The number of bytes we must display.
    decimal_places: The standard number of decimal places.

  Returns:
    String of fixed width representing num_bytes.
  """
  readable = HumanReadableWithDecimalPlaces(num_bytes,
                                            decimal_places=decimal_places)
  parts = readable.split()
  # When the integer part falls in [1000, 1024), drop the decimals (and the
  # dot) so the value still fits within the fixed-width column.
  if int(round(float(parts[0]))) >= 1000:
    keep = len(parts[0]) - decimal_places - 1
    parts[0] = parts[0][:keep]
  return ' '.join(parts).rjust(9)
class StatusMessageManager(object):
  """General manager for common functions shared by data and metadata managers.

  This subclass has the responsibility of having a common constructor and the
  same handler for SeekAheadMessages and ProducerThreadMessages.
  """

  class _ThroughputInformation(object):
    """Class that contains all information needed for throughput calculation.

    This _ThroughputInformation is used to track progress and time at several
    points of our operation.
    """

    def __init__(self, progress, report_time):
      """Constructor of _ThroughputInformation.

      Args:
        progress: The current progress, in bytes/second or objects/second.
        report_time: Float representing when progress was reported (seconds
            since Epoch).
      """
      self.progress = progress
      self.time = report_time

  def __init__(self, update_message_period=1, update_spinner_period=0.6,
               sliding_throughput_period=5, first_throughput_latency=10,
               quiet_mode=False, custom_time=None, verbose=False,
               console_width=80):
    """Instantiates a StatusMessageManager.

    Args:
      update_message_period: Minimum period for refreshing and displaying
                             new information. A non-positive value will ignore
                             any time restrictions imposed by this field, but
                             it will affect throughput and time remaining
                             estimations.
      update_spinner_period: Minimum period for refreshing and displaying the
                             spinner. A non-positive value will ignore
                             any time restrictions imposed by this field.
      sliding_throughput_period: Sliding period for throughput calculation. A
                                 non-positive value will make it impossible to
                                 calculate the throughput.
      first_throughput_latency: Minimum waiting time before actually displaying
                                throughput info. A non-positive value will
                                ignore any time restrictions imposed by this
                                field.
      quiet_mode: If True, do not print status messages (but still process
                  them for analytics reporting as necessary).
      custom_time: If a custom start_time is desired. Used for testing.
      verbose: Tells whether or not the operation is on verbose mode.
      console_width: Width to display on console. This should not adjust the
                     visual output, just the space padding. For proper
                     visualization, we recommend setting this field to at least
                     80.
    """
    self.update_message_period = update_message_period
    self.update_spinner_period = update_spinner_period
    self.sliding_throughput_period = sliding_throughput_period
    self.first_throughput_latency = first_throughput_latency
    self.quiet_mode = quiet_mode
    self.custom_time = custom_time
    self.verbose = verbose
    self.console_width = console_width

    # Initial estimation source for number of objects and total size
    # is through individual FileMessages or individual MetadataMessages,
    # depending on the StatusMessageManager superclass.
    self.num_objects_source = EstimationSource.INDIVIDUAL_MESSAGES
    self.total_size_source = EstimationSource.INDIVIDUAL_MESSAGES
    # Number of objects being processed. Fixed: this was previously
    # initialized twice in this constructor; the redundant second
    # assignment was removed.
    self.num_objects = 0
    # Only used on data operations. Will remain 0 for metadata operations.
    self.total_size = 0

    # Time at last info update displayed.
    self.refresh_message_time = (self.custom_time if self.custom_time
                                 else time.time())
    self.start_time = self.refresh_message_time
    # Time at last spinner update.
    self.refresh_spinner_time = self.refresh_message_time

    # Measured in objects/second or bytes/second, depending on the superclass.
    self.throughput = 0.0
    # Deque of _ThroughputInformation to help with throughput calculation.
    self.old_progress = deque()
    self.last_progress_time = 0

    self.spinner_char_list = ['/', '-', '\\', '|']
    self.current_spinner_index = 0
    self.objects_finished = 0
    # This overrides time constraints for updating and displaying
    # important information, such as having finished to process an object.
    self.object_report_change = False
    self.final_message = False

  def GetSpinner(self):
    """Returns the current spinner character.

    Returns:
      char_to_print: Char to be printed as the spinner
    """
    return self.spinner_char_list[self.current_spinner_index]

  def UpdateSpinner(self):
    """Updates the current spinner character."""
    self.current_spinner_index = ((self.current_spinner_index + 1) %
                                  len(self.spinner_char_list))

  def _HandleProducerThreadMessage(self, status_message):
    """Handles a ProducerThreadMessage.

    Args:
      status_message: The ProducerThreadMessage to be processed.
    """
    if status_message.finished:
      # This means this was a final ProducerThreadMessage.
      if self.num_objects_source >= EstimationSource.PRODUCER_THREAD_FINAL:
        self.num_objects_source = EstimationSource.PRODUCER_THREAD_FINAL
        self.num_objects = status_message.num_objects
      if (self.total_size_source >= EstimationSource.PRODUCER_THREAD_FINAL and
          status_message.size):
        self.total_size_source = EstimationSource.PRODUCER_THREAD_FINAL
        self.total_size = status_message.size
      return
    if self.num_objects_source >= EstimationSource.PRODUCER_THREAD_ESTIMATE:
      self.num_objects_source = EstimationSource.PRODUCER_THREAD_ESTIMATE
      self.num_objects = status_message.num_objects
    if (self.total_size_source >= EstimationSource.PRODUCER_THREAD_ESTIMATE and
        status_message.size):
      self.total_size_source = EstimationSource.PRODUCER_THREAD_ESTIMATE
      self.total_size = status_message.size

  def _HandleSeekAheadMessage(self, status_message, stream):
    """Handles a SeekAheadMessage.

    Args:
      status_message: The SeekAheadMessage to be processed.
      stream: Stream to print messages.
    """
    estimate_message = ('Estimated work for this command: objects: %s' %
                        status_message.num_objects)
    if status_message.size:
      estimate_message += (', total size: %s' %
                           MakeHumanReadable(status_message.size))
      if self.total_size_source >= EstimationSource.SEEK_AHEAD_THREAD:
        self.total_size_source = EstimationSource.SEEK_AHEAD_THREAD
        self.total_size = status_message.size

    if self.num_objects_source >= EstimationSource.SEEK_AHEAD_THREAD:
      self.num_objects_source = EstimationSource.SEEK_AHEAD_THREAD
      self.num_objects = status_message.num_objects

    estimate_message += '\n'
    if not self.quiet_mode:
      stream.write(estimate_message)

  def _HandlePerformanceSummaryMessage(self, status_message):
    """Handles a PerformanceSummaryMessage.

    Args:
      status_message: The PerformanceSummaryMessage to be processed.
    """
    LogPerformanceSummaryParams(uses_slice=status_message.uses_slice)

  def ShouldTrackThroughput(self, cur_time):
    """Decides whether enough time has passed to start tracking throughput.

    Args:
      cur_time: current time.
    Returns:
      Whether or not we should track the throughput.
    """
    return cur_time - self.start_time >= self.first_throughput_latency

  def ShouldPrintProgress(self, cur_time):
    """Decides whether or not it is time for printing a new progress.

    Args:
      cur_time: current time.
    Returns:
      Whether or not we should print the progress.
    """
    sufficient_time_elapsed = (
        cur_time - self.refresh_message_time >= self.update_message_period)
    # Don't report if we aren't actually going to do anything (for example,
    # an rsync that will sync 0 objects).
    nonzero_report = self.num_objects
    return (sufficient_time_elapsed or self.object_report_change) and (
        nonzero_report)

  def ShouldPrintSpinner(self, cur_time):
    """Decides whether or not it is time for updating the spinner character.

    Args:
      cur_time: Current time.
    Returns:
      Whether or not we should update and print the spinner.
    """
    # Fixed: wrap in bool() so callers get a boolean rather than the
    # (truthy) total_size integer.
    return bool(cur_time - self.refresh_spinner_time >
                self.update_spinner_period and self.total_size)

  def PrintSpinner(self, stream=sys.stderr):
    """Prints a spinner character.

    Args:
      stream: Stream to print messages. Usually sys.stderr, but customizable
              for testing.
    """
    self.UpdateSpinner()
    if not self.quiet_mode:
      stream.write(self.GetSpinner() + '\r')

  def UpdateThroughput(self, cur_time, cur_progress):
    """Updates throughput if the required period for calculation has passed.

    The throughput is calculated by taking all the progress (objects or bytes)
    processed within the last sliding_throughput_period seconds, and dividing
    that by the time period between the oldest progress time within that range
    and the last progress measurement, which are defined by oldest_progress[1]
    and last_progress_time, respectively. Among the pros of this approach,
    a connection break or a sudden change in throughput is quickly noticeable.
    Furthermore, using the last throughput measurement rather than the current
    time allows us to have a better estimation of the actual throughput.

    Args:
      cur_time: Current time to check whether or not it is time for a new
                throughput measurement.
      cur_progress: The current progress, in number of objects finished or in
                    bytes.
    """
    # Drop samples that fell out of the sliding window, always keeping at
    # least one so a throughput can still be computed.
    while (len(self.old_progress) > 1 and
           cur_time - self.old_progress[0].time >
           self.sliding_throughput_period):
      self.old_progress.popleft()
    if not self.old_progress:
      return
    oldest_progress = self.old_progress[0]
    if self.last_progress_time == oldest_progress.time:
      self.throughput = 0
      return
    # If old-progress is not empty and the time of oldest_progress does not
    # match the last_progress_time, we can safely calculate the throughput.
    self.throughput = ((cur_progress - oldest_progress.progress) /
                       (self.last_progress_time -
                        oldest_progress.time))
    # Just to avoid -0.00 B/s.
    self.throughput = max(0, self.throughput)

  def PrintFinalSummaryMessage(self, stream=sys.stderr):
    """Prints a final message to indicate operation succeeded.

    Args:
      stream: Stream to print messages. Usually sys.stderr, but customizable
              for testing.
    """
    string_to_print = ('Operation completed over %s objects'
                       % DecimalShort(self.num_objects))
    if self.total_size:
      string_to_print += (
          '/%s' % HumanReadableWithDecimalPlaces(self.total_size))
    remaining_width = self.console_width - len(string_to_print)
    if not self.quiet_mode:
      stream.write(('\n' + string_to_print + '.' +
                    (max(remaining_width, 0) * ' ') + '\n'))
class MetadataManager(StatusMessageManager):
  """Manages shared state for metadata operations.

  This manager is specific for metadata operations. Among its main functions,
  it receives incoming StatusMessages, storing all necessary data
  about the current and past states of the system necessary to display to the
  UI. It also provides methods for calculating metrics such as throughput and
  estimated time remaining. Finally, it provides methods for displaying
  messages to the UI.
  """

  def __init__(self, update_message_period=1, update_spinner_period=0.6,
               sliding_throughput_period=5, first_throughput_latency=10,
               quiet_mode=False, custom_time=None, verbose=False,
               console_width=80):
    # pylint: disable=g-doc-args
    """Instantiates a MetadataManager.

    See argument documentation in StatusMessageManager base class.
    """
    # pylint: enable=g-doc-args
    super(MetadataManager, self).__init__(
        update_message_period=update_message_period,
        update_spinner_period=update_spinner_period,
        sliding_throughput_period=sliding_throughput_period,
        first_throughput_latency=first_throughput_latency,
        quiet_mode=quiet_mode, custom_time=custom_time, verbose=verbose,
        console_width=console_width)

  def GetProgress(self):
    """Gets the progress for a MetadataManager.

    Returns:
      The number of finished objects.
    """
    return self.objects_finished

  def _HandleMetadataMessage(self, status_message):
    """Handles a MetadataMessage.

    Args:
      status_message: The MetadataMessage to be processed.
    """
    self.objects_finished += 1
    if self.num_objects_source >= EstimationSource.INDIVIDUAL_MESSAGES:
      self.num_objects_source = EstimationSource.INDIVIDUAL_MESSAGES
      self.num_objects += 1
    # Ensures we print periodic progress, and that we send a final message.
    self.object_report_change = True
    self.last_progress_time = status_message.time
    if (self.objects_finished == self.num_objects and
        self.num_objects_source == EstimationSource.PRODUCER_THREAD_FINAL):
      self.final_message = True

  def ProcessMessage(self, status_message, stream):
    """Processes a message from _MainThreadUIQueue or _UIThread.

    Args:
      status_message: The StatusMessage item to be processed.
      stream: Stream to print messages.
    """
    self.object_report_change = False
    if isinstance(status_message, SeekAheadMessage):
      self._HandleSeekAheadMessage(status_message, stream)
    elif isinstance(status_message, ProducerThreadMessage):
      self._HandleProducerThreadMessage(status_message)
    elif isinstance(status_message, MetadataMessage):
      self._HandleMetadataMessage(status_message)
    elif isinstance(status_message, RetryableErrorMessage):
      LogRetryableError(status_message)
    elif isinstance(status_message, PerformanceSummaryMessage):
      self._HandlePerformanceSummaryMessage(status_message)
    # Record a throughput sample for every message so the sliding window
    # always reflects the most recent activity.
    self.old_progress.append(
        self._ThroughputInformation(self.objects_finished,
                                    status_message.time))

  def PrintProgress(self, stream=sys.stderr):
    """Prints progress and throughput/time estimation.

    Prints total number of objects and number of finished objects with the
    percentage of work done, potentially including the throughput
    (in objects/second) and estimated time remaining.

    Args:
      stream: Stream to print messages. Usually sys.stderr, but customizable
              for testing.
    """
    # Time to update all information
    total_remaining = self.num_objects - self.objects_finished
    if self.throughput:
      time_remaining = total_remaining / self.throughput
    else:
      time_remaining = None
    char_to_print = self.GetSpinner()

    if self.num_objects_source <= EstimationSource.SEEK_AHEAD_THREAD:
      # An example of objects_completed here would be ' [2/3 objects]'.
      objects_completed = ('[' + DecimalShort(self.objects_finished) + '/' +
                           DecimalShort(self.num_objects) + ' objects]')
      if self.num_objects == self.objects_finished:
        percentage = '100'
      else:
        # Cap at 99% until the last object finishes, so the display never
        # claims completion while work remains.
        percentage = ('%3d' % min(99, int(100 * float(self.objects_finished) /
                                          self.num_objects)))
      percentage_completed = percentage + '% Done'
    else:
      # An example of objects_completed here would be ' [2 objects]'.
      objects_completed = ('[' + DecimalShort(self.objects_finished) +
                           ' objects]')
      percentage_completed = ''

    if (self.refresh_message_time - self.start_time >
        self.first_throughput_latency):
      # Should also include throughput.
      # An example of throughput here would be '2 objects/s'
      throughput = '%.2f objects/s' % self.throughput
      if (self.num_objects_source <= EstimationSource.PRODUCER_THREAD_ESTIMATE
          and self.throughput):
        # Should also include time remaining.
        # An example of time remaining would be ' ETA 00:00:11'.
        time_remaining_str = 'ETA ' + PrettyTime(time_remaining)
      else:
        time_remaining_str = ''
    else:
      throughput = ''
      time_remaining_str = ''

    format_str = ('{char_to_print} {objects_completed} {percentage_completed}'
                  ' {throughput} {time_remaining_str}')
    string_to_print = format_str.format(
        char_to_print=char_to_print, objects_completed=objects_completed,
        percentage_completed=percentage_completed, throughput=throughput,
        time_remaining_str=time_remaining_str)
    remaining_width = self.console_width - len(string_to_print)
    if not self.quiet_mode:
      stream.write(string_to_print + (max(remaining_width, 0) * ' ') + '\r')

  def CanHandleMessage(self, status_message):
    """Determines whether this manager is suitable for handling status_message.

    Args:
      status_message: The StatusMessage object to be analyzed.
    Returns:
      True if this message can be properly handled by this manager,
      False otherwise.
    """
    # Fixed idiom: return the isinstance result directly instead of the
    # if/return-True/return-False pattern.
    return isinstance(status_message, (SeekAheadMessage,
                                       ProducerThreadMessage,
                                       MetadataMessage, FinalMessage,
                                       RetryableErrorMessage,
                                       PerformanceSummaryMessage))
class DataManager(StatusMessageManager):
"""Manages shared state for data operations.
This manager is specific for data operations. Among its main functions,
it receives incoming StatusMessages, storing all necessary data
about the current and past states of the system necessary to display to the
UI. It also provides methods for calculating metrics such as throughput and
estimated time remaining. Finally, it provides methods for displaying messages
to the UI.
"""
  class _ProgressInformation(object):
    """Class that contains all progress information needed for a given file.

    This _ProgressInformation is used as the value associated with a file_name
    in the dict that stores the information about all processed files.
    """

    def __init__(self, size):
      """Constructor of _ProgressInformation.

      Args:
        size: The total size of the file.
      """
      # Sum of all progress obtained in this operation.
      self.new_progress_sum = 0
      # Sum of all progress from previous operations (mainly for resuming
      # uploads or resuming downloads).
      self.existing_progress_sum = 0
      # Dict for tracking the progress for each individual component. Key is
      # of the form (component_num, dst_url) and correspondent element is a
      # tuple which stores the current progress obtained from this operation,
      # and the progress obtained from previous operations.
      self.dict = {}
      # The total size for the file (0 when the size is unknown).
      self.size = size
def __init__(self, update_message_period=1, update_spinner_period=0.6,
sliding_throughput_period=5, first_throughput_latency=10,
quiet_mode=False, custom_time=None, verbose=False,
console_width=None):
# pylint: disable=g-doc-args
"""Instantiates a DataManager.
See argument documentation in StatusMessageManager base class.
"""
# pylint: disable=g-doc-args
super(DataManager, self).__init__(
update_message_period=update_message_period,
update_spinner_period=update_spinner_period,
sliding_throughput_period=sliding_throughput_period,
first_throughput_latency=first_throughput_latency,
quiet_mode=quiet_mode, custom_time=custom_time, verbose=verbose,
console_width=console_width)
self.first_item = True
self.total_progress = 0 # Sum of progress for all threads.
self.new_progress = 0
self.existing_progress = 0
# Dict containing individual progress for each file. Key is filename
# (from src_url). It maps to a _ProgressInformation object.
self.individual_file_progress = {}
self.component_total = 0
self.finished_components = 0
self.existing_components = 0
  def GetProgress(self):
    """Gets the progress for a DataManager.

    Returns:
      The number of processed bytes in this operation. Bytes carried over
      from previous (resumed) operations are tracked separately in
      existing_progress and are not included here.
    """
    return self.new_progress
  def _HandleFileDescription(self, status_message):
    """Handles a FileMessage that describes a file.

    A non-finished message starts tracking a new file (possibly growing the
    num_objects/total_size estimates); a finished message credits the file's
    still-unreported bytes to the aggregate progress and drops its tracking
    entry.

    Args:
      status_message: the FileMessage to be processed.
    """
    if not status_message.finished:
      # File started.
      if self.first_item and not self.custom_time:
        # Set initial time.
        # NOTE(review): first_item is only cleared on this branch, so with a
        # custom_time it stays True -- presumably intended since custom_time
        # is test-only; confirm.
        self.refresh_message_time = status_message.time
        self.start_time = self.refresh_message_time
        self.last_throughput_time = self.refresh_message_time
        self.first_item = False

      # Gets file name (from src_url).
      file_name = status_message.src_url.url_string
      # Normalize an unknown size (None) to 0 for the arithmetic below.
      status_message.size = status_message.size if status_message.size else 0

      # Creates a new entry on individual_file_progress.
      self.individual_file_progress[file_name] = (
          self._ProgressInformation(status_message.size))

      if self.num_objects_source >= EstimationSource.INDIVIDUAL_MESSAGES:
        # This ensures the file has not been counted on SeekAheadThread or
        # in ProducerThread.
        self.num_objects_source = EstimationSource.INDIVIDUAL_MESSAGES
        self.num_objects += 1
      if self.total_size_source >= EstimationSource.INDIVIDUAL_MESSAGES:
        # This ensures the file size has not been counted on SeekAheadThread
        # or in ProducerThread.
        self.total_size_source = EstimationSource.INDIVIDUAL_MESSAGES
        self.total_size += status_message.size
      self.object_report_change = True

    else:
      # File finished.
      self.objects_finished += 1
      file_name = status_message.src_url.url_string
      file_progress = self.individual_file_progress[file_name]
      total_bytes_transferred = (file_progress.new_progress_sum +
                                 file_progress.existing_progress_sum)
      # Ensures total_progress has the right value: any bytes never reported
      # through progress/component messages are credited here.
      self.total_progress += file_progress.size - total_bytes_transferred
      self.new_progress += file_progress.size - total_bytes_transferred
      self.last_progress_time = status_message.time

      # Deleting _ProgressInformation object to save memory.
      del self.individual_file_progress[file_name]
      self.object_report_change = True
      if (self.objects_finished == self.num_objects and
          self.num_objects_source == EstimationSource.PRODUCER_THREAD_FINAL):
        self.final_message = True
def _IsFile(self, file_message):
"""Tells whether or not this FileMessage represent a file.
This is needed because FileMessage is used by both files and components.
Args:
file_message: The FileMessage to be analyzed.
Returns:
Whether or not this represents a file.
"""
message_type = file_message.message_type
return (message_type == FileMessage.FILE_DOWNLOAD or
message_type == FileMessage.FILE_UPLOAD or
message_type == FileMessage.FILE_CLOUD_COPY or
message_type == FileMessage.FILE_DAISY_COPY or
message_type == FileMessage.FILE_LOCAL_COPY or
message_type == FileMessage.FILE_REWRITE or
message_type == FileMessage.FILE_HASH)
  def _HandleComponentDescription(self, status_message):
    """Handles a FileMessage that describes a component.

    Three cases: an EXISTING_COMPONENT credits already-present bytes; a
    starting COMPONENT_TO_UPLOAD/COMPONENT_TO_DOWNLOAD registers the
    component (crediting any bytes_already_downloaded); a finishing one
    credits the component's still-unreported bytes.

    Args:
      status_message: The FileMessage to be processed.
    """
    if (status_message.message_type == FileMessage.EXISTING_COMPONENT and
        not status_message.finished):
      # Existing component: have to ensure total_progress accounts for it.
      self.existing_components += 1

      file_name = status_message.src_url.url_string
      file_progress = self.individual_file_progress[file_name]
      key = (status_message.component_num, status_message.dst_url)
      # Tuple is (progress from this operation, pre-existing progress).
      file_progress.dict[key] = (0, status_message.size)
      file_progress.existing_progress_sum += status_message.size

      self.total_progress += status_message.size
      self.existing_progress += status_message.size

    elif ((status_message.message_type == FileMessage.COMPONENT_TO_UPLOAD or
           status_message.message_type == FileMessage.COMPONENT_TO_DOWNLOAD)):
      if not status_message.finished:
        # Component started.
        self.component_total += 1
        if status_message.message_type == FileMessage.COMPONENT_TO_DOWNLOAD:
          # A resumed download may already have bytes on disk; count them as
          # pre-existing progress, not as progress from this run.
          file_name = status_message.src_url.url_string
          file_progress = self.individual_file_progress[file_name]
          file_progress.existing_progress_sum += (
              status_message.bytes_already_downloaded)

          key = (status_message.component_num, status_message.dst_url)
          file_progress.dict[key] = (0,
                                     status_message.bytes_already_downloaded)
          self.total_progress += status_message.bytes_already_downloaded
          self.existing_progress += status_message.bytes_already_downloaded

      else:
        # Component finished: credit the part of its size not yet reported
        # via ProgressMessages (sum(last_update) = new + pre-existing bytes).
        self.finished_components += 1

        file_name = status_message.src_url.url_string
        file_progress = self.individual_file_progress[file_name]
        key = (status_message.component_num, status_message.dst_url)
        last_update = (
            file_progress.dict[key] if key in file_progress.dict else (0, 0))
        self.total_progress += status_message.size - sum(last_update)
        self.new_progress += status_message.size - sum(last_update)
        self.last_progress_time = status_message.time
        file_progress.new_progress_sum += (status_message.size -
                                           sum(last_update))
        file_progress.dict[key] = (status_message.size - last_update[1],
                                   last_update[1])
  def _HandleProgressMessage(self, status_message):
    """Handles a ProgressMessage that tracks progress of a file or component.

    Only the delta since the component's last recorded update (excluding
    bytes that pre-existed from earlier runs) is added to the aggregate
    counters.

    Args:
      status_message: The ProgressMessage to be processed.
    """
    # Retrieving index and dict for this file.
    file_name = status_message.src_url.url_string
    file_progress = self.individual_file_progress[file_name]

    # Retrieves last update ((0,0) if no previous update) for this file or
    # component. To ensure uniqueness (among components),
    # we use a (component_num, dst_url) tuple as our key.
    key = (status_message.component_num, status_message.dst_url)
    last_update = (
        file_progress.dict[key] if key in file_progress.dict else (0, 0))
    # Subtract the pre-existing bytes (last_update[1]) so processed_bytes
    # reflects only work done by this operation. Note this mutates the
    # message in place.
    status_message.processed_bytes -= last_update[1]
    file_progress.new_progress_sum += (
        status_message.processed_bytes - last_update[0])

    # Updates total progress with new update from component.
    self.total_progress += status_message.processed_bytes - last_update[0]
    self.new_progress += status_message.processed_bytes - last_update[0]

    # Updates file_progress.dict on component's key.
    file_progress.dict[key] = (status_message.processed_bytes, last_update[1])
    self.last_progress_time = status_message.time
  def ProcessMessage(self, status_message, stream):
    """Processes a message from _MainThreadUIQueue or _UIThread.

    Dispatches on the concrete StatusMessage subclass, then records a
    (progress, time) sample so throughput can be computed over the
    sliding window.

    Args:
      status_message: The StatusMessage item to be processed.
      stream: Stream to print messages. Here only for SeekAheadThread
    """
    self.object_report_change = False
    if isinstance(status_message, ProducerThreadMessage):
      # ProducerThread info.
      self._HandleProducerThreadMessage(status_message)
    elif isinstance(status_message, SeekAheadMessage):
      # SeekAheadThread info.
      self._HandleSeekAheadMessage(status_message, stream)
    elif isinstance(status_message, FileMessage):
      if self._IsFile(status_message):
        # File info.
        self._HandleFileDescription(status_message)
      else:
        # Component info.
        self._HandleComponentDescription(status_message)
      # File/component start-finish events feed analytics reporting.
      LogPerformanceSummaryParams(file_message=status_message)
    elif isinstance(status_message, ProgressMessage):
      # Progress info.
      self._HandleProgressMessage(status_message)
    elif isinstance(status_message, RetryableErrorMessage):
      LogRetryableError(status_message)
    elif isinstance(status_message, PerformanceSummaryMessage):
      self._HandlePerformanceSummaryMessage(status_message)
    # Sample current cumulative progress; consumed by throughput calculation.
    self.old_progress.append(
        self._ThroughputInformation(self.new_progress, status_message.time))
  def PrintProgress(self, stream=sys.stderr):
    """Prints progress and throughput/time estimation.

    If a ProducerThreadMessage or SeekAheadMessage has been provided,
    it outputs the number of files completed, number of total files,
    the current progress, the total size, and the percentage it
    represents.

    If none of those have been provided, it only includes the number of files
    completed, the current progress and total size (which might be updated),
    with no percentage as we do not know if more files are coming.

    It may also include time estimation (available only given
    ProducerThreadMessage or SeekAheadMessage provided) and throughput. For that
    to happen, there is an extra condition of at least first_throughput_latency
    seconds having been passed since the UIController started, and that
    either the ProducerThread or the SeekAheadThread have estimated total
    number of files and total size.

    Args:
      stream: Stream to print messages. Usually sys.stderr, but customizable
          for testing.
    """
    # Time to update all information.
    total_remaining = self.total_size - self.total_progress
    if self.throughput:
      time_remaining = total_remaining / self.throughput
    else:
      # No throughput sample yet; the ETA is omitted below.
      time_remaining = None
    char_to_print = self.GetSpinner()
    if self.num_objects_source <= EstimationSource.SEEK_AHEAD_THREAD:
      # An example of objects_completed here would be ' [2/3 files]'.
      objects_completed = ('[' + DecimalShort(self.objects_finished) + '/' +
                           DecimalShort(self.num_objects) + ' files]')
    else:
      # An example of objects_completed here would be ' [2 files]'.
      objects_completed = '[' + DecimalShort(self.objects_finished) + ' files]'
    # An example of bytes_progress would be '[101.0 MiB/1.0 GiB]'.
    bytes_progress = (
        '[%s/%s]' % (BytesToFixedWidthString(self.total_progress),
                     BytesToFixedWidthString(self.total_size)))
    if self.total_size_source <= EstimationSource.SEEK_AHEAD_THREAD:
      if self.num_objects == self.objects_finished:
        percentage = '100'
      else:
        # Cap at 99 so rounding never displays 100% before completion.
        percentage = ('%3d' % min(99, int(100 * float(self.total_progress) /
                                          self.total_size)))
      percentage_completed = percentage + '% Done'
    else:
      # Total size is only an estimate; suppress the percentage.
      percentage_completed = ''
    if (self.refresh_message_time - self.start_time >
        self.first_throughput_latency):
      # Should also include throughput.
      # An example of throughput here would be ' 82.3 MiB/s'
      throughput = BytesToFixedWidthString(self.throughput) + '/s'
      if (self.total_size_source <= EstimationSource.PRODUCER_THREAD_ESTIMATE
          and self.throughput):
        # Should also include time remaining.
        # An example of time remaining would be ' ETA 00:00:11'.
        time_remaining_str = 'ETA ' + PrettyTime(time_remaining)
      else:
        time_remaining_str = ''
    else:
      throughput = ''
      time_remaining_str = ''
    format_str = ('{char_to_print} {objects_completed}{bytes_progress}'
                  ' {percentage_completed} {throughput} {time_remaining_str}')
    string_to_print = format_str.format(
        char_to_print=char_to_print, objects_completed=objects_completed,
        bytes_progress=bytes_progress,
        percentage_completed=percentage_completed,
        throughput=throughput, time_remaining_str=time_remaining_str)
    # Pad with spaces to erase leftovers from a longer previous line, and
    # end with '\r' so the next print overwrites this one in place.
    remaining_width = self.console_width - len(string_to_print)
    if not self.quiet_mode:
      stream.write(string_to_print + (max(remaining_width, 0) * ' ') + '\r')
def CanHandleMessage(self, status_message):
"""Determines whether this manager is suitable for handling status_message.
Args:
status_message: The StatusMessage object to be analyzed.
Returns:
True if this message can be properly handled by this manager,
False otherwise.
"""
if isinstance(status_message, (SeekAheadMessage, ProducerThreadMessage,
FileMessage, ProgressMessage, FinalMessage,
RetryableErrorMessage,
PerformanceSummaryMessage)):
return True
return False
class UIController(object):
  """Controller UI class to integrate _MainThreadUIQueue and _UIThread.

  This class receives messages from _MainThreadUIQueue and _UIThread and sends
  them to an appropriate manager, which will then process and store data about
  them.
  """

  def __init__(self, update_message_period=1, update_spinner_period=0.6,
               sliding_throughput_period=5, first_throughput_latency=10,
               quiet_mode=False, custom_time=None, verbose=False,
               dump_status_messages_file=None):
    """Instantiates a UIController.

    Args:
      update_message_period: Minimum period for refreshing and displaying
          new information. A non-positive value will ignore any time
          restrictions imposed by this field.
      update_spinner_period: Minimum period for refreshing and displaying the
          spinner. A non-positive value will ignore any time restrictions
          imposed by this field.
      sliding_throughput_period: Sliding period for throughput calculation. A
          non-positive value will make it impossible to calculate the
          throughput.
      first_throughput_latency: Minimum waiting time before actually displaying
          throughput info. A non-positive value will ignore any time
          restrictions imposed by this field.
      quiet_mode: If True, do not print status messages (but still process
          them for analytics reporting as necessary).
      custom_time: If a custom start_time is desired. Used for testing.
      verbose: Tells whether or not the operation is on verbose mode.
      dump_status_messages_file: File path for logging all received status
          messages, for debugging purposes.
    """
    self.verbose = verbose
    self.update_message_period = update_message_period
    self.update_spinner_period = update_spinner_period
    self.sliding_throughput_period = sliding_throughput_period
    self.first_throughput_latency = first_throughput_latency
    self.manager = None
    self.quiet_mode = quiet_mode
    self.custom_time = custom_time
    self.console_width = 80  # Console width. Passed to manager.
    # List storing all estimation messages from SeekAheadThread or
    # ProducerThread. This is used when we still do not know which manager to
    # use.
    self.early_estimation_messages = []
    self.printed_final_message = False
    self.dump_status_message_fp = None
    if dump_status_messages_file:
      self.dump_status_message_fp = open(dump_status_messages_file, 'ab')

  def _CreateManager(self, manager_class):
    """Creates a manager configured with this controller's display settings.

    Centralizes manager construction, which was previously duplicated in
    four places in Call; one of those copies omitted quiet_mode, so
    recreating a DataManager for an unexpected data message silently
    dropped quiet mode. All managers now share identical settings.

    Args:
      manager_class: The manager class to instantiate (DataManager or
          MetadataManager).

    Returns:
      A manager_class instance configured from this controller.
    """
    return manager_class(
        update_message_period=self.update_message_period,
        update_spinner_period=self.update_spinner_period,
        sliding_throughput_period=self.sliding_throughput_period,
        first_throughput_latency=self.first_throughput_latency,
        quiet_mode=self.quiet_mode, custom_time=self.custom_time,
        verbose=self.verbose, console_width=self.console_width)

  def _HandleMessage(self, status_message, stream, cur_time=None):
    """Processes a message, updates throughput and prints progress.

    Args:
      status_message: Message to be processed. Could be None if UIThread cannot
          retrieve message from status_queue.
      stream: stream to print messages. Usually sys.stderr, but customizable
          for testing.
      cur_time: Message time. Used to determine if it is time to refresh
          output, or calculate throughput.
    """
    self.manager.ProcessMessage(status_message, stream)
    if self.manager.ShouldPrintProgress(cur_time):
      if self.manager.ShouldTrackThroughput(cur_time):
        self.manager.UpdateThroughput(cur_time, self.manager.GetProgress())
      self.manager.PrintProgress(stream)
      self.manager.refresh_message_time = cur_time
    if self.manager.ShouldPrintSpinner(cur_time):
      self.manager.PrintSpinner(stream)
      self.manager.refresh_spinner_time = cur_time
    if ((isinstance(status_message, FinalMessage) or
         self.manager.final_message)
        and self.manager.num_objects
        and not self.printed_final_message):
      # Print the final summary exactly once, when the operation is known
      # to be finished.
      self.printed_final_message = True
      LogPerformanceSummaryParams(
          num_objects_transferred=self.manager.num_objects)
      self.manager.PrintFinalSummaryMessage(stream)

  def Call(self, status_message, stream, cur_time=None):
    """Coordinates UI manager and calls appropriate function to handle message.

    Args:
      status_message: Message to be processed. Could be None if UIThread cannot
          retrieve message from status_queue.
      stream: Stream to print messages. Usually sys.stderr, but customizable
          for testing.
      cur_time: Message time. Used to determine if it is time to refresh
          output, or calculate throughput.
    """
    if not isinstance(status_message, StatusMessage):
      if status_message == ZERO_TASKS_TO_DO_ARGUMENT and not self.manager:
        # Create a manager to handle early estimation messages before
        # returning.
        self.manager = self._CreateManager(DataManager)
        for estimation_message in self.early_estimation_messages:
          self._HandleMessage(estimation_message, stream,
                              cur_time=estimation_message.time)
      return
    if self.dump_status_message_fp:
      # TODO: Add Unicode support to string methods on message classes.
      # Currently, dump will fail with a UnicodeEncodeErorr if the message
      # class contains a Unicode attribute.
      self.dump_status_message_fp.write(str(status_message))
      self.dump_status_message_fp.write('\n')
    if not cur_time:
      cur_time = status_message.time
    if not self.manager:
      # Defer manager choice until we can tell whether this is a metadata
      # or a data operation; buffer estimation messages in the meantime.
      if isinstance(status_message,
                    (SeekAheadMessage, ProducerThreadMessage)):
        self.early_estimation_messages.append(status_message)
        return
      elif isinstance(status_message, MetadataMessage):
        self.manager = self._CreateManager(MetadataManager)
      else:
        self.manager = self._CreateManager(DataManager)
      for estimation_message in self.early_estimation_messages:
        self._HandleMessage(estimation_message, stream, cur_time)
    if not self.manager.CanHandleMessage(status_message):
      if isinstance(status_message, (FileMessage, ProgressMessage)):
        # We have to create a DataManager to handle this data message. This is
        # to avoid a possible race condition where MetadataMessages are sent
        # before data messages. As such, this means that the DataManager has
        # priority, and whenever a data message is received, we ignore the
        # MetadataManager if one exists, and start a DataManager from scratch.
        # This can be done because we do not need any MetadataMessages to
        # properly handle a data operation. It could be useful to send the
        # early estimation messages, if those are available.
        self.manager = self._CreateManager(DataManager)
        for estimation_message in self.early_estimation_messages:
          self._HandleMessage(estimation_message, stream, cur_time)
      else:
        # No need to handle this message.
        return
    self._HandleMessage(status_message, stream, cur_time)
class MainThreadUIQueue(object):
  """Handles status display and processing in the main thread / master process.

  This class emulates a queue to cover main-thread activity before or after
  Apply, as well as for the single-threaded, single-process case, i.e.,
  _SequentialApply. When multiple threads or processes are used during calls
  to Apply, the main thread is waiting for work to complete, and this queue
  must remain unused until Apply returns. Code producing arguments for
  Apply (such as the NameExpansionIterator) must not post messages to this
  queue to avoid race conditions with the UIThread.

  Every message put on this queue is forwarded synchronously to the
  UIController, which decides the correct course of action.
  """

  def __init__(self, stream, ui_controller):
    """Instantiates a _MainThreadUIQueue.

    Args:
      stream: Stream for printing messages.
      ui_controller: UIController to manage messages.
    """
    super(MainThreadUIQueue, self).__init__()
    self.stream = stream
    self.ui_controller = ui_controller

  # pylint: disable=invalid-name, unused-argument
  def put(self, status_message, timeout=None):
    # Queue-compatible signature; timeout is accepted but meaningless since
    # the message is handled immediately on the calling thread.
    self.ui_controller.Call(status_message, self.stream)
  # pylint: enable=invalid-name, unused-argument
class UIThread(threading.Thread):
  """Responsible for centralized printing across multiple processes/threads.

  This class pulls status messages that are posted to the centralized status
  queue and coordinates displaying status and progress to the user. It is
  used only during calls to _ParallelApply, which in turn is called only when
  multiple threads and/or processes are used.

  This class sends the messages it receives to UIController, which
  decides the correct course of action.
  """

  def __init__(self, status_queue, stream, ui_controller, timeout=1):
    """Instantiates a _UIThread.

    Args:
      status_queue: Queue for reporting status updates.
      stream: Stream for printing messages.
      ui_controller: UI controller to manage messages.
      timeout: Timeout for getting a message.
    """
    super(UIThread, self).__init__()
    self.status_queue = status_queue
    self.stream = stream
    self.timeout = timeout
    self.ui_controller = ui_controller
    # The thread starts itself on construction; callers only instantiate
    # and later join.
    self.start()

  def run(self):
    # Drain the status queue until the main thread posts the sentinel value.
    try:
      while True:
        try:
          status_message = self.status_queue.get(timeout=self.timeout)
        except Queue.Empty:
          # No message arrived within the timeout; poll again.
          status_message = None
          continue
        self.ui_controller.Call(status_message, self.stream)
        if status_message == ZERO_TASKS_TO_DO_ARGUMENT:
          # Item from MainThread to indicate we are done.
          break
    except Exception, e:  # pylint:disable=broad-except
      # Never let an exception kill the UI thread silently; surface it on
      # the output stream instead.
      self.stream.write('Exception in UIThread: %s\n' % e)
| apache-2.0 |
vefimova/rally | tests/unit/plugins/openstack/scenarios/murano/test_environments.py | 4 | 3933 | # Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.scenarios.murano import environments
from tests.unit import test
# Module path prefix for context-related mock.patch targets.
CTX = "rally.task.context"
# Dotted path of the scenario class under test; used to build patch targets
# for its private helper methods below.
MURANO_SCENARIO = ("rally.plugins.openstack.scenarios.murano."
                   "environments.MuranoEnvironments")
class MuranoEnvironmentsTestCase(test.TestCase):
    """Unit tests for the MuranoEnvironments scenario class."""

    def _get_context(self):
        # Minimal task context: one uploaded package, a user bound to a
        # tenant, and murano_packages config pointing at the sample app.
        return {
            "tenant": {
                "packages": [mock.MagicMock(fully_qualified_name="fake")]
            },
            "user": {
                "tenant_id": "fake_tenant_id"
            },
            "config": {
                "murano_packages": {
                    "app_package": (
                        "rally-jobs/extra/murano/"
                        "applications/HelloReporter/"
                        "io.murano.apps.HelloReporter.zip")
                }
            }
        }

    @mock.patch(MURANO_SCENARIO + "._list_environments")
    def test_list_environments(self, mock__list_environments):
        # NOTE(review): this patches the same method it then calls, so it
        # only verifies that the patched call happens; it does not exercise
        # the real _list_environments implementation.
        scenario = environments.MuranoEnvironments()
        scenario._list_environments()
        mock__list_environments.assert_called_once_with()

    @mock.patch(MURANO_SCENARIO + "._create_session")
    @mock.patch(MURANO_SCENARIO + "._delete_environment")
    @mock.patch(MURANO_SCENARIO + "._create_environment")
    @mock.patch(MURANO_SCENARIO + "._generate_random_name")
    def test_create_and_delete_environment(
            self, mock__generate_random_name, mock__create_environment,
            mock__delete_environment, mock__create_session):
        # mock.patch decorators apply bottom-up, so the mock arguments
        # arrive in the reverse order of the decorator list above.
        scenario = environments.MuranoEnvironments()
        fake_environment = mock.Mock(id="fake_id")
        mock__create_environment.return_value = fake_environment
        mock__generate_random_name.return_value = "foo"
        scenario.create_and_delete_environment()
        # The scenario must create the environment, open a session for it,
        # and finally delete the environment it created.
        mock__create_environment.assert_called_once_with()
        mock__create_session.assert_called_once_with(fake_environment.id)
        mock__delete_environment.assert_called_once_with(fake_environment)

    @mock.patch(MURANO_SCENARIO + "._create_environment")
    @mock.patch(MURANO_SCENARIO + "._create_session")
    @mock.patch(MURANO_SCENARIO + "._create_service")
    @mock.patch(MURANO_SCENARIO + "._deploy_environment")
    def test_create_and_deploy_environment(
            self, mock__deploy_environment, mock__create_service,
            mock__create_session, mock__create_environment):
        fake_environment = mock.MagicMock(id="fake_env_id")
        mock__create_environment.return_value = fake_environment
        fake_session = mock.Mock(id="fake_session_id")
        mock__create_session.return_value = fake_session
        scenario = environments.MuranoEnvironments()
        scenario.context = self._get_context()
        # The scenario looks tenants up by the user's tenant_id, so provide
        # a matching "tenants" mapping containing one package.
        scenario.context["tenants"] = {
            "fake_tenant_id": {
                "packages": [mock.MagicMock()]
            }
        }
        scenario.create_and_deploy_environment(1)
        mock__create_environment.assert_called_once_with()
        mock__create_session.assert_called_once_with(fake_environment.id)
        # "fake" is the fully_qualified_name of the package placed in the
        # context by _get_context().
        mock__create_service.assert_called_once_with(
            fake_environment, fake_session, "fake", atomic_action=False)
        mock__deploy_environment.assert_called_once_with(
            fake_environment, fake_session)
| apache-2.0 |
athompso/ansible-modules-core | system/service.py | 40 | 58069 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: service
author:
- "Ansible Core Team"
- "Michael DeHaan"
version_added: "0.1"
short_description: Manage services.
description:
- Controls services on remote hosts. Supported init systems include BSD init,
OpenRC, SysV, Solaris SMF, systemd, upstart.
options:
name:
required: true
description:
- Name of the service.
state:
required: false
choices: [ started, stopped, restarted, reloaded ]
description:
- C(started)/C(stopped) are idempotent actions that will not run
commands unless necessary. C(restarted) will always bounce the
service. C(reloaded) will always reload. B(At least one of state
and enabled are required.)
sleep:
required: false
version_added: "1.3"
description:
- If the service is being C(restarted) then sleep this many seconds
between the stop and start command. This helps to workaround badly
behaving init scripts that exit immediately after signaling a process
to stop.
pattern:
required: false
version_added: "0.7"
description:
- If the service does not respond to the status command, name a
substring to look for as would be found in the output of the I(ps)
command as a stand-in for a status result. If the string is found,
the service will be assumed to be running.
enabled:
required: false
choices: [ "yes", "no" ]
description:
- Whether the service should start on boot. B(At least one of state and
enabled are required.)
runlevel:
required: false
default: 'default'
description:
- "For OpenRC init scripts (ex: Gentoo) only. The runlevel that this service belongs to."
arguments:
description:
- Additional arguments provided on the command line
aliases: [ 'args' ]
must_exist:
required: false
default: true
version_added: "2.0"
description:
- Avoid a module failure if the named service does not exist. Useful
for opportunistically starting/stopping/restarting a list of
potential services.
'''
EXAMPLES = '''
# Example action to start service httpd, if not running
- service: name=httpd state=started
# Example action to stop service httpd, if running
- service: name=httpd state=stopped
# Example action to restart service httpd, in all cases
- service: name=httpd state=restarted
# Example action to reload service httpd, in all cases
- service: name=httpd state=reloaded
# Example action to enable service httpd, and not touch the running state
- service: name=httpd enabled=yes
# Example action to start service foo, based on running process /usr/bin/foo
- service: name=foo pattern=/usr/bin/foo state=started
# Example action to restart network service for interface eth0
- service: name=network state=restarted args=eth0
# Example action to restart nova-compute if it exists
- service: name=nova-compute state=restarted must_exist=no
'''
import platform
import os
import re
import tempfile
import shlex
import select
import time
import string
import glob
# The distutils module is not shipped with SUNWPython on Solaris.
# It's in the SUNWPython-devel package which also contains development files
# that don't belong on production boxes. Since our Solaris code doesn't
# depend on LooseVersion, do not import it on Solaris.
if platform.system() != 'SunOS':
from distutils.version import LooseVersion
class Service(object):
    """
    This is the generic Service manipulation class that is subclassed
    based on platform.

    A subclass should override the following action methods:-
      - get_service_tools
      - service_enable
      - get_service_status
      - service_control

    All subclasses MUST define platform and distribution (which may be None).
    """

    platform = 'Generic'
    distribution = None

    def __new__(cls, *args, **kwargs):
        # Instantiate the platform-specific subclass (e.g. LinuxService)
        # that matches the current system instead of this generic base.
        return load_platform_subclass(Service, args, kwargs)

    def __init__(self, module):
        """Caches module parameters and initializes bookkeeping state.

        Args:
            module: AnsibleModule instance providing params and helpers.
        """
        self.module         = module
        self.name           = module.params['name']
        self.state          = module.params['state']
        self.sleep          = module.params['sleep']
        self.pattern        = module.params['pattern']
        self.enable         = module.params['enabled']
        self.runlevel       = module.params['runlevel']
        # Result/worker state filled in by the platform subclass.
        self.changed        = False
        self.running        = None
        self.crashed        = None
        self.action         = None
        self.svc_cmd        = None
        self.svc_initscript = None
        self.svc_initctl    = None
        self.enable_cmd     = None
        self.arguments      = module.params.get('arguments', '')
        # rc.conf handling (BSD-style platforms) via service_enable_rcconf().
        self.rcconf_file    = None
        self.rcconf_key     = None
        self.rcconf_value   = None
        self.svc_change     = False

        # select whether we dump additional debug info through syslog
        self.syslogging = False

    # ===========================================
    # Platform specific methods (must be replaced by subclass).

    def get_service_tools(self):
        self.module.fail_json(msg="get_service_tools not implemented on target platform")

    def service_enable(self):
        self.module.fail_json(msg="service_enable not implemented on target platform")

    def get_service_status(self):
        self.module.fail_json(msg="get_service_status not implemented on target platform")

    def service_control(self):
        self.module.fail_json(msg="service_control not implemented on target platform")

    # ===========================================
    # Generic methods that should be used on all platforms.

    def execute_command(self, cmd, daemonize=False):
        """Runs cmd, optionally daemonizing it so it outlives this process.

        When daemonize is False the command runs synchronously via the
        module helper. When True, a double-fork detaches a child that runs
        the command and relays (rc, stdout, stderr) back to this process as
        JSON over a pipe — needed for init scripts that would otherwise be
        killed together with the module process.

        Returns:
            Tuple of (rc, stdout, stderr).
        """
        if self.syslogging:
            syslog.openlog('ansible-%s' % os.path.basename(__file__))
            syslog.syslog(syslog.LOG_NOTICE, 'Command %s, daemonize %r' % (cmd, daemonize))

        # Most things don't need to be daemonized
        if not daemonize:
            return self.module.run_command(cmd)

        # This is complex because daemonization is hard for people.
        # What we do is daemonize a part of this module, the daemon runs the
        # command, picks up the return code and output, and returns it to the
        # main process.
        pipe = os.pipe()
        pid = os.fork()
        if pid == 0:
            os.close(pipe[0])
            # Set stdin/stdout/stderr to /dev/null
            fd = os.open(os.devnull, os.O_RDWR)
            if fd != 0:
                os.dup2(fd, 0)
            if fd != 1:
                os.dup2(fd, 1)
            if fd != 2:
                os.dup2(fd, 2)
            if fd not in (0, 1, 2):
                os.close(fd)

            # Make us a daemon. Yes, that's all it takes.
            pid = os.fork()
            if pid > 0:
                os._exit(0)
            os.setsid()
            os.chdir("/")
            pid = os.fork()
            if pid > 0:
                os._exit(0)

            # Start the command
            if isinstance(cmd, basestring):
                cmd = shlex.split(cmd)
            # Close the write end of the pipe in the grandchild's subprocess
            # so EOF is seen correctly by the parent.
            p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=lambda: os.close(pipe[1]))
            stdout = ""
            stderr = ""
            fds = [p.stdout, p.stderr]
            # Wait for all output, or until the main process is dead and its output is done.
            while fds:
                rfd, wfd, efd = select.select(fds, [], fds, 1)
                if not (rfd + wfd + efd) and p.poll() is not None:
                    break
                if p.stdout in rfd:
                    dat = os.read(p.stdout.fileno(), 4096)
                    if not dat:
                        fds.remove(p.stdout)
                    stdout += dat
                if p.stderr in rfd:
                    dat = os.read(p.stderr.fileno(), 4096)
                    if not dat:
                        fds.remove(p.stderr)
                    stderr += dat
            p.wait()

            # Return a JSON blob to parent
            os.write(pipe[1], json.dumps([p.returncode, stdout, stderr]))
            os.close(pipe[1])
            os._exit(0)
        elif pid == -1:
            self.module.fail_json(msg="unable to fork")
        else:
            os.close(pipe[1])
            os.waitpid(pid, 0)

            # Wait for data from daemon process and process it.
            data = ""
            while True:
                rfd, wfd, efd = select.select([pipe[0]], [], [pipe[0]])
                if pipe[0] in rfd:
                    dat = os.read(pipe[0], 4096)
                    if not dat:
                        break
                    data += dat
            return json.loads(data)

    def check_ps(self):
        """Sets self.running by scanning 'ps' output for self.pattern."""
        # Set ps flags
        if platform.system() == 'SunOS':
            psflags = '-ef'
        else:
            psflags = 'auxww'

        # Find ps binary
        psbin = self.module.get_bin_path('ps', True)

        (rc, psout, pserr) = self.execute_command('%s %s' % (psbin, psflags))
        # If rc is 0, set running as appropriate
        if rc == 0:
            self.running = False
            lines = psout.split("\n")
            for line in lines:
                if self.pattern in line and not "pattern=" in line:
                    # so as to not confuse ./hacking/test-module
                    self.running = True
                    break

    def check_service_changed(self):
        """Decides whether the service state must change; exits early in
        check mode if so."""
        if self.state and self.running is None:
            self.module.fail_json(msg="failed determining service state, possible typo of service name?")
        # Find out if state has changed
        if not self.running and self.state in ["started", "running", "reloaded"]:
            self.svc_change = True
        elif self.running and self.state in ["stopped","reloaded"]:
            self.svc_change = True
        elif self.state == "restarted":
            self.svc_change = True
        if self.module.check_mode and self.svc_change:
            self.module.exit_json(changed=True, msg='service state changed')

    def modify_service_state(self):
        """Maps the requested state to an action and performs it.

        Returns:
            Tuple of (rc, stdout, stderr); (0, '', '') when no change
            was needed.
        """
        # Only do something if state will change
        if self.svc_change:
            # Control service
            if self.state in ['started', 'running']:
                self.action = "start"
            elif not self.running and self.state == 'reloaded':
                # Can't reload a stopped service; start it instead.
                self.action = "start"
            elif self.state == 'stopped':
                self.action = "stop"
            elif self.state == 'reloaded':
                self.action = "reload"
            elif self.state == 'restarted':
                self.action = "restart"

            if self.module.check_mode:
                self.module.exit_json(changed=True, msg='changing service state')

            return self.service_control()

        else:
            # If nothing needs to change just say all is well
            rc = 0
            err = ''
            out = ''
            return rc, out, err

    def service_enable_rcconf(self):
        """Ensures rcconf_key=rcconf_value appears in rcconf_file (BSD).

        Rewrites the rc.conf atomically via a temp file on the same
        filesystem; sets self.changed accordingly.
        """
        if self.rcconf_file is None or self.rcconf_key is None or self.rcconf_value is None:
            self.module.fail_json(msg="service_enable_rcconf() requires rcconf_file, rcconf_key and rcconf_value")

        self.changed = None
        entry = '%s="%s"\n' % (self.rcconf_key, self.rcconf_value)
        RCFILE = open(self.rcconf_file, "r")
        new_rc_conf = []

        # Build a list containing the possibly modified file.
        for rcline in RCFILE:
            # Parse line removing whitespaces, quotes, etc.
            rcarray = shlex.split(rcline, comments=True)
            if len(rcarray) >= 1 and '=' in rcarray[0]:
                (key, value) = rcarray[0].split("=", 1)
                if key == self.rcconf_key:
                    if value.upper() == self.rcconf_value:
                        # Since the proper entry already exists we can stop iterating.
                        self.changed = False
                        break
                    else:
                        # We found the key but the value is wrong, replace with new entry.
                        rcline = entry
                        self.changed = True

            # Add line to the list.
            new_rc_conf.append(rcline.strip() + '\n')

        # We are done with reading the current rc.conf, close it.
        RCFILE.close()

        # If we did not see any trace of our entry we need to add it.
        if self.changed is None:
            new_rc_conf.append(entry)
            self.changed = True

        if self.changed is True:

            if self.module.check_mode:
                self.module.exit_json(changed=True, msg="changing service enablement")

            # Create a temporary file next to the current rc.conf (so we stay on the same filesystem).
            # This way the replacement operation is atomic.
            rcconf_dir = os.path.dirname(self.rcconf_file)
            rcconf_base = os.path.basename(self.rcconf_file)
            (TMP_RCCONF, tmp_rcconf_file) = tempfile.mkstemp(dir=rcconf_dir, prefix="%s-" % rcconf_base)

            # Write out the contents of the list into our temporary file.
            for rcline in new_rc_conf:
                os.write(TMP_RCCONF, rcline)

            # Close temporary file.
            os.close(TMP_RCCONF)

            # Replace previous rc.conf.
            self.module.atomic_move(tmp_rcconf_file, self.rcconf_file)
# ===========================================
# Subclass: Linux
class LinuxService(Service):
"""
This is the Linux Service manipulation class - it is currently supporting
a mixture of binaries and init scripts for controlling services started at
boot, as well as for controlling the current state.
"""
platform = 'Linux'
distribution = None
    def get_service_tools(self):
        """Locates the init system and the binaries used to control/enable
        the service, populating svc_cmd, enable_cmd, svc_initscript and
        svc_initctl. Exits (or fails, per must_exist) when nothing usable
        is found."""
        paths = [ '/sbin', '/usr/sbin', '/bin', '/usr/bin' ]
        binaries = [ 'service', 'chkconfig', 'update-rc.d', 'rc-service', 'rc-update', 'initctl', 'systemctl', 'start', 'stop', 'restart', 'insserv' ]
        initpaths = [ '/etc/init.d' ]
        location = dict()

        for binary in binaries:
            location[binary] = self.module.get_bin_path(binary)

        for initdir in initpaths:
            initscript = "%s/%s" % (initdir,self.name)
            if os.path.isfile(initscript):
                self.svc_initscript = initscript

        def check_systemd():
            # verify systemd is installed (by finding systemctl)
            if not location.get('systemctl', False):
                return False

            # Check if init is the systemd command, using comm as cmdline could be symlink
            try:
                f = open('/proc/1/comm', 'r')
            except IOError, err:
                # If comm doesn't exist, old kernel, no systemd
                return False

            for line in f:
                if 'systemd' in line:
                    return True

            return False

        # Locate a tool to enable/disable a service
        if location.get('systemctl',False) and check_systemd():
            # service is managed by systemd
            self.__systemd_unit = self.name
            self.svc_cmd = location['systemctl']
            self.enable_cmd = location['systemctl']

        elif location.get('initctl', False) and os.path.exists("/etc/init/%s.conf" % self.name):
            # service is managed by upstart
            self.enable_cmd = location['initctl']
            # set the upstart version based on the output of 'initctl version'
            self.upstart_version = LooseVersion('0.0.0')
            try:
                version_re = re.compile(r'\(upstart (.*)\)')
                rc,stdout,stderr = self.module.run_command('initctl version')
                if rc == 0:
                    res = version_re.search(stdout)
                    if res:
                        self.upstart_version = LooseVersion(res.groups()[0])
            except:
                pass  # we'll use the default of 0.0.0

            if location.get('start', False):
                # upstart -- rather than being managed by one command, start/stop/restart are actual commands
                self.svc_cmd = ''

        elif location.get('rc-service', False):
            # service is managed by OpenRC
            self.svc_cmd = location['rc-service']
            self.enable_cmd = location['rc-update']
            return # already have service start/stop tool too!

        elif self.svc_initscript:
            # service is managed by with SysV init scripts
            if location.get('update-rc.d', False):
                # and uses update-rc.d
                self.enable_cmd = location['update-rc.d']
            elif location.get('insserv', None):
                # and uses insserv
                self.enable_cmd = location['insserv']
            elif location.get('chkconfig', False):
                # and uses chkconfig
                self.enable_cmd = location['chkconfig']

        if self.enable_cmd is None:
            if self.module.params['must_exist']:
                self.module.fail_json(msg="no service or tool found for: %s" % self.name)
            else:
                # exiting without change on non-existent service
                self.module.exit_json(changed=False, exists=False)

        # If no service control tool selected yet, try to see if 'service' is available
        if self.svc_cmd is None and location.get('service', False):
            self.svc_cmd = location['service']

        # couldn't find anything yet
        if self.svc_cmd is None and not self.svc_initscript:
            if self.module.params['must_exist']:
                self.module.fail_json(msg='cannot find \'service\' binary or init script for service, possible typo in service name?, aborting')
            else:
                # exiting without change on non-existent service
                self.module.exit_json(changed=False, exists=False)

        if location.get('initctl', False):
            self.svc_initctl = location['initctl']
def get_systemd_service_enabled(self):
def sysv_exists(name):
script = '/etc/init.d/' + name
return os.access(script, os.X_OK)
def sysv_is_enabled(name):
return bool(glob.glob('/etc/rc?.d/S??' + name))
service_name = self.__systemd_unit
(rc, out, err) = self.execute_command("%s is-enabled %s" % (self.enable_cmd, service_name,))
if rc == 0:
return True
elif sysv_exists(service_name):
return sysv_is_enabled(service_name)
else:
return False
def get_systemd_status_dict(self):
# Check status first as show will not fail if service does not exist
(rc, out, err) = self.execute_command("%s show '%s'" % (self.enable_cmd, self.__systemd_unit,))
if rc != 0:
self.module.fail_json(msg='failure %d running systemctl show for %r: %s' % (rc, self.__systemd_unit, err))
elif 'LoadState=not-found' in out:
self.module.fail_json(msg='systemd could not find the requested service "%r": %s' % (self.__systemd_unit, err))
key = None
value_buffer = []
status_dict = {}
for line in out.splitlines():
if not key:
key, value = line.split('=', 1)
# systemd fields that are shell commands can be multi-line
# We take a value that begins with a "{" as the start of
# a shell command and a line that ends with "}" as the end of
# the command
if value.lstrip().startswith('{'):
if value.rstrip().endswith('}'):
status_dict[key] = value
key = None
else:
value_buffer.append(value)
else:
status_dict[key] = value
key = None
else:
if line.rstrip().endswith('}'):
status_dict[key] = '\n'.join(value_buffer)
key = None
else:
value_buffer.append(value)
return status_dict
def get_systemd_service_status(self):
d = self.get_systemd_status_dict()
if d.get('ActiveState') == 'active':
# run-once services (for which a single successful exit indicates
# that they are running as designed) should not be restarted here.
# Thus, we are not checking d['SubState'].
self.running = True
self.crashed = False
elif d.get('ActiveState') == 'failed':
self.running = False
self.crashed = True
elif d.get('ActiveState') is None:
self.module.fail_json(msg='No ActiveState value in systemctl show output for %r' % (self.__systemd_unit,))
else:
self.running = False
self.crashed = False
return self.running
    def get_service_status(self):
        """Determine whether the service is running and return self.running.

        Tries, in priority order: systemd, the generic status command's
        result, upstart's `initctl status` output, OpenRC's `rc-service
        status` output, LSB status return codes, and finally keyword
        matching on the status text. Earlier checks win; later ones only
        run while self.running is still None.
        """
        if self.svc_cmd and self.svc_cmd.endswith('systemctl'):
            return self.get_systemd_service_status()
        self.action = "status"
        rc, status_stdout, status_stderr = self.service_control()
        # if we have decided the service is managed by upstart, we check for some additional output...
        if self.svc_initctl and self.running is None:
            # check the job status by upstart response
            initctl_rc, initctl_status_stdout, initctl_status_stderr = self.execute_command("%s status %s" % (self.svc_initctl, self.name))
            if "stop/waiting" in initctl_status_stdout:
                self.running = False
            elif "start/running" in initctl_status_stdout:
                self.running = True
        if self.svc_cmd and self.svc_cmd.endswith("rc-service") and self.running is None:
            # OpenRC reports state in words; "crashed" appears on stderr.
            openrc_rc, openrc_status_stdout, openrc_status_stderr = self.execute_command("%s %s status" % (self.svc_cmd, self.name))
            self.running = "started" in openrc_status_stdout
            self.crashed = "crashed" in openrc_status_stderr
        # Prefer a non-zero return code. For reference, see:
        # http://refspecs.linuxbase.org/LSB_4.1.0/LSB-Core-generic/LSB-Core-generic/iniscrptact.html
        if self.running is None and rc in [1, 2, 3, 4, 69]:
            self.running = False
        # if the job status is still not known check it by status output keywords
        # Only check keywords if there's only one line of output (some init
        # scripts will output verbosely in case of error and those can emit
        # keywords that are picked up as false positives
        if self.running is None and status_stdout.count('\n') <= 1:
            # first transform the status output that could irritate keyword matching
            cleanout = status_stdout.lower().replace(self.name.lower(), '')
            if "stop" in cleanout:
                self.running = False
            elif "run" in cleanout and "not" in cleanout:
                self.running = False
            elif "run" in cleanout and "not" not in cleanout:
                self.running = True
            elif "start" in cleanout and "not" not in cleanout:
                self.running = True
            elif 'could not access pid file' in cleanout:
                self.running = False
            elif 'is dead and pid file exists' in cleanout:
                self.running = False
            elif 'dead but subsys locked' in cleanout:
                self.running = False
            elif 'dead but pid file exists' in cleanout:
                self.running = False
        # if the job status is still not known and we got a zero for the
        # return code, assume here that the service is running
        if self.running is None and rc == 0:
            self.running = True
        # if the job status is still not known check it by special conditions
        if self.running is None:
            if self.name == 'iptables' and "ACCEPT" in status_stdout:
                # iptables status command output is lame
                # TODO: lookup if we can use a return code for this instead?
                self.running = True
        return self.running
def service_enable(self):
if self.enable_cmd is None:
self.module.fail_json(msg='cannot detect command to enable service %s, typo or init system potentially unknown' % self.name)
self.changed = True
action = None
#
# Upstart's initctl
#
if self.enable_cmd.endswith("initctl"):
def write_to_override_file(file_name, file_contents, ):
override_file = open(file_name, 'w')
override_file.write(file_contents)
override_file.close()
initpath = '/etc/init'
if self.upstart_version >= LooseVersion('0.6.7'):
manreg = re.compile('^manual\s*$', re.M | re.I)
config_line = 'manual\n'
else:
manreg = re.compile('^start on manual\s*$', re.M | re.I)
config_line = 'start on manual\n'
conf_file_name = "%s/%s.conf" % (initpath, self.name)
override_file_name = "%s/%s.override" % (initpath, self.name)
# Check to see if files contain the manual line in .conf and fail if True
if manreg.search(open(conf_file_name).read()):
self.module.fail_json(msg="manual stanza not supported in a .conf file")
self.changed = False
if os.path.exists(override_file_name):
override_file_contents = open(override_file_name).read()
# Remove manual stanza if present and service enabled
if self.enable and manreg.search(override_file_contents):
self.changed = True
override_state = manreg.sub('', override_file_contents)
# Add manual stanza if not present and service disabled
elif not (self.enable) and not (manreg.search(override_file_contents)):
self.changed = True
override_state = '\n'.join((override_file_contents, config_line))
# service already in desired state
else:
pass
# Add file with manual stanza if service disabled
elif not (self.enable):
self.changed = True
override_state = config_line
else:
# service already in desired state
pass
if self.module.check_mode:
self.module.exit_json(changed=self.changed)
# The initctl method of enabling and disabling services is much
# different than for the other service methods. So actually
# committing the change is done in this conditional and then we
# skip the boilerplate at the bottom of the method
if self.changed:
try:
write_to_override_file(override_file_name, override_state)
except:
self.module.fail_json(msg='Could not modify override file')
return
#
# SysV's chkconfig
#
if self.enable_cmd.endswith("chkconfig"):
if self.enable:
action = 'on'
else:
action = 'off'
(rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name))
if 'chkconfig --add %s' % self.name in err:
self.execute_command("%s --add %s" % (self.enable_cmd, self.name))
(rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name))
if not self.name in out:
self.module.fail_json(msg="service %s does not support chkconfig" % self.name)
state = out.split()[-1]
# Check if we're already in the correct state
if "3:%s" % action in out and "5:%s" % action in out:
self.changed = False
return
#
# Systemd's systemctl
#
if self.enable_cmd.endswith("systemctl"):
if self.enable:
action = 'enable'
else:
action = 'disable'
# Check if we're already in the correct state
service_enabled = self.get_systemd_service_enabled()
# self.changed should already be true
if self.enable == service_enabled:
self.changed = False
return
#
# OpenRC's rc-update
#
if self.enable_cmd.endswith("rc-update"):
if self.enable:
action = 'add'
else:
action = 'delete'
(rc, out, err) = self.execute_command("%s show" % self.enable_cmd)
for line in out.splitlines():
service_name, runlevels = line.split('|')
service_name = service_name.strip()
if service_name != self.name:
continue
runlevels = re.split(r'\s+', runlevels)
# service already enabled for the runlevel
if self.enable and self.runlevel in runlevels:
self.changed = False
# service already disabled for the runlevel
elif not self.enable and self.runlevel not in runlevels:
self.changed = False
break
else:
# service already disabled altogether
if not self.enable:
self.changed = False
if not self.changed:
return
#
# update-rc.d style
#
if self.enable_cmd.endswith("update-rc.d"):
enabled = False
slinks = glob.glob('/etc/rc?.d/S??' + self.name)
if slinks:
enabled = True
if self.enable != enabled:
self.changed = True
if self.enable:
action = 'enable'
klinks = glob.glob('/etc/rc?.d/K??' + self.name)
if not klinks:
(rc, out, err) = self.execute_command("%s %s defaults" % (self.enable_cmd, self.name))
if rc != 0:
if err:
self.module.fail_json(msg=err)
else:
self.module.fail_json(msg=out) % (self.enable_cmd, self.name, action)
else:
action = 'disable'
if self.module.check_mode:
rc = 0
return
(rc, out, err) = self.execute_command("%s %s %s" % (self.enable_cmd, self.name, action))
if rc != 0:
if err:
self.module.fail_json(msg=err)
else:
self.module.fail_json(msg=out) % (self.enable_cmd, self.name, action)
else:
self.changed = False
return
#
# insserv (Debian 7)
#
if self.enable_cmd.endswith("insserv"):
if self.enable:
(rc, out, err) = self.execute_command("%s -n %s" % (self.enable_cmd, self.name))
else:
(rc, out, err) = self.execute_command("%s -nr %s" % (self.enable_cmd, self.name))
self.changed = False
for line in err.splitlines():
if self.enable and line.find('enable service') != -1:
self.changed = True
break
if not self.enable and line.find('remove service') != -1:
self.changed = True
break
if self.module.check_mode:
self.module.exit_json(changed=self.changed)
if not self.changed:
return
if self.enable:
(rc, out, err) = self.execute_command("%s %s" % (self.enable_cmd, self.name))
if (rc != 0) or (err != ''):
self.module.fail_json(msg=("Failed to install service. rc: %s, out: %s, err: %s" % (rc, out, err)))
return (rc, out, err)
else:
(rc, out, err) = self.execute_command("%s -r %s" % (self.enable_cmd, self.name))
if (rc != 0) or (err != ''):
self.module.fail_json(msg=("Failed to remove service. rc: %s, out: %s, err: %s" % (rc, out, err)))
return (rc, out, err)
#
# If we've gotten to the end, the service needs to be updated
#
self.changed = True
# we change argument order depending on real binary used:
# rc-update and systemctl need the argument order reversed
if self.enable_cmd.endswith("rc-update"):
args = (self.enable_cmd, action, self.name + " " + self.runlevel)
elif self.enable_cmd.endswith("systemctl"):
args = (self.enable_cmd, action, self.__systemd_unit)
else:
args = (self.enable_cmd, self.name, action)
if self.module.check_mode:
self.module.exit_json(changed=self.changed)
(rc, out, err) = self.execute_command("%s %s %s" % args)
if rc != 0:
if err:
self.module.fail_json(msg="Error when trying to %s %s: rc=%s %s" % (action, self.name, rc, err))
else:
self.module.fail_json(msg="Failure for %s %s: rc=%s %s" % (action, self.name, rc, out))
return (rc, out, err)
    def service_control(self):
        """Run the current self.action against the service.

        Builds the command shape for the detected init system (systemd:
        <cmd> <action> <unit>; SysV/OpenRC: <cmd> <name> <action>; upstart
        init script: <script> <action>) and emulates restart as stop+start
        where the tool may not support it. Returns (rc, stdout, stderr).
        """
        # Decide what command to run
        svc_cmd = ''
        arguments = self.arguments
        if self.svc_cmd:
            if not self.svc_cmd.endswith("systemctl"):
                # SysV and OpenRC take the form <cmd> <name> <action>
                svc_cmd = "%s %s" % (self.svc_cmd, self.name)
            else:
                # systemd commands take the form <cmd> <action> <name>
                svc_cmd = self.svc_cmd
                arguments = "%s %s" % (self.__systemd_unit, arguments)
        elif self.svc_cmd is None and self.svc_initscript:
            # upstart
            svc_cmd = "%s" % self.svc_initscript
        # In OpenRC, if a service crashed, we need to reset its status to
        # stopped with the zap command, before we can start it back.
        if self.svc_cmd and self.svc_cmd.endswith('rc-service') and self.action == 'start' and self.crashed:
            self.execute_command("%s zap" % svc_cmd, daemonize=True)
        if self.action != "restart":
            if svc_cmd != '':
                # upstart or systemd or OpenRC
                rc_state, stdout, stderr = self.execute_command("%s %s %s" % (svc_cmd, self.action, arguments), daemonize=True)
            else:
                # SysV
                rc_state, stdout, stderr = self.execute_command("%s %s %s" % (self.action, self.name, arguments), daemonize=True)
        elif self.svc_cmd and self.svc_cmd.endswith('rc-service'):
            # All services in OpenRC support restart.
            rc_state, stdout, stderr = self.execute_command("%s %s %s" % (svc_cmd, self.action, arguments), daemonize=True)
        else:
            # In other systems, not all services support restart. Do it the hard way.
            if svc_cmd != '':
                # upstart or systemd
                rc1, stdout1, stderr1 = self.execute_command("%s %s %s" % (svc_cmd, 'stop', arguments), daemonize=True)
            else:
                # SysV
                rc1, stdout1, stderr1 = self.execute_command("%s %s %s" % ('stop', self.name, arguments), daemonize=True)
            if self.sleep:
                # Optional pause between stop and start, for slow daemons.
                time.sleep(self.sleep)
            if svc_cmd != '':
                # upstart or systemd
                rc2, stdout2, stderr2 = self.execute_command("%s %s %s" % (svc_cmd, 'start', arguments), daemonize=True)
            else:
                # SysV
                rc2, stdout2, stderr2 = self.execute_command("%s %s %s" % ('start', self.name, arguments), daemonize=True)
            # merge return information
            if rc1 != 0 and rc2 == 0:
                # A failed stop followed by a clean start counts as success.
                rc_state = rc2
                stdout = stdout2
                stderr = stderr2
            else:
                rc_state = rc1 + rc2
                stdout = stdout1 + stdout2
                stderr = stderr1 + stderr2
        return(rc_state, stdout, stderr)
# ===========================================
# Subclass: FreeBSD
class FreeBsdService(Service):
    """
    This is the FreeBSD Service manipulation class - it uses the /etc/rc.conf
    file for controlling services started at boot and the 'service' binary to
    check status and perform direct service manipulation.
    """
    platform = 'FreeBSD'
    distribution = None
    def get_service_tools(self):
        # 'service' drives both status checks and start/stop on FreeBSD.
        self.svc_cmd = self.module.get_bin_path('service', True)
        if not self.svc_cmd:
            self.module.fail_json(msg='unable to find service binary')
    def get_service_status(self):
        rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, 'onestatus', self.arguments))
        if self.name == "pf":
            # pf's onestatus reports via text rather than exit status.
            self.running = "Enabled" in stdout
        else:
            if rc == 1:
                self.running = False
            elif rc == 0:
                self.running = True
    def service_enable(self):
        """Persist the enable state via the service's rcvar in rc.conf."""
        if self.enable:
            self.rcconf_value = "YES"
        else:
            self.rcconf_value = "NO"
        # Last existing file wins, so local overrides take precedence.
        rcfiles = [ '/etc/rc.conf','/etc/rc.conf.local', '/usr/local/etc/rc.conf' ]
        for rcfile in rcfiles:
            if os.path.isfile(rcfile):
                self.rcconf_file = rcfile
        rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, 'rcvar', self.arguments))
        try:
            rcvars = shlex.split(stdout, comments=True)
        except ValueError:
            # Unparseable rcvar output (e.g. unbalanced quotes). Previously a
            # bare 'except: pass' left rcvars unbound, causing a NameError on
            # the check below instead of a clean module failure.
            rcvars = []
        if not rcvars:
            self.module.fail_json(msg="unable to determine rcvar", stdout=stdout, stderr=stderr)
        # In rare cases, i.e. sendmail, rcvar can return several key=value pairs
        # Usually there is just one, however. In other rare cases, i.e. uwsgi,
        # rcvar can return extra uncommented data that is not at all related to
        # the rcvar. We will just take the first key=value pair we come across
        # and hope for the best.
        for rcvar in rcvars:
            if '=' in rcvar:
                self.rcconf_key = rcvar.split('=')[0]
                break
        if self.rcconf_key is None:
            self.module.fail_json(msg="unable to determine rcvar", stdout=stdout, stderr=stderr)
        try:
            return self.service_enable_rcconf()
        except Exception:
            self.module.fail_json(msg='unable to set rcvar')
    def service_control(self):
        # The 'one*' variants act regardless of the rc.conf enable setting.
        if self.action == "start":
            self.action = "onestart"
        if self.action == "stop":
            self.action = "onestop"
        if self.action == "reload":
            self.action = "onereload"
        return self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, self.action, self.arguments))
# ===========================================
# Subclass: OpenBSD
class OpenBsdService(Service):
    """
    This is the OpenBSD Service manipulation class - it uses rcctl(8) or
    /etc/rc.d scripts for service control. Enabling a service is
    only supported if rcctl is present.
    """
    platform = 'OpenBSD'
    distribution = None
    def get_service_tools(self):
        # Prefer rcctl(8); fall back to the service's own /etc/rc.d script.
        self.enable_cmd = self.module.get_bin_path('rcctl')
        if self.enable_cmd:
            self.svc_cmd = self.enable_cmd
        else:
            rcdir = '/etc/rc.d'
            rc_script = "%s/%s" % (rcdir, self.name)
            if os.path.isfile(rc_script):
                self.svc_cmd = rc_script
        if not self.svc_cmd:
            self.module.fail_json(msg='unable to find svc_cmd')
    def get_service_status(self):
        # rcctl takes the service name as an argument; an rc.d script does not.
        if self.enable_cmd:
            rc, stdout, stderr = self.execute_command("%s %s %s" % (self.svc_cmd, 'check', self.name))
        else:
            rc, stdout, stderr = self.execute_command("%s %s" % (self.svc_cmd, 'check'))
        if stderr:
            self.module.fail_json(msg=stderr)
        if rc == 1:
            self.running = False
        elif rc == 0:
            self.running = True
    def service_control(self):
        # -f forces the action even when the configured flags would skip it.
        if self.enable_cmd:
            return self.execute_command("%s -f %s %s" % (self.svc_cmd, self.action, self.name))
        else:
            return self.execute_command("%s -f %s" % (self.svc_cmd, self.action))
    def service_enable(self):
        """Enable/disable via rcctl, reconciling both status and flags.

        Without rcctl this defers to the generic rc.conf handling in the
        base class.
        """
        if not self.enable_cmd:
            return super(OpenBsdService, self).service_enable()
        rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.enable_cmd, 'getdef', self.name, 'flags'))
        if stderr:
            self.module.fail_json(msg=stderr)
        getdef_string = stdout.rstrip()
        # Depending on the service the string returned from 'getdef' may be
        # either a set of flags or the boolean YES/NO
        if getdef_string == "YES" or getdef_string == "NO":
            default_flags = ''
        else:
            default_flags = getdef_string
        rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.enable_cmd, 'get', self.name, 'flags'))
        if stderr:
            self.module.fail_json(msg=stderr)
        get_string = stdout.rstrip()
        # Depending on the service the string returned from 'get' may be
        # either a set of flags or the boolean YES/NO
        if get_string == "YES" or get_string == "NO":
            current_flags = ''
        else:
            current_flags = get_string
        # If there are arguments from the user we use these as flags unless
        # they are already set.
        if self.arguments and self.arguments != current_flags:
            changed_flags = self.arguments
        # If the user has not supplied any arguments and the current flags
        # differ from the default we reset them.
        elif not self.arguments and current_flags != default_flags:
            changed_flags = ' '
        # Otherwise there is no need to modify flags.
        else:
            changed_flags = ''
        rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.enable_cmd, 'get', self.name, 'status'))
        if self.enable:
            # rc == 0 means the service status is already on.
            if rc == 0 and not changed_flags:
                return
            if rc != 0:
                status_action = "set %s status on" % (self.name)
            else:
                status_action = ''
            if changed_flags:
                flags_action = "set %s flags %s" % (self.name, changed_flags)
            else:
                flags_action = ''
        else:
            if rc == 1:
                return
            status_action = "set %s status off" % self.name
            flags_action = ''
        # Verify state assumption. (Message fixed: it previously referred to a
        # non-existent 'status_flags' variable.)
        if not status_action and not flags_action:
            self.module.fail_json(msg="neither status_action nor flags_action is set, this should never happen")
        if self.module.check_mode:
            self.module.exit_json(changed=True, msg="changing service enablement")
        status_modified = 0
        if status_action:
            rc, stdout, stderr = self.execute_command("%s %s" % (self.enable_cmd, status_action))
            if rc != 0:
                if stderr:
                    self.module.fail_json(msg=stderr)
                else:
                    self.module.fail_json(msg="rcctl failed to modify service status")
            status_modified = 1
        if flags_action:
            rc, stdout, stderr = self.execute_command("%s %s" % (self.enable_cmd, flags_action))
            if rc != 0:
                if stderr:
                    if status_modified:
                        error_message = "rcctl modified service status but failed to set flags: " + stderr
                    else:
                        error_message = stderr
                else:
                    if status_modified:
                        error_message = "rcctl modified service status but failed to set flags"
                    else:
                        error_message = "rcctl failed to modify service flags"
                self.module.fail_json(msg=error_message)
        self.changed = True
# ===========================================
# Subclass: NetBSD
class NetBsdService(Service):
    """
    This is the NetBSD Service manipulation class - it uses the /etc/rc.conf
    file for controlling services started at boot, check status and perform
    direct service manipulation. Init scripts in /etc/rcd are used for
    controlling services (start/stop) as well as for controlling the current
    state.
    """
    platform = 'NetBSD'
    distribution = None
    def get_service_tools(self):
        initpaths = [ '/etc/rc.d' ]          # better: $rc_directories - how to get in here? Run: sh -c '. /etc/rc.conf ; echo $rc_directories'
        for initdir in initpaths:
            initscript = "%s/%s" % (initdir,self.name)
            if os.path.isfile(initscript):
                self.svc_initscript = initscript
        if not self.svc_initscript:
            self.module.fail_json(msg='unable to find rc.d script')
    def service_enable(self):
        if self.enable:
            self.rcconf_value = "YES"
        else:
            self.rcconf_value = "NO"
        rcfiles = [ '/etc/rc.conf' ]          # Overkill?
        for rcfile in rcfiles:
            if os.path.isfile(rcfile):
                self.rcconf_file = rcfile
        # rc.conf variables use underscores where service names may carry
        # dashes. Use the str method: the old string.replace() function is
        # Python 2 only and was removed in Python 3.
        self.rcconf_key = "%s" % self.name.replace("-", "_")
        return self.service_enable_rcconf()
    def get_service_status(self):
        self.svc_cmd = "%s" % self.svc_initscript
        rc, stdout, stderr = self.execute_command("%s %s" % (self.svc_cmd, 'onestatus'))
        if rc == 1:
            self.running = False
        elif rc == 0:
            self.running = True
    def service_control(self):
        # The 'one*' variants act regardless of the rc.conf enable setting.
        if self.action == "start":
            self.action = "onestart"
        if self.action == "stop":
            self.action = "onestop"
        self.svc_cmd = "%s" % self.svc_initscript
        return self.execute_command("%s %s" % (self.svc_cmd, self.action), daemonize=True)
# ===========================================
# Subclass: SunOS
class SunOSService(Service):
    """
    SunOS/Solaris SMF service class. Status is queried with svcs(1) and
    control is done with svcadm(1M); services stuck in maintenance or
    degraded state are cleared before start/reload/restart.
    """
    platform = 'SunOS'
    distribution = None
    def get_service_tools(self):
        # Both SMF binaries are mandatory; fail fast if either is missing.
        self.svcs_cmd = self.module.get_bin_path('svcs', True)
        if not self.svcs_cmd:
            self.module.fail_json(msg='unable to find svcs binary')
        self.svcadm_cmd = self.module.get_bin_path('svcadm', True)
        if not self.svcadm_cmd:
            self.module.fail_json(msg='unable to find svcadm binary')
    def get_service_status(self):
        # Only 'online' counts as running; offline, degraded, disabled,
        # maintenance and uninitialized all count as stopped/broken.
        self.running = self.get_sunos_svcs_status() == 'online'
    def get_sunos_svcs_status(self):
        rc, stdout, stderr = self.execute_command("%s %s" % (self.svcs_cmd, self.name))
        if rc == 1:
            self.module.fail_json(msg=stderr if stderr else stdout)
        # The state keyword is the first column of the last output line; one
        # of: online, offline, degraded, disabled, maintenance,
        # uninitialized -- see svcs(1).
        final_line = stdout.rstrip("\n").split("\n")[-1]
        return final_line.split(" ")[0]
    def service_enable(self):
        # Query the current persistent enablement state first.
        rc, stdout, stderr = self.execute_command("%s -l %s" % (self.svcs_cmd, self.name))
        if rc != 0:
            self.module.fail_json(msg=stderr if stderr else stdout)
        enabled = False
        temporary = False
        # The 'enabled' property line looks like one of:
        #   enabled true (temporary)
        #   enabled false (temporary)
        #   enabled true
        #   enabled false
        for line in stdout.split("\n"):
            if line.startswith("enabled"):
                if "true" in line:
                    enabled = True
                if "temporary" in line:
                    temporary = True
        # A temporary override means the boot-time state is the opposite of
        # the current one -- i.e. XOR of the two booleans.
        startup_enabled = (enabled and not temporary) or (not enabled and temporary)
        if self.enable == startup_enabled:
            # Already in the requested persistent state; nothing to do.
            return
        # Flip the persistent state (side effect: starts/stops the service).
        # -s waits for the transition; -r also enables dependencies.
        subcmd = "enable -rs" if self.enable else "disable -s"
        rc, stdout, stderr = self.execute_command("%s %s %s" % (self.svcadm_cmd, subcmd, self.name))
        if rc != 0:
            self.module.fail_json(msg=stderr if stderr else stdout)
        self.changed = True
    def service_control(self):
        status = self.get_sunos_svcs_status()
        # A service stuck in maintenance/degraded must be cleared before a
        # start, reload or restart can take effect.
        if self.action in ['start', 'reload', 'restart'] and status in ['maintenance', 'degraded']:
            rc, stdout, stderr = self.execute_command("%s clear %s" % (self.svcadm_cmd, self.name))
            if rc != 0:
                return rc, stdout, stderr
            status = self.get_sunos_svcs_status()
            if status in ['maintenance', 'degraded']:
                self.module.fail_json(msg="Failed to bring service out of %s status." % status)
        # -t makes the change temporary (boot-time state is untouched).
        if self.action == 'stop':
            subcmd = "disable -st"
        elif self.action == 'reload':
            subcmd = "refresh"
        elif self.action == 'start' or (self.action == 'restart' and status != 'online'):
            subcmd = "enable -rst"
        elif self.action == 'restart' and status == 'online':
            subcmd = "restart"
        return self.execute_command("%s %s %s" % (self.svcadm_cmd, subcmd, self.name))
# ===========================================
# Subclass: AIX
class AIX(Service):
    """
    AIX System Resource Controller (SRC) service class. Uses lssrc for
    status queries and startsrc/stopsrc/refresh for control. Enabling a
    service at boot is not supported: that would require editing
    /etc/inittab (mkitab, chitab and rmitab commands).
    """
    platform = 'AIX'
    distribution = None
    def get_service_tools(self):
        # All four SRC binaries are required; fail fast on any missing one.
        for attr, binary in (('lssrc_cmd', 'lssrc'),
                             ('startsrc_cmd', 'startsrc'),
                             ('stopsrc_cmd', 'stopsrc'),
                             ('refresh_cmd', 'refresh')):
            path = self.module.get_bin_path(binary, True)
            if not path:
                self.module.fail_json(msg='unable to find %s binary' % binary)
            setattr(self, attr, path)
    def get_service_status(self):
        # Only 'active' counts as running; 'inoperative' (or anything else)
        # does not.
        self.running = self.get_aix_src_status() == 'active'
    def get_aix_src_status(self):
        rc, stdout, stderr = self.execute_command("%s -s %s" % (self.lssrc_cmd, self.name))
        if rc == 1:
            self.module.fail_json(msg=stderr if stderr else stdout)
        # The status ('active' or 'inoperative') is the last column of the
        # last output line -- see lssrc(1).
        last_line = stdout.rstrip("\n").split("\n")[-1]
        return last_line.split(" ")[-1]
    def service_control(self):
        # Map the requested action onto the matching SRC command; a restart
        # is emulated as stopsrc followed by startsrc.
        if self.action == 'restart':
            self.execute_command("%s -s %s" % (self.stopsrc_cmd, self.name))
            srccmd = self.startsrc_cmd
        elif self.action == 'start':
            srccmd = self.startsrc_cmd
        elif self.action == 'stop':
            srccmd = self.stopsrc_cmd
        elif self.action == 'reload':
            srccmd = self.refresh_cmd
        if self.action == 'start' and self.arguments:
            # startsrc passes extra arguments to the subsystem via -a.
            return self.execute_command("%s -a \"%s\" -s %s" % (srccmd, self.arguments, self.name))
        return self.execute_command("%s -s %s" % (srccmd, self.name))
# ===========================================
# Main control flow
def main():
    """Entry point: parse module arguments, apply enabled/state changes,
    and exit via module.exit_json/fail_json with the resulting state."""
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required=True),
            state = dict(choices=['running', 'started', 'stopped', 'restarted', 'reloaded']),
            sleep = dict(required=False, type='int', default=None),
            pattern = dict(required=False, default=None),
            enabled = dict(type='bool'),
            runlevel = dict(required=False, default='default'),
            arguments = dict(aliases=['args'], default=''),
            must_exist = dict(type='bool', default=True),
        ),
        supports_check_mode=True
    )
    # At least one of state/enabled must be requested, otherwise there is
    # nothing to do.
    if module.params['state'] is None and module.params['enabled'] is None:
        module.fail_json(msg="Neither 'state' nor 'enabled' set")
    # Service(module) resolves to the platform-specific subclass.
    service = Service(module)
    if service.syslogging:
        syslog.openlog('ansible-%s' % os.path.basename(__file__))
        syslog.syslog(syslog.LOG_NOTICE, 'Service instantiated - platform %s' % service.platform)
        if service.distribution:
            syslog.syslog(syslog.LOG_NOTICE, 'Service instantiated - distribution %s' % service.distribution)
    rc = 0
    out = ''
    err = ''
    result = {}
    result['name'] = service.name
    # Find service management tools
    service.get_service_tools()
    # Enable/disable service startup at boot if requested
    if service.module.params['enabled'] is not None:
        # FIXME: ideally this should detect if we need to toggle the enablement state, though
        # it's unlikely the changed handler would need to fire in this case so it's a minor thing.
        service.service_enable()
        result['enabled'] = service.enable
    if module.params['state'] is None:
        # Not changing the running state, so bail out now.
        result['changed'] = service.changed
        module.exit_json(**result)
    result['state'] = service.state
    # Collect service status
    if service.pattern:
        service.check_ps()
    else:
        service.get_service_status()
    # Calculate if request will change service state
    service.check_service_changed()
    # Modify service state if necessary
    (rc, out, err) = service.modify_service_state()
    if rc != 0:
        if err and "Job is already running" in err:
            # upstart got confused, one such possibility is MySQL on Ubuntu 12.04
            # where status may report it has no start/stop links and we could
            # not get accurate status
            pass
        else:
            if err:
                module.fail_json(msg=err)
            else:
                module.fail_json(msg=out)
    result['changed'] = service.changed | service.svc_change
    if service.module.params['enabled'] is not None:
        result['enabled'] = service.module.params['enabled']
    # NOTE(review): state=None already exited above, so this branch appears
    # unreachable; its started/stopped mapping (status False -> 'started')
    # also looks inverted -- confirm before relying on it.
    if not service.module.params['state']:
        status = service.get_service_status()
        if status is None:
            result['state'] = 'absent'
        elif status is False:
            result['state'] = 'started'
        else:
            result['state'] = 'stopped'
    else:
        # as we may have just bounced the service the service command may not
        # report accurate state at this moment so just show what we ran
        if service.module.params['state'] in ['started','restarted','running','reloaded']:
            result['state'] = 'started'
        else:
            result['state'] = 'stopped'
    module.exit_json(**result)
# Wildcard import is the Ansible module boilerplate hook: the framework
# replaces it with the embedded common-module code at payload build time.
from ansible.module_utils.basic import *
# Invoked unconditionally (no __main__ guard): Ansible executes this file
# directly as the module payload.
main()
| gpl-3.0 |
deepmind/open_spiel | open_spiel/python/environments/cliff_walking.py | 1 | 6144 | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A cliff walking single agent reinforcement learning environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from open_spiel.python import rl_environment
# Actions
RIGHT, UP, LEFT, DOWN = range(4)
class Environment(object):
  r"""A cliff walking reinforcement learning environment.
  This is a deterministic environment that can be used to test RL algorithms.
  Note there are *no illegal moves* in this environment--if the agent is on the
  edge of the cliff and takes an action which would yield an invalid position,
  the action is ignored (as if there were walls surrounding the cliff).
  Cliff example for height=3 and width=5:
  | | | | | |
  | | | | | |
  | S | x | x | x | G |
  where `S` is always the starting position, `G` is always the goal and `x`
  represents the zone of high negative reward to be avoided. For this instance,
  the optimum policy is depicted as follows:
  | | | | | |
  |-->|-->|-->|-->|\|/|
  |/|\| x | x | x | G |
  yielding a reward of -6 (minus 1 per time step).
  See pages 132 of Rich Sutton's book for details:
  http://www.incompleteideas.net/book/bookdraft2018mar21.pdf
  """

  def __init__(self, height=4, width=8, discount=1.0, max_t=100):
    """Initializes the grid.

    Args:
      height: int, number of rows; the bottom row holds start, cliff and goal.
      width: int, number of columns; must fit start, at least one pit, goal.
      discount: float, discount reported at every non-initial step.
      max_t: int, episode is truncated (LAST step) after this many steps.

    Raises:
      ValueError: if the grid is too small for start, cliff and goal.
    """
    if height < 2 or width < 3:
      raise ValueError("height must be >= 2 and width >= 3.")
    self._height = height
    self._width = width
    self._legal_actions = [RIGHT, UP, LEFT, DOWN]
    # Forces a reset() before the first call to step().
    self._should_reset = True
    self._max_t = max_t
    # Discount returned at non-initial steps.
    self._discounts = [discount] * self.num_players

  def reset(self):
    """Resets the environment."""
    self._should_reset = False
    self._time_counter = 0
    # State is (row, column); the agent starts at the bottom-left corner.
    self._state = np.array([self._height - 1, 0])
    observations = {
        "info_state": [self._state.copy()],
        "legal_actions": [self._legal_actions],
        "current_player": 0,
    }
    return rl_environment.TimeStep(
        observations=observations,
        rewards=None,
        discounts=None,
        step_type=rl_environment.StepType.FIRST)

  def step(self, actions):
    """Updates the environment according to `actions` and returns a `TimeStep`.
    Args:
      actions: A singleton list with an integer, or an integer, representing the
        action the agent took.
    Returns:
      A `rl_environment.TimeStep` namedtuple containing:
        observation: singleton list of dicts containing player observations,
            each corresponding to `observation_spec()`.
        reward: singleton list containing the reward at this timestep, or None
            if step_type is `rl_environment.StepType.FIRST`.
        discount: singleton list containing the discount in the range [0, 1], or
            None if step_type is `rl_environment.StepType.FIRST`.
        step_type: A `rl_environment.StepType` value.
    """
    # A finished episode silently restarts on the next step() call.
    if self._should_reset:
      return self.reset()
    self._time_counter += 1
    if isinstance(actions, list):
      action = actions[0]
    elif isinstance(actions, int):
      action = actions
    else:
      raise ValueError("Action not supported.", actions)
    # Translate the action into a (row, column) delta; dy grows downwards.
    dx = 0
    dy = 0
    if action == LEFT:
      dx -= 1
    elif action == RIGHT:
      dx += 1
    if action == UP:
      dy -= 1
    elif action == DOWN:
      dy += 1
    self._state += np.array([dy, dx])
    # Clamp to the grid so moves off the edge are ignored (no illegal moves).
    self._state = self._state.clip(0, [self._height - 1, self._width - 1])
    # Episode ends on falling into the cliff, reaching the goal, or timeout.
    done = self._is_pit(self._state) or self._is_goal(self._state)
    done = done or self._time_counter >= self._max_t
    # Return observation
    step_type = (
        rl_environment.StepType.LAST if done else rl_environment.StepType.MID)
    self._should_reset = step_type == rl_environment.StepType.LAST
    observations = {
        "info_state": [self._state.copy()],
        "legal_actions": [self._legal_actions],
        "current_player": 0,
    }
    return rl_environment.TimeStep(
        observations=observations,
        rewards=[self._get_reward(self._state)],
        discounts=self._discounts,
        step_type=step_type)

  def _is_goal(self, pos):
    """Check if position is bottom right corner of grid."""
    return pos[0] == self._height - 1 and pos[1] == self._width - 1

  def _is_pit(self, pos):
    """Check if position is in bottom row between start and goal."""
    return (pos[1] > 0 and pos[1] < self._width - 1 and
            pos[0] == self._height - 1)

  def _get_reward(self, pos):
    """Returns -100 for falling into the cliff, else the -1 per-step cost."""
    if self._is_pit(pos):
      return -100.0
    else:
      return -1.0

  def observation_spec(self):
    """Defines the observation provided by the environment.
    Each dict member will contain its expected structure and shape.
    Returns:
      A specification dict describing the observation fields and shapes.
    """
    return dict(
        info_state=tuple([2]),
        legal_actions=(len(self._legal_actions),),
        current_player=(),
    )

  def action_spec(self):
    """Defines action specifications.
    Specifications include action boundaries and their data type.
    Returns:
      A specification dict containing action properties.
    """
    return dict(
        num_actions=len(self._legal_actions),
        min=min(self._legal_actions),
        max=max(self._legal_actions),
        dtype=int,
    )

  @property
  def num_players(self):
    # Single-agent environment.
    return 1

  @property
  def is_turn_based(self):
    return False
| apache-2.0 |
chasetb/sal | server/views.py | 1 | 66804 | # Create your views here.
from models import *
from inventory.models import *
from django.contrib.auth.decorators import login_required, permission_required
from django.template import RequestContext, Template, Context
import json
from django.views.decorators.csrf import csrf_exempt, csrf_protect
from django.http import HttpResponse, Http404
from django.contrib.auth.models import Permission, User
from django.conf import settings
from django.core.context_processors import csrf
from django.shortcuts import render_to_response, get_object_or_404, redirect
from datetime import datetime, timedelta, date
from django.db.models import Count, Sum, Max, Q
from django.contrib import messages
from django.contrib.staticfiles.templatetags.staticfiles import static
import plistlib
import ast
from forms import *
import pprint
import re
import os
from yapsy.PluginManager import PluginManager
from django.core.exceptions import PermissionDenied
import utils
import pytz
import watson
import unicodecsv as csv
import django.utils.timezone
import dateutil.parser
import hashlib
# This will only work if BRUTE_PROTECT == True
try:
import axes.utils
except:
pass
if settings.DEBUG:
import logging
logging.basicConfig(level=logging.INFO)
@csrf_exempt
@login_required
def search(request):
    """Global search across machines, facts, conditions and inventory.

    The GET parameter ``q`` may be prefixed with ``facter:``, ``condition:``
    or ``inventory:`` to search those related tables; otherwise Machine
    records are searched directly. Results are restricted to Business Units
    the user belongs to (GA level sees everything).
    """
    user = request.user
    user_level = user.userprofile.level
    if ('q' in request.GET) and request.GET['q'].strip():
        query_string = request.GET['q']
    else:
        # No usable query string -> treat as a bad URL.
        raise Http404
    # Make sure we're searching across Machines the user has access to:
    machines = Machine.objects.all()
    if user_level == 'GA':
        machines = machines
    else:
        for business_unit in BusinessUnit.objects.all():
            if business_unit not in user.businessunit_set.all():
                machines = machines.exclude(machine_group__business_unit = business_unit)
    if query_string.lower().startswith('facter:'):
        query_string = query_string.replace('facter:','').replace('Facter:', '').strip()
        # NOTE(review): filtering a FK against a queryset usually requires
        # machine__in=machines -- confirm this exact-lookup form works on the
        # Django version in use.
        machines = Fact.objects.filter(machine=machines)
        template = 'server/search_facter.html'
    elif query_string.lower().startswith('condition:'):
        query_string = query_string.replace('condition:','').replace('Condition:', '').strip()
        machines = Condition.objects.filter(machine=machines)
        template = 'server/search_condition.html'
    elif query_string.lower().startswith('inventory:'):
        query_string = query_string.replace('inventory:','').replace('Inventory:', '').strip()
        machines = InventoryItem.objects.filter(machine=machines)
        template = 'server/search_inventory.html'
    else:
        template = 'server/search_machines.html'
    # watson performs the actual full-text filtering over the queryset.
    search_results = watson.filter(machines, query_string)
    title = "Search results for %s" % query_string
    c = {'user': request.user, 'search_results': search_results, 'title':title, 'request':request}
    return render_to_response(template, c, context_instance=RequestContext(request))
@login_required
def index(request):
    """Front-page dashboard: renders placeholder divs for enabled plugins.

    GA users see every Business Unit; other users with exactly one BU are
    redirected straight to that BU's dashboard, and users with none get the
    no-access page. Widget bodies load asynchronously via plugin_load().
    """
    # Get the current user's Business Units
    user = request.user
    # Count the number of users. If there is only one, they need to be made a GA
    if User.objects.count() == 1:
        # The first user created by syncdb won't have a profile. If there isn't one, make sure they get one.
        try:
            profile = UserProfile.objects.get(user=user)
        except UserProfile.DoesNotExist:
            profile = UserProfile(user=user)
        profile.level = 'GA'
        profile.save()
    user_level = user.userprofile.level
    now = django.utils.timezone.now()
    hour_ago = now - timedelta(hours=1)
    today = now - timedelta(hours=24)
    week_ago = today - timedelta(days=7)
    month_ago = today - timedelta(days=30)
    three_months_ago = today - timedelta(days=90)
    config_installed = 'config' in settings.INSTALLED_APPS
    if user_level != 'GA':
        # user has many BU's display them all in a friendly manner
        business_units = user.businessunit_set.all()
        if user.businessunit_set.count() == 0:
            c = {'user': request.user, }
            return render_to_response('server/no_access.html', c, context_instance=RequestContext(request))
        if user.businessunit_set.count() == 1:
            # user only has one BU, redirect to it
            for bu in user.businessunit_set.all():
                return redirect('server.views.bu_dashboard', bu_id=bu.id)
                break
    # Load in the default plugins if needed
    utils.loadDefaultPlugins()
    # Build the manager
    manager = PluginManager()
    # Tell it the default place(s) where to find plugins
    manager.setPluginPlaces([settings.PLUGIN_DIR, os.path.join(settings.PROJECT_DIR, 'server/plugins')])
    # Load all plugins
    manager.collectPlugins()
    output = []
    # Get all the enabled plugins
    enabled_plugins = Plugin.objects.all().order_by('order')
    for enabled_plugin in enabled_plugins:
        # Loop round the plugins and print their names.
        for plugin in manager.getAllPlugins():
            # If plugin_type isn't set, assume its an old style one
            try:
                plugin_type = plugin.plugin_object.plugin_type()
            except:
                plugin_type = 'widget'
            if plugin.name == enabled_plugin.name and \
                    plugin_type != 'machine_info' and plugin_type != 'full_page':
                data = {}
                data['name'] = plugin.name
                data['width'] = plugin.plugin_object.widget_width()
                # Spinner placeholder; real content is fetched via AJAX from
                # the plugin_load() view.
                data['html'] = '<div id="plugin-%s" class="col-md-%s"><img class="center-block blue-spinner" src="%s"/></div>' % (data['name'], str(data['width']), static('img/blue-spinner.gif'))
                output.append(data)
                break
    output = utils.orderPluginOutput(output)
    # get the user level - if they're a global admin, show all of the machines. If not, show only the machines they have access to
    if user_level == 'GA':
        business_units = BusinessUnit.objects.all()
    else:
        business_units = user.businessunit_set.all()
    c = {'user': request.user, 'business_units': business_units, 'output': output, }
    return render_to_response('server/index.html', c, context_instance=RequestContext(request))
# Manage Users
@login_required
def manage_users(request):
    """Render the user management page (staff global admins only)."""
    current_user = request.user
    if current_user.userprofile.level != 'GA':
        return redirect(index)
    # BRUTE_PROTECT is optional in settings; default to disabled when absent.
    try:
        brute_protect = settings.BRUTE_PROTECT
    except:
        brute_protect = False
    # We require you to be staff to manage users
    if current_user.is_staff != True:
        return redirect(index)
    context = {
        'user': current_user,
        'users': User.objects.all(),
        'request': request,
        'brute_protect': brute_protect,
    }
    return render_to_response('server/manage_users.html', context,
                              context_instance=RequestContext(request))
# Unlock account
@login_required
def brute_unlock(request):
    """Reset all django-axes lockouts (staff global admins, axes enabled)."""
    current_user = request.user
    if current_user.userprofile.level != 'GA':
        return redirect(index)
    # BRUTE_PROTECT is optional in settings; default to disabled when absent.
    try:
        brute_protect = settings.BRUTE_PROTECT
    except:
        brute_protect = False
    if brute_protect == False:
        return redirect(index)
    # We require you to be staff to manage users
    if current_user.is_staff != True:
        return redirect(index)
    # Clear every access-attempt record, unlocking all accounts.
    axes.utils.reset()
    context = {
        'user': current_user,
        'request': request,
        'brute_protect': brute_protect,
    }
    return render_to_response('server/brute_unlock.html', context,
                              context_instance=RequestContext(request))
# New User
@login_required
def new_user(request):
    """Create a new Sal user and set their profile level (staff GA only)."""
    if request.user.userprofile.level != 'GA':
        return redirect(index)
    # We require you to be staff to manage users
    if request.user.is_staff != True:
        return redirect(index)
    c = {}
    c.update(csrf(request))
    if request.method == 'POST':
        form = NewUserForm(request.POST)
        if form.is_valid():
            created = form.save()
            # The profile is created alongside the user; stamp the chosen level.
            profile = UserProfile.objects.get(user=created)
            profile.level = request.POST['user_level']
            profile.save()
            return redirect('manage_users')
    else:
        form = NewUserForm()
    return render_to_response('forms/new_user.html', {'form': form},
                              context_instance=RequestContext(request))
@login_required
def edit_user(request, user_id):
    """Edit an existing user's profile level (staff global admins only).

    LDAP-backed accounts (no usable password) get a reduced form without
    password fields. Demoting a user below GA also strips their staff flag.
    """
    user = request.user
    user_level = user.userprofile.level
    if user_level != 'GA':
        return redirect(index)
    # We require you to be staff to manage users
    if user.is_staff != True:
        return redirect(index)
    the_user = get_object_or_404(User, pk=int(user_id))
    c = {}
    c.update(csrf(request))
    if request.method == 'POST':
        # BUG FIX: has_usable_password is a method; the original referenced it
        # without calling it, which is always truthy, so LDAP users were
        # always shown the password-bearing form.
        if the_user.has_usable_password():
            form = EditUserForm(request.POST)
        else:
            form = EditLDAPUserForm(request.POST)
        if form.is_valid():
            user = form.save()
            user_profile = UserProfile.objects.get(user=the_user)
            user_profile.level = request.POST['user_level']
            user_profile.save()
            # Only global admins may hold the Django staff bit.
            if user_profile.level != 'GA':
                user.is_staff = False
                user.save()
            return redirect('manage_users')
    else:
        if the_user.has_usable_password():
            form = EditUserForm({'user_level': the_user.userprofile.level, 'user_id': the_user.id})
        else:
            form = EditLDAPUserForm({'user_level': the_user.userprofile.level, 'user_id': the_user.id})
    c = {'form': form, 'the_user': the_user}
    return render_to_response('forms/edit_user.html', c, context_instance=RequestContext(request))
@login_required
def user_add_staff(request, user_id):
    """Grant the Django staff flag to another user (GA only, never yourself)."""
    if request.user.userprofile.level != 'GA':
        return redirect(index)
    if request.user.id == int(user_id):
        # Admins may not toggle their own staff flag.
        return redirect('manage_users')
    target = get_object_or_404(User, pk=int(user_id))
    target.is_staff = True
    target.save()
    return redirect('manage_users')
@login_required
def user_remove_staff(request, user_id):
    """Revoke the Django staff flag from another user (GA only, never yourself)."""
    if request.user.userprofile.level != 'GA':
        return redirect(index)
    if request.user.id == int(user_id):
        # Admins may not toggle their own staff flag.
        return redirect('manage_users')
    target = get_object_or_404(User, pk=int(user_id))
    target.is_staff = False
    target.save()
    return redirect('manage_users')
@login_required
def delete_user(request, user_id):
    """Permanently delete a user account (GA only, never yourself).

    BUG FIX: this was the only user-management view missing @login_required;
    an anonymous request would hit request.user.userprofile and raise an
    AttributeError instead of redirecting to the login page.
    """
    user_level = request.user.userprofile.level
    if user_level != 'GA':
        return redirect(index)
    if request.user.id == int(user_id):
        # You shouldn't have been able to get here anyway
        return redirect('manage_users')
    user = get_object_or_404(User, pk=int(user_id))
    user.delete()
    return redirect('manage_users')
# Plugin machine list
@login_required
def machine_list(request, pluginName, data, page='front', theID=None):
    """List the machines matched by plugin `pluginName` filtered with `data`.

    `page` selects the scope: 'front' (everything the user may see),
    'bu_dashboard' (one Business Unit, theID = BU pk) or 'group_dashboard'
    (one machine group, theID = group pk).
    """
    user = request.user
    title = None
    # Build the manager
    manager = PluginManager()
    # Tell it the default place(s) where to find plugins
    manager.setPluginPlaces([settings.PLUGIN_DIR, os.path.join(settings.PROJECT_DIR, 'server/plugins')])
    # Load all plugins
    manager.collectPlugins()
    # get a list of machines (either from the BU or the group)
    if page == 'front':
        # get all machines
        if user.userprofile.level == 'GA':
            machines = Machine.objects.all()
        else:
            machines = Machine.objects.none()
            for business_unit in user.businessunit_set.all():
                for group in business_unit.machinegroup_set.all():
                    machines = machines | group.machine_set.all()
    if page == 'bu_dashboard':
        # only get machines for that BU
        # Need to make sure the user is allowed to see this
        # NOTE(review): despite the comment above, no access check is actually
        # performed here -- any authenticated user can list a BU's machines
        # by URL. Confirm and add a check like the one in bu_dashboard().
        business_unit = get_object_or_404(BusinessUnit, pk=theID)
        machine_groups = MachineGroup.objects.filter(business_unit=business_unit).prefetch_related('machine_set').all()
        if machine_groups.count() != 0:
            machines_unsorted = machine_groups[0].machine_set.all()
            for machine_group in machine_groups[1:]:
                machines_unsorted = machines_unsorted | machine_group.machine_set.all()
        else:
            machines_unsorted = None
        machines=machines_unsorted
    if page == 'group_dashboard':
        # only get machines from that group
        machine_group = get_object_or_404(MachineGroup, pk=theID)
        # check that the user has access to this
        machines = Machine.objects.filter(machine_group=machine_group)
    # send the machines and the data to the plugin
    # NOTE(review): `machines` is unbound (NameError) when `page` is none of
    # the three expected values -- confirm callers only pass known pages.
    for plugin in manager.getAllPlugins():
        if plugin.name == pluginName:
            (machines, title) = plugin.plugin_object.filter_machines(machines, data)
    c = {'user':user, 'plugin_name': pluginName, 'machines': machines, 'req_type': page, 'title': title, 'bu_id': theID, 'request':request, 'data':data }
    return render_to_response('server/overview_list_all.html', c, context_instance=RequestContext(request))
# Plugin machine list
@login_required
def plugin_load(request, pluginName, page='front', theID=None):
    """Return the rendered HTML body of a single widget plugin.

    Called asynchronously by the dashboards to fill the spinner placeholder
    divs emitted by index()/bu_dashboard()/group_dashboard(). The machine
    scope follows the same 'front'/'bu_dashboard'/'group_dashboard' scheme
    as machine_list().
    """
    user = request.user
    title = None
    # Build the manager
    manager = PluginManager()
    # Tell it the default place(s) where to find plugins
    manager.setPluginPlaces([settings.PLUGIN_DIR, os.path.join(settings.PROJECT_DIR, 'server/plugins')])
    # Load all plugins
    manager.collectPlugins()
    # get a list of machines (either from the BU or the group)
    if page == 'front':
        # get all machines
        if user.userprofile.level == 'GA':
            machines = Machine.objects.all()
        else:
            machines = Machine.objects.none()
            for business_unit in user.businessunit_set.all():
                for group in business_unit.machinegroup_set.all():
                    machines = machines | group.machine_set.all()
    if page == 'bu_dashboard':
        # only get machines for that BU
        # Need to make sure the user is allowed to see this
        # NOTE(review): no access check is actually performed here -- confirm.
        business_unit = get_object_or_404(BusinessUnit, pk=theID)
        machine_groups = MachineGroup.objects.filter(business_unit=business_unit).prefetch_related('machine_set').all()
        if machine_groups.count() != 0:
            machines_unsorted = machine_groups[0].machine_set.all()
            for machine_group in machine_groups[1:]:
                machines_unsorted = machines_unsorted | machine_group.machine_set.all()
        else:
            machines_unsorted = None
        machines=machines_unsorted
    if page == 'group_dashboard':
        # only get machines from that group
        machine_group = get_object_or_404(MachineGroup, pk=theID)
        # check that the user has access to this
        machines = Machine.objects.filter(machine_group=machine_group)
    # send the machines and the data to the plugin
    # NOTE(review): `html` stays unbound (NameError) when no plugin matches
    # pluginName -- confirm callers only request known plugins.
    for plugin in manager.getAllPlugins():
        if plugin.name == pluginName:
            html = plugin.plugin_object.widget_content(page, machines, theID)
    # c = {'user':user, 'plugin_name': pluginName, 'machines': machines, 'req_type': page, 'title': title, 'bu_id': theID, 'request':request }
    # return render_to_response('server/overview_list_all.html', c, context_instance=RequestContext(request))
    return HttpResponse(html)
@login_required
def export_csv(request, pluginName, data, page='front', theID=None):
    """Stream the machines matched by a plugin as a CSV attachment.

    The machine scope follows the same 'front'/'bu_dashboard'/
    'group_dashboard' scheme as machine_list(); the plugin narrows the
    queryset and supplies the report title used as the filename.
    """
    user = request.user
    title = None
    # Build the plugin manager and discover plugins on disk.
    manager = PluginManager()
    manager.setPluginPlaces([settings.PLUGIN_DIR, os.path.join(settings.PROJECT_DIR, 'server/plugins')])
    manager.collectPlugins()
    # Scope the machine list to what the user may see.
    if page == 'front':
        if user.userprofile.level == 'GA':
            machines = Machine.objects.all()
        else:
            machines = Machine.objects.none()
            for business_unit in user.businessunit_set.all():
                for group in business_unit.machinegroup_set.all():
                    machines = machines | group.machine_set.all()
    if page == 'bu_dashboard':
        # NOTE(review): no explicit per-BU access check is performed here --
        # confirm callers validate access before exposing this endpoint.
        business_unit = get_object_or_404(BusinessUnit, pk=theID)
        machine_groups = MachineGroup.objects.filter(business_unit=business_unit).prefetch_related('machine_set').all()
        if machine_groups.count() != 0:
            machines_unsorted = machine_groups[0].machine_set.all()
            for machine_group in machine_groups[1:]:
                machines_unsorted = machines_unsorted | machine_group.machine_set.all()
        else:
            machines_unsorted = None
        machines = machines_unsorted
    if page == 'group_dashboard':
        machine_group = get_object_or_404(MachineGroup, pk=theID)
        machines = Machine.objects.filter(machine_group=machine_group)
    # Let the plugin narrow the machine list and name the report.
    for plugin in manager.getAllPlugins():
        if plugin.name == pluginName:
            (machines, title) = plugin.plugin_object.filter_machines(machines, data)
    # Create the HttpResponse object with the appropriate CSV header.
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="%s.csv"' % title
    writer = csv.writer(response)
    # Header row: every non-relational Machine field except the noisy ones.
    header_row = []
    fields = Machine._meta.get_fields()
    for field in fields:
        if not field.is_relation and field.name != 'id' and field.name != 'report' and field.name != 'activity' and field.name != 'os_family':
            header_row.append(field.name)
    header_row.append('business_unit')
    header_row.append('machine_group')
    writer.writerow(header_row)
    for machine in machines:
        row = []
        for name, value in machine.get_fields():
            if name != 'id' and name != 'machine_group' and name != 'report' and name != 'activity' and name != 'os_family':
                # BUG FIX: values such as datetimes, integers and None have no
                # .strip(); only strip string-like values instead of raising
                # AttributeError mid-export.
                row.append(value.strip() if hasattr(value, 'strip') else value)
        row.append(machine.machine_group.business_unit.name)
        row.append(machine.machine_group.name)
        writer.writerow(row)
    return response
# New BU
@login_required
def new_business_unit(request):
    """Create a new Business Unit (global admins only).

    BUG FIX: the GA check originally ran only after the POST branch had
    already saved the form, so any authenticated user could create a
    Business Unit by POSTing directly. The check now runs before any form
    processing.
    """
    if request.user.userprofile.level != 'GA':
        return redirect(index)
    c = {}
    c.update(csrf(request))
    if request.method == 'POST':
        form = BusinessUnitForm(request.POST)
        if form.is_valid():
            new_business_unit = form.save(commit=False)
            new_business_unit.save()
            form.save_m2m()
            return redirect('bu_dashboard', new_business_unit.id)
    else:
        form = BusinessUnitForm()
    c = {'form': form}
    return render_to_response('forms/new_business_unit.html', c, context_instance=RequestContext(request))
# Edit BU
@login_required
def edit_business_unit(request, bu_id):
    """Edit a Business Unit (GA only).

    Staff users get the form variant that also lets them (re)assign users
    to the Business Unit.
    """
    user = request.user
    user_level = user.userprofile.level
    if user_level != 'GA':
        return redirect(index)
    business_unit = get_object_or_404(BusinessUnit, pk=int(bu_id))
    c = {}
    c.update(csrf(request))
    if request.method == 'POST':
        if user.is_staff:
            form = EditUserBusinessUnitForm(request.POST, instance=business_unit)
        else:
            form = EditBusinessUnitForm(request.POST, instance=business_unit)
        if form.is_valid():
            new_business_unit = form.save(commit=False)
            new_business_unit.save()
            form.save_m2m()
            return redirect('bu_dashboard', new_business_unit.id)
    else:
        if user.is_staff:
            form = EditUserBusinessUnitForm(instance=business_unit)
        else:
            form = EditBusinessUnitForm(instance=business_unit)
    c = {'form': form, 'business_unit':business_unit}
    # NOTE(review): the GA check below is redundant -- the identical check at
    # the top of the view already returned for non-GA users.
    user = request.user
    user_level = user.userprofile.level
    if user_level != 'GA':
        return redirect(index)
    return render_to_response('forms/edit_business_unit.html', c, context_instance=RequestContext(request))
@login_required
def delete_business_unit(request, bu_id):
    """Show the confirmation page listing everything a BU delete removes."""
    user = request.user
    if user.userprofile.level != 'GA':
        return redirect(index)
    business_unit = get_object_or_404(BusinessUnit, pk=int(bu_id))
    # for machine_group in machine_groups:
    #     machines.append(machine_group.machine_set.all())
    context = {
        'user': user,
        'business_unit': business_unit,
        'config_installed': 'config' in settings.INSTALLED_APPS,
        'machine_groups': business_unit.machinegroup_set.all(),
        'machines': Machine.objects.filter(machine_group__business_unit=business_unit),
    }
    return render_to_response('server/business_unit_delete_confirm.html', context,
                              context_instance=RequestContext(request))
@login_required
def really_delete_business_unit(request, bu_id):
    """Delete the Business Unit after confirmation (GA only)."""
    if request.user.userprofile.level != 'GA':
        return redirect(index)
    get_object_or_404(BusinessUnit, pk=int(bu_id)).delete()
    return redirect(index)
# BU Dashboard
@login_required
def bu_dashboard(request, bu_id):
    """Dashboard for a single Business Unit with its enabled widget plugins.

    Non-GA users must belong to the Business Unit; GA and RW levels are
    treated as editors in the template.
    """
    user = request.user
    user_level = user.userprofile.level
    business_unit = get_object_or_404(BusinessUnit, pk=bu_id)
    bu = business_unit
    config_installed = 'config' in settings.INSTALLED_APPS
    if business_unit not in user.businessunit_set.all() and user_level != 'GA':
        print 'not letting you in ' + user_level
        return redirect(index)
    # Get the groups within the Business Unit
    machine_groups = business_unit.machinegroup_set.all()
    if user_level == 'GA' or user_level == 'RW':
        is_editor = True
    else:
        is_editor = False
    machines = utils.getBUmachines(bu_id)
    now = django.utils.timezone.now()
    hour_ago = now - timedelta(hours=1)
    today = now - timedelta(hours=24)
    week_ago = today - timedelta(days=7)
    month_ago = today - timedelta(days=30)
    three_months_ago = today - timedelta(days=90)
    # Build the manager
    manager = PluginManager()
    # Tell it the default place(s) where to find plugins
    manager.setPluginPlaces([settings.PLUGIN_DIR, os.path.join(settings.PROJECT_DIR, 'server/plugins')])
    # Load all plugins
    manager.collectPlugins()
    output = []
    # Get all the enabled plugins
    enabled_plugins = Plugin.objects.all().order_by('order')
    for enabled_plugin in enabled_plugins:
        # Loop round the plugins and print their names.
        for plugin in manager.getAllPlugins():
            # Old-style plugins without plugin_type() are treated as widgets.
            try:
                plugin_type = plugin.plugin_object.plugin_type()
            except:
                plugin_type = 'widget'
            if plugin.name == enabled_plugin.name and \
                    plugin_type != 'machine_info' and plugin_type != 'full_page':
                data = {}
                data['name'] = plugin.name
                data['width'] = plugin.plugin_object.widget_width()
                # Spinner placeholder; real content loads via plugin_load().
                data['html'] = '<div id="plugin-%s" class="col-md-%s"><img class="center-block blue-spinner" src="%s"/></div>' % (data['name'], str(data['width']), static('img/blue-spinner.gif'))
                output.append(data)
                break
    output = utils.orderPluginOutput(output, 'bu_dashboard', bu.id)
    c = {'user': request.user, 'machine_groups': machine_groups, 'is_editor': is_editor, 'business_unit': business_unit, 'user_level': user_level, 'output':output, 'config_installed':config_installed }
    return render_to_response('server/bu_dashboard.html', c, context_instance=RequestContext(request))
# Overview list (all)
@login_required
def overview_list_all(request, req_type, data, bu_id=None):
    """List machines matching a legacy dashboard filter.

    `req_type` names the filter (errors, warnings, activity, disk/memory/
    uptime thresholds, pending updates, ...) and `data` carries its argument.
    When `bu_id` is given the list is scoped to that Business Unit.
    """
    # get all the BU's that the user has access to
    user = request.user
    user_level = user.userprofile.level
    operating_system = None
    activity = None
    inactivity = None
    disk_space = None
    now = django.utils.timezone.now()
    hour_ago = now - timedelta(hours=1)
    today = now - timedelta(hours=24)
    week_ago = today - timedelta(days=7)
    month_ago = today - timedelta(days=30)
    three_months_ago = today - timedelta(days=90)
    # Memory thresholds expressed in KB.
    mem_4_gb = 4 * 1024 * 1024
    mem_415_gb = 4.15 * 1024 * 1024
    mem_775_gb = 7.75 * 1024 * 1024
    mem_8_gb = 8 * 1024 * 1024
    if req_type == 'operating_system':
        operating_system = data
    if req_type == 'activity':
        activity = data
    if req_type == 'inactivity':
        inactivity = data
    if req_type == 'disk_space_ok':
        disk_space_ok = data
    if req_type == 'disk_space_warning':
        disk_space_warning = data
    if req_type == 'disk_space_alert':
        disk_space_alert = data
    # NOTE(review): the three mem_* branches below all assign
    # disk_space_alert -- almost certainly a copy/paste slip. Harmless today
    # because the mem_* filters further down use fixed thresholds, not these
    # variables.
    if req_type == 'mem_ok':
        disk_space_alert = data
    if req_type == 'mem_warning':
        disk_space_alert = data
    if req_type == 'mem_alert':
        disk_space_alert = data
    if req_type == 'pending_updates':
        pending_update = data
    if req_type == 'pending_apple_updates':
        pending_apple_update = data
    if bu_id != None:
        # Scope to a single Business Unit.
        business_units = get_object_or_404(BusinessUnit, pk=bu_id)
        machine_groups = MachineGroup.objects.filter(business_unit=business_units).prefetch_related('machine_set').all()
        machines_unsorted = machine_groups[0].machine_set.all()
        for machine_group in machine_groups[1:]:
            machines_unsorted = machines_unsorted | machine_group.machine_set.all()
        all_machines=machines_unsorted
        # check user is allowed to see it
        if business_units not in user.businessunit_set.all():
            if user_level != 'GA':
                print 'not letting you in ' + user_level
                return redirect(index)
    else:
        # all BUs the user has access to
        business_units = user.businessunit_set.all()
        # get all the machine groups
        # business_unit = business_units[0].machinegroup_set.all()
        machines_unsorted = Machine.objects.none()
        for business_unit in business_units:
            for machine_group in business_unit.machinegroup_set.all():
                #print machines_unsorted
                machines_unsorted = machines_unsorted | machine_group.machine_set.all()
                #machines_unsorted = machines_unsorted | machine_group.machines.all()
        #machines = user.businessunit_set.select_related('machine_group_set').order_by('machine')
        all_machines = machines_unsorted
        if user_level == 'GA':
            business_units = BusinessUnit.objects.all()
            all_machines = Machine.objects.all()
    if req_type == 'errors':
        machines = all_machines.filter(errors__gt=0)
    if req_type == 'warnings':
        machines = all_machines.filter(warnings__gt=0)
    if req_type == 'active':
        machines = all_machines.filter(activity__isnull=False)
    if req_type == 'disk_space_ok':
        machines = all_machines.filter(hd_percent__lt=80)
    if req_type == 'disk_space_warning':
        machines = all_machines.filter(hd_percent__range=["80", "89"])
    if req_type == 'disk_space_alert':
        machines = all_machines.filter(hd_percent__gte=90)
    if req_type == 'mem_ok':
        machines = all_machines.filter(memory_kb__gte=mem_8_gb)
    if req_type == 'mem_warning':
        machines = all_machines.filter(memory_kb__range=[mem_4_gb, mem_775_gb])
    if req_type == 'mem_alert':
        machines = all_machines.filter(memory_kb__lt=mem_4_gb)
    if req_type == 'uptime_ok':
        machines = all_machines.filter(fact__fact_name='uptime_days', fact__fact_data__lte=1)
    if req_type == 'uptime_warning':
        machines = all_machines.filter(fact__fact_name='uptime_days', fact__fact_data__range=[1,7])
    if req_type == 'uptime_alert':
        machines = all_machines.filter(fact__fact_name='uptime_days', fact__fact_data__gt=7)
    if activity is not None:
        if data == '1-hour':
            machines = all_machines.filter(last_checkin__gte=hour_ago)
        if data == 'today':
            machines = all_machines.filter(last_checkin__gte=today)
        if data == '1-week':
            machines = all_machines.filter(last_checkin__gte=week_ago)
    if inactivity is not None:
        if data == '1-month':
            machines = all_machines.filter(last_checkin__range=(three_months_ago, month_ago))
        if data == '3-months':
            machines = all_machines.exclude(last_checkin__gte=three_months_ago)
    if operating_system is not None:
        machines = all_machines.filter(operating_system__exact=operating_system)
    if req_type == 'pending_updates':
        machines = all_machines.filter(pendingupdate__update=pending_update)
    if req_type == 'pending_apple_updates':
        machines = all_machines.filter(pendingappleupdate__update=pending_apple_update)
    # NOTE(review): `machines` is unbound (NameError) when req_type/data match
    # none of the branches above -- confirm callers only use known values.
    c = {'user':user, 'machines': machines, 'req_type': req_type, 'data': data, 'bu_id': bu_id }
    return render_to_response('server/overview_list_all.html', c, context_instance=RequestContext(request))
# Machine Group Dashboard
@login_required
def group_dashboard(request, group_id):
    """Dashboard for a single machine group with its enabled widget plugins.

    Non-GA users must belong to the group's Business Unit; GA and RW levels
    are treated as editors in the template.
    """
    # check user is allowed to access this
    user = request.user
    config_installed = 'config' in settings.INSTALLED_APPS
    user_level = user.userprofile.level
    machine_group = get_object_or_404(MachineGroup, pk=group_id)
    business_unit = machine_group.business_unit
    if business_unit not in user.businessunit_set.all():
        if user_level != 'GA':
            return redirect(index)
    if user_level == 'GA' or user_level == 'RW':
        is_editor = True
    else:
        is_editor = False
    machines = machine_group.machine_set.all()
    # Build the manager
    manager = PluginManager()
    # Tell it the default place(s) where to find plugins
    manager.setPluginPlaces([settings.PLUGIN_DIR, os.path.join(settings.PROJECT_DIR, 'server/plugins')])
    # Load all plugins
    manager.collectPlugins()
    output = []
    # Get all the enabled plugins
    enabled_plugins = Plugin.objects.all().order_by('order')
    for enabled_plugin in enabled_plugins:
        # Loop round the plugins and print their names.
        for plugin in manager.getAllPlugins():
            # Old-style plugins without plugin_type() are treated as widgets.
            try:
                plugin_type = plugin.plugin_object.plugin_type()
            except:
                plugin_type = 'widget'
            if plugin.name == enabled_plugin.name and \
                    plugin_type != 'machine_info' and plugin_type != 'full_page':
                data = {}
                data['name'] = plugin.name
                data['width'] = plugin.plugin_object.widget_width()
                # Spinner placeholder; real content loads via plugin_load().
                data['html'] = '<div id="plugin-%s" class="col-md-%s"><img class="center-block blue-spinner" src="%s"/></div>' % (data['name'], str(data['width']), static('img/blue-spinner.gif'))
                output.append(data)
                break
    output = utils.orderPluginOutput(output, 'group_dashboard', machine_group.id)
    c = {'user': request.user, 'machine_group': machine_group, 'user_level': user_level, 'is_editor': is_editor, 'business_unit': business_unit, 'output':output, 'config_installed':config_installed, 'request':request}
    return render_to_response('server/group_dashboard.html', c, context_instance=RequestContext(request))
# New Group
@login_required
def new_machine_group(request, bu_id):
    """Create a machine group inside a Business Unit (editors only).

    BUG FIX: the editor/membership check originally ran only after the POST
    branch had already saved the new group, so read-only or unaffiliated
    users could create groups by POSTing directly. The check now runs
    before any form processing.
    """
    business_unit = get_object_or_404(BusinessUnit, pk=bu_id)
    user = request.user
    user_level = user.userprofile.level
    if user_level == 'GA' or user_level == 'RW':
        is_editor = True
    else:
        is_editor = False
    # Non-GA users must be editors AND members of the Business Unit.
    if business_unit not in user.businessunit_set.all() or is_editor == False:
        if user_level != 'GA':
            return redirect(index)
    c = {}
    c.update(csrf(request))
    if request.method == 'POST':
        form = MachineGroupForm(request.POST)
        if form.is_valid():
            new_machine_group = form.save(commit=False)
            new_machine_group.business_unit = business_unit
            new_machine_group.save()
            #form.save_m2m()
            return redirect('group_dashboard', new_machine_group.id)
    else:
        form = MachineGroupForm()
    c = {'form': form, 'is_editor': is_editor, 'business_unit': business_unit, }
    return render_to_response('forms/new_machine_group.html', c, context_instance=RequestContext(request))
# Edit Group
@login_required
def edit_machine_group(request, group_id):
    """Edit a machine group (editors of the owning Business Unit only)."""
    c = {}
    c.update(csrf(request))
    machine_group = get_object_or_404(MachineGroup, pk=group_id)
    business_unit = machine_group.business_unit
    user = request.user
    user_level = user.userprofile.level
    if user_level == 'GA' or user_level == 'RW':
        is_editor = True
    else:
        is_editor = False
    # Non-GA users must be editors AND members of the Business Unit.
    if business_unit not in user.businessunit_set.all() or is_editor == False:
        if user_level != 'GA':
            return redirect(index)
    if request.method == 'POST':
        form = EditMachineGroupForm(request.POST, instance=machine_group)
        if form.is_valid():
            # NOTE(review): saves the instance instead of form.save(); this
            # relies on ModelForm validation applying cleaned data to
            # `machine_group`, and would skip any m2m data -- confirm intended.
            machine_group.save()
            #form.save_m2m()
            return redirect('group_dashboard', machine_group.id)
    else:
        form = EditMachineGroupForm(instance=machine_group)
    c = {'form': form, 'is_editor': is_editor, 'business_unit': business_unit, 'machine_group':machine_group}
    return render_to_response('forms/edit_machine_group.html', c, context_instance=RequestContext(request))
# Delete Group
# New machine
@login_required
def new_machine(request, group_id):
    """Create a new Machine in the given MachineGroup.

    GET renders an empty NewMachineForm; a valid POST saves the machine
    and redirects to its detail page. Restricted to 'GA'/'RW' users with
    access to the group's business unit (global admins always allowed).
    """
    c = {}
    c.update(csrf(request))
    machine_group = get_object_or_404(MachineGroup, pk=group_id)
    business_unit = machine_group.business_unit
    # Authorization first: previously the POST branch saved the machine
    # and redirected before any permission check ran, letting
    # unauthorized users create rows.
    user = request.user
    user_level = user.userprofile.level
    is_editor = user_level in ('GA', 'RW')
    if business_unit not in user.businessunit_set.all() or not is_editor:
        if user_level != 'GA':
            return redirect(index)
    if request.method == 'POST':
        form = NewMachineForm(request.POST)
        if form.is_valid():
            new_machine = form.save(commit=False)
            # The group comes from the URL, not from form data.
            new_machine.machine_group = machine_group
            new_machine.save()
            #form.save_m2m()
            return redirect('machine_detail', new_machine.id)
    else:
        form = NewMachineForm()
    c = {'form': form, 'is_editor': is_editor, 'machine_group': machine_group, }
    return render_to_response('forms/new_machine.html', c, context_instance=RequestContext(request))
# Machine detail
@login_required
def machine_detail(request, machine_id):
    """Render the detail page for a single machine.

    ``machine_id`` may be a primary key or, failing that, a serial
    number (a non-integer id raises ValueError from the pk lookup).
    The context is built from the machine's stored Munki report:
    install/removal results, pending items, facts, conditions and the
    reported IPv4 address.
    """
    # Accept either a pk or a serial number as the identifier.
    try:
        machine = Machine.objects.get(pk=machine_id)
    except (ValueError, Machine.DoesNotExist):
        machine = get_object_or_404(Machine, serial=machine_id)
    machine_group = machine.machine_group
    business_unit = machine_group.business_unit
    # check the user is in a BU that's allowed to see this Machine
    user = request.user
    user_level = user.userprofile.level
    if business_unit not in user.businessunit_set.all():
        # Global admins can see every machine.
        if user_level != 'GA':
            return redirect(index)
    report = machine.get_report()
    # Facts, minus any names excluded via settings.EXCLUDED_FACTS.
    if machine.facts.count() != 0:
        facts = machine.facts.all()
        if settings.EXCLUDED_FACTS:
            for excluded in settings.EXCLUDED_FACTS:
                facts = facts.exclude(fact_name=excluded)
    else:
        facts = None
    # Conditions, minus exclusions; also pull the reported IPv4 address.
    if machine.conditions.count() != 0:
        conditions = machine.conditions.all()
        # get the IP address(es) from the condition
        try:
            ip_address = conditions.get(machine=machine, condition_name__exact='ipv4_address')
            ip_address = ip_address.condition_data
        except:
            # No (or multiple) ipv4_address conditions: show nothing.
            ip_address = None
        if settings.EXCLUDED_CONDITIONS:
            for excluded in settings.EXCLUDED_CONDITIONS:
                conditions = conditions.exclude(condition_name=excluded)
    else:
        conditions = None
        ip_address = None
    # Map "name-version" -> 'installed'/'error' from the report's
    # InstallResults; a status of 0 means the install succeeded.
    install_results = {}
    for result in report.get('InstallResults', []):
        nameAndVers = result['name'] + '-' + result['version']
        if result['status'] == 0:
            install_results[nameAndVers] = "installed"
        else:
            install_results[nameAndVers] = 'error'
    if install_results:
        # Annotate each pending install with its outcome and history.
        for item in report.get('ItemsToInstall', []):
            name = item.get('display_name', item['name'])
            nameAndVers = ('%s-%s'
                           % (name, item['version_to_install']))
            item['install_result'] = install_results.get(
                nameAndVers, 'pending')
            # Get the update history
            try:
                update_history = UpdateHistory.objects.get(machine=machine,
                    version=item['version_to_install'],
                    name=item['name'], update_type='third_party')
                item['update_history'] = UpdateHistoryItem.objects.filter(update_history=update_history)
            # NOTE(review): UpdateHistory.objects.get() raises
            # UpdateHistory.DoesNotExist / MultipleObjectsReturned, not
            # IndexError, so this handler likely never fires and a missing
            # history would propagate — confirm intent.
            except IndexError, e:
                print e
    # Flag ManagedInstalls that the report shows as just installed, and
    # attach any third-party update history keyed by the best-known version.
    for item in report.get('ManagedInstalls', []):
        if 'version_to_install' in item:
            name = item.get('display_name', item['name'])
            nameAndVers = ('%s-%s'
                           % (name, item['version_to_install']))
            if install_results.get(nameAndVers) == 'installed':
                item['installed'] = True
        if 'version_to_install' in item or 'installed_version' in item:
            # Prefer the version about to be installed over the one on disk.
            if 'version_to_install' in item:
                version = item['version_to_install']
            else:
                version = item['installed_version']
            item['version'] = version
            # Get the update history
            try:
                update_history = UpdateHistory.objects.get(machine=machine,
                    version=version,
                    name=item['name'], update_type='third_party')
                item['update_history'] = UpdateHistoryItem.objects.filter(update_history=update_history)
            except Exception, e:
                print e
    # handle items that were removed during the most recent run
    # this is crappy. We should fix it in Munki.
    removal_results = {}
    for result in report.get('RemovalResults', []):
        # RemovalResults lines look like "Removal of <name>: <status>".
        m = re.search('^Removal of (.+): (.+)$', result)
        if m:
            try:
                if m.group(2) == 'SUCCESSFUL':
                    removal_results[m.group(1)] = 'removed'
                else:
                    removal_results[m.group(1)] = m.group(2)
            except IndexError:
                pass
    if removal_results:
        for item in report.get('ItemsToRemove', []):
            name = item.get('display_name', item['name'])
            item['install_result'] = removal_results.get(
                name, 'pending')
            if item['install_result'] == 'removed':
                # Keep a de-duplicated list of removed names for the template.
                if not 'RemovedItems' in report:
                    report['RemovedItems'] = [item['name']]
                elif not name in report['RemovedItems']:
                    report['RemovedItems'].append(item['name'])
    config_installed = 'config' in settings.INSTALLED_APPS
    if 'managed_uninstalls_list' in report:
        report['managed_uninstalls_list'].sort()
        if config_installed:
            # The optional 'config' app can filter the uninstall list per BU.
            from config.views import filter_uninstalls
            report['managed_uninstalls_list'] = filter_uninstalls(business_unit.id, report['managed_uninstalls_list'])
    c = {'user': user, 'machine_group': machine_group, 'business_unit': business_unit, 'report': report, 'install_results': install_results, 'removal_results': removal_results, 'machine': machine, 'facts': facts, 'conditions': conditions, 'ip_address': ip_address, 'config_installed': config_installed}
    return render_to_response('server/machine_detail.html', c, context_instance=RequestContext(request))
# Edit Machine
# Delete Machine
@login_required
def delete_machine(request, machine_id):
    """Delete a machine, then return to its group's dashboard."""
    machine = get_object_or_404(Machine, pk=machine_id)
    group = machine.machine_group
    # Users outside the machine's business unit may not delete it,
    # unless they are global admins.
    user = request.user
    if group.business_unit not in user.businessunit_set.all() and user.userprofile.level != 'GA':
        return redirect(index)
    machine.delete()
    return redirect('group_dashboard', group.id)
@login_required
def settings_page(request):
    """Render the server settings page (global admins only)."""
    user = request.user
    user_level = user.userprofile.level
    # Pull the retention setting, creating the 180-day default on first use.
    try:
        retention = SalSetting.objects.get(name='historical_retention')
    except SalSetting.DoesNotExist:
        retention = SalSetting(name='historical_retention', value='180')
        retention.save()
    historical_setting_form = SettingsHistoricalDataForm(initial={'days': retention.value})
    if user_level != 'GA':
        return redirect(index)
    c = {'user': request.user,
         'request': request,
         'historical_setting_form': historical_setting_form}
    return render_to_response('server/settings.html', c, context_instance=RequestContext(request))
@login_required
def settings_historical_data(request):
    """Save the historical data retention setting (GA only).

    A valid POST updates (or creates) the 'historical_retention'
    SalSetting from the form's 'days' field. Every path redirects back
    to the settings page.
    """
    user = request.user
    user_level = user.userprofile.level
    if user_level != 'GA':
        return redirect(index)
    if request.method == 'POST':
        # create a form instance and populate it with data from the request:
        form = SettingsHistoricalDataForm(request.POST)
        # check whether it's valid:
        if form.is_valid():
            try:
                historical_setting = SalSetting.objects.get(name='historical_retention')
            except SalSetting.DoesNotExist:
                historical_setting = SalSetting(name='historical_retention')
            historical_setting.value = form.cleaned_data['days']
            historical_setting.save()
            messages.success(request, 'Data retention settings saved.')
        # Fix: an invalid POST previously fell off the end of the view and
        # returned None, which Django treats as a server error. Always
        # redirect back to the settings page instead.
        return redirect('settings_page')
    else:
        return redirect('settings_page')
@login_required
def plugins_page(request):
    """Show enabled and disabled plugins (global admins only)."""
    if request.user.userprofile.level != 'GA':
        return redirect(index)
    # Refresh the Plugin table from what's on disk before listing.
    utils.reloadPluginsModel()
    c = {'user': request.user,
         'request': request,
         'enabled_plugins': Plugin.objects.all(),
         'disabled_plugins': utils.disabled_plugins()}
    return render_to_response('server/plugins.html', c, context_instance=RequestContext(request))
@login_required
def plugin_plus(request, plugin_id):
    """Move a plugin one slot later in the dashboard ordering (GA only).

    Swaps the plugin's order with the plugin immediately after it;
    404s if the plugin is already last.
    """
    profile = UserProfile.objects.get(user=request.user)
    if profile.level != 'GA':
        return redirect('server.views.index')
    # The plugin being moved down, and its current successor.
    moving = get_object_or_404(Plugin, pk=plugin_id)
    successor = get_object_or_404(Plugin, order=(int(moving.order) + 1))
    # Swap order values; the successor slides up into the vacated slot.
    moving.order += 1
    moving.save()
    successor.order -= 1
    successor.save()
    return redirect('plugins_page')
@login_required
def plugin_minus(request, plugin_id):
    """Move a plugin one slot earlier in the dashboard ordering (GA only).

    Swaps the plugin's order with the plugin immediately before it;
    404s if the plugin is already first.
    """
    profile = UserProfile.objects.get(user=request.user)
    if profile.level != 'GA':
        return redirect('server.views.index')
    # The plugin being moved up, and its current predecessor.
    moving = get_object_or_404(Plugin, pk=plugin_id)
    predecessor = get_object_or_404(Plugin, order=(int(moving.order) - 1))
    # Swap order values; the predecessor slides down into the vacated slot.
    moving.order -= 1
    moving.save()
    predecessor.order += 1
    predecessor.save()
    return redirect('plugins_page')
@login_required
def plugin_disable(request, plugin_id):
    """Disable a plugin by deleting its Plugin row (GA only)."""
    profile = UserProfile.objects.get(user=request.user)
    if profile.level != 'GA':
        return redirect('server.views.index')
    get_object_or_404(Plugin, pk=plugin_id).delete()
    return redirect('plugins_page')
@login_required
def plugin_enable(request, plugin_name):
    """Enable a plugin by name, creating its Plugin row if needed.

    Restricted to global admins, consistent with plugin_disable /
    plugin_plus / plugin_minus (the check was previously missing here).
    """
    profile = UserProfile.objects.get(user=request.user)
    if profile.level != 'GA':
        return redirect('server.views.index')
    # only do this if there isn't a plugin already with the name
    try:
        plugin = Plugin.objects.get(name=plugin_name)
    except Plugin.DoesNotExist:
        plugin = Plugin(name=plugin_name, order=utils.UniquePluginOrder())
        plugin.save()
    return redirect('plugins_page')
@login_required
def api_keys(request):
    """List all API keys (global admins only)."""
    if request.user.userprofile.level != 'GA':
        return redirect(index)
    c = {'user': request.user,
         'api_keys': ApiKey.objects.all(),
         'request': request}
    return render_to_response('server/api_keys.html', c, context_instance=RequestContext(request))
@login_required
def new_api_key(request):
    """Create a new API key (GA only) and redirect to its one-time display.

    GET renders an empty ApiKeyForm; a valid POST saves the key.
    """
    # Authorization first: previously a POST saved the key and redirected
    # before the permission check ran, letting non-GA users mint keys.
    if request.user.userprofile.level != 'GA':
        return redirect(index)
    c = {}
    c.update(csrf(request))
    if request.method == 'POST':
        form = ApiKeyForm(request.POST)
        if form.is_valid():
            new_api_key = form.save()
            return redirect('display_api_key', key_id=new_api_key.id)
    else:
        form = ApiKeyForm()
    c = {'form': form}
    return render_to_response('forms/new_api_key.html', c, context_instance=RequestContext(request))
@login_required
def display_api_key(request, key_id):
    """Show an API key exactly once (GA only).

    After the first viewing the key is flagged as seen and any further
    requests are redirected to the index.
    """
    if request.user.userprofile.level != 'GA':
        return redirect(index)
    api_key = get_object_or_404(ApiKey, pk=int(key_id))
    if api_key.has_been_seen:
        # The secret is only ever displayed a single time.
        return redirect(index)
    api_key.has_been_seen = True
    api_key.save()
    c = {'user': request.user, 'api_key': api_key, 'request': request}
    return render_to_response('server/api_key_display.html', c, context_instance=RequestContext(request))
@login_required
def edit_api_key(request, key_id):
    """Edit an existing API key (GA only).

    GET renders the key in an ApiKeyForm; a valid POST saves the edits
    and returns to the key list.
    """
    user = request.user
    user_level = user.userprofile.level
    if user_level != 'GA':
        return redirect(index)
    api_key = get_object_or_404(ApiKey, pk=int(key_id))
    c = {}
    c.update(csrf(request))
    if request.method == 'POST':
        form = ApiKeyForm(request.POST, instance=api_key)
        if form.is_valid():
            api_key = form.save()
            return redirect(api_keys)
    else:
        form = ApiKeyForm(instance=api_key)
    # The duplicated trailing permission check was removed: user_level was
    # already verified to be 'GA' above, so it could never trigger.
    c = {'form': form, 'api_key': api_key}
    return render_to_response('forms/edit_api_key.html', c, context_instance=RequestContext(request))
@login_required
def delete_api_key(request, key_id):
    """Delete an API key (GA only), then return to the key list."""
    if request.user.userprofile.level != 'GA':
        return redirect(index)
    get_object_or_404(ApiKey, pk=int(key_id)).delete()
    return redirect(api_keys)
# preflight
@csrf_exempt
def preflight(request):
    """Return, as JSON, the osquery queries declared by installed plugins.

    Scans the configured plugin directories, asks every plugin of type
    'osquery' for its queries, and returns them keyed by query name
    (the 'name' key is popped from each query dict).
    """
    # Build the manager
    manager = PluginManager()
    # Tell it the default place(s) where to find plugins
    manager.setPluginPlaces([settings.PLUGIN_DIR, os.path.join(settings.PROJECT_DIR, 'server/plugins')])
    # Load all plugins
    manager.collectPlugins()
    output = {}
    output['queries'] = {}
    for plugin in manager.getAllPlugins():
        try:
            if plugin.plugin_object.plugin_type() == 'osquery':
                # No other plugins will have info for this
                for query in plugin.plugin_object.get_queries():
                    name = query['name']
                    del query['name']
                    output['queries'][name] = query
        except Exception:
            # Plugins without plugin_type()/get_queries() are skipped.
            # (Was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            pass
    return HttpResponse(json.dumps(output))
# checkin
@csrf_exempt
def checkin(request):
    """Main Sal client check-in endpoint.

    Accepts a POST containing at minimum a machine 'serial' and machine
    group 'key', plus an optional base64+bz2 Munki report
    ('base64bz2report'). Creates or updates the Machine row, then
    rebuilds its pending updates, facts, conditions and osquery results
    from the submitted report. Returns a plain-text acknowledgement.
    """
    if request.method != 'POST':
        print 'not post data'
        raise Http404
    data = request.POST
    key = data.get('key')
    serial = data.get('serial')
    serial = serial.upper()
    # Take out some of the weird junk VMware puts in. Keep an eye out in case Apple actually uses these:
    serial = serial.replace('/', '')
    serial = serial.replace('+', '')
    # Are we using Sal for some sort of inventory (like, I don't know, Puppet?)
    try:
        add_new_machines = settings.ADD_NEW_MACHINES
    except:
        add_new_machines = True
    if add_new_machines == True:
        # look for serial number - if it doesn't exist, create one
        if serial:
            try:
                machine = Machine.objects.get(serial=serial)
            except Machine.DoesNotExist:
                machine = Machine(serial=serial)
    else:
        # Inventory-managed installs: unknown serials are rejected.
        machine = get_object_or_404(Machine, serial=serial)
    # Fall back to the configured default group key when none was sent.
    if key is None or key == 'None':
        try:
            key = settings.DEFAULT_MACHINE_GROUP_KEY
        except Exception:
            pass
    machine_group = get_object_or_404(MachineGroup, key=key)
    business_unit = machine_group.business_unit
    # Retention setting, creating the 180-day default on first use.
    try:
        historical_setting = SalSetting.objects.get(name='historical_retention')
        historical_days = historical_setting.value
    except SalSetting.DoesNotExist:
        historical_setting = SalSetting(name='historical_retention', value='180')
        historical_setting.save()
        historical_days = '180'
    # NOTE(review): historical_days is a *string* here; see the
    # timedelta(days=historical_days) calls below.
    if machine:
        machine.hostname = data.get('name', '<NO NAME>')
        try:
            use_enc = settings.USE_ENC
            # If we're using Sal's Puppet ENC, don't change the machine group,
            # as we're setting it in the GUI
        except:
            use_enc = False
        if use_enc == False:
            machine.machine_group = machine_group
        machine.last_checkin = datetime.now()
        if 'username' in data:
            machine.username = data.get('username')
        if 'base64bz2report' in data:
            machine.update_report(data.get('base64bz2report'))
        if 'sal_version' in data:
            machine.sal_version = data.get('sal_version')
        # extract machine data from the report
        report_data = machine.get_report()
        if 'Puppet_Version' in report_data:
            machine.puppet_version = report_data['Puppet_Version']
        if 'ManifestName' in report_data:
            manifest = report_data['ManifestName']
            machine.manifest = manifest
        if 'MachineInfo' in report_data:
            machine.operating_system = report_data['MachineInfo'].get(
                'os_vers', 'UNKNOWN')
            # some machines are reporting 10.9, some 10.9.0 - make them the same
            if len(machine.operating_system) <= 4:
                machine.operating_system = machine.operating_system + '.0'
        machine.hd_space = report_data.get('AvailableDiskSpace') or 0
        # NOTE(review): int(data.get('disk_size')) raises TypeError when
        # 'disk_size' is absent — the `or 0` only covers an explicit zero.
        # Confirm clients always send disk_size.
        machine.hd_total = int(data.get('disk_size')) or 0
        machine.hd_percent = int(round(((float(machine.hd_total)-float(machine.hd_space))/float(machine.hd_total))*100))
        machine.munki_version = report_data.get('ManagedInstallVersion') or 0
        # Pull the SPHardwareDataType entry out of the system profiler data.
        hwinfo = {}
        if 'SystemProfile' in report_data.get('MachineInfo', []):
            for profile in report_data['MachineInfo']['SystemProfile']:
                if profile['_dataType'] == 'SPHardwareDataType':
                    hwinfo = profile._items[0]
                    break
        if 'Puppet' in report_data:
            puppet = report_data.get('Puppet')
            if 'time' in puppet:
                machine.last_puppet_run = datetime.fromtimestamp(float(puppet['time']['last_run']))
            if 'events' in puppet:
                machine.puppet_errors = puppet['events']['failure']
        if hwinfo:
            machine.machine_model = hwinfo.get('machine_model')
            machine.cpu_type = hwinfo.get('cpu_type')
            machine.cpu_speed = hwinfo.get('current_processor_speed')
            machine.memory = hwinfo.get('physical_memory')
            # Normalize the human-readable memory string (e.g. "8 GB") to KB.
            if hwinfo.get('physical_memory')[-2:] == 'MB':
                memory_mb = float(hwinfo.get('physical_memory')[:-3])
                machine.memory_kb = int(memory_mb * 1024)
            if hwinfo.get('physical_memory')[-2:] == 'GB':
                memory_gb = float(hwinfo.get('physical_memory')[:-3])
                machine.memory_kb = int(memory_gb * 1024 * 1024)
            if hwinfo.get('physical_memory')[-2:] == 'TB':
                memory_tb = float(hwinfo.get('physical_memory')[:-3])
                machine.memory_kb = int(memory_tb * 1024 * 1024 * 1024)
        if 'os_family' in report_data:
            machine.os_family = report_data['os_family']
        machine.save()
        # Remove existing PendingUpdates for the machine
        updates = machine.pending_updates.all()
        updates.delete()
        now = datetime.now()
        if 'ItemsToInstall' in report_data:
            for update in report_data.get('ItemsToInstall'):
                display_name = update.get('display_name', update['name'])
                update_name = update.get('name')
                version = str(update['version_to_install'])
                pending_update = PendingUpdate(machine=machine, display_name=display_name, update_version=version, update=update_name)
                pending_update.save()
                # Let's handle some of those lovely pending installs into the UpdateHistory Model
                try:
                    update_history = UpdateHistory.objects.get(name=update_name, version=version, machine=machine, update_type='third_party')
                except UpdateHistory.DoesNotExist:
                    update_history = UpdateHistory(name=update_name, version=version, machine=machine, update_type='third_party')
                    update_history.save()
                # Only record one 'pending' item per history entry.
                if update_history.pending_recorded == False:
                    update_history_item = UpdateHistoryItem(update_history=update_history, status='pending', recorded=now)
                    update_history_item.save()
                    update_history.pending_recorded = True
                    update_history.save()
        # Remove existing PendingAppleUpdates for the machine
        updates = machine.pending_apple_updates.all()
        updates.delete()
        if 'AppleUpdates' in report_data:
            for update in report_data.get('AppleUpdates'):
                display_name = update.get('display_name', update['name'])
                update_name = update.get('name')
                version = str(update['version_to_install'])
                pending_update = PendingAppleUpdate(machine=machine, display_name=display_name, update_version=version, update=update_name)
                pending_update.save()
                # Let's handle some of those lovely pending installs into the UpdateHistory Model
                try:
                    update_history = UpdateHistory.objects.get(name=update_name, version=version, machine=machine, update_type='apple')
                except UpdateHistory.DoesNotExist:
                    update_history = UpdateHistory(name=update_name, version=version, machine=machine, update_type='apple')
                    update_history.save()
                # Only record one 'pending' item per history entry.
                if update_history.pending_recorded == False:
                    update_history_item = UpdateHistoryItem(update_history=update_history, status='pending', recorded=now)
                    update_history_item.save()
                    update_history.pending_recorded = True
                    update_history.save()
        # if Facter data is submitted, we need to first remove any existing facts for this machine
        if 'Facter' in report_data:
            facts = machine.facts.all()
            facts.delete()
            # Delete old historical facts
            try:
                # NOTE(review): historical_days is a string, so this
                # timedelta() call raises TypeError and the cleanup is
                # silently skipped — probably needs int(historical_days).
                datelimit = datetime.now() - timedelta(days=historical_days)
                HistoricalFact.objects.filter(fact_recorded__lt=datelimit).delete()
            except Exception:
                pass
            try:
                historical_facts = settings.HISTORICAL_FACTS
            except Exception:
                historical_facts = []
                pass
            # now we need to loop over the submitted facts and save them
            for fact_name, fact_data in report_data['Facter'].iteritems():
                fact = Fact(machine=machine, fact_name=fact_name, fact_data=fact_data)
                fact.save()
                if fact_name in historical_facts:
                    fact = HistoricalFact(machine=machine, fact_name=fact_name, fact_data=fact_data, fact_recorded=datetime.now())
                    fact.save()
        # Conditions are fully replaced on every check-in.
        if 'Conditions' in report_data:
            conditions = machine.conditions.all()
            conditions.delete()
            for condition_name, condition_data in report_data['Conditions'].iteritems():
                # if it's a list (more than one result), we're going to conacetnate it into one comma separated string
                if type(condition_data) == list:
                    result = None
                    for item in condition_data:
                        # is this the first loop? If so, no need for a comma
                        if result:
                            result = result + ', '+str(item)
                        else:
                            result = item
                    condition_data = result
                #print condition_data
                condition = Condition(machine=machine, condition_name=condition_name, condition_data=str(condition_data))
                condition.save()
        if 'osquery' in report_data:
            # Purge osquery results older than the retention window.
            try:
                datelimit = (datetime.now() - timedelta(days=historical_days)).strftime("%s")
                OSQueryResult.objects.filter(unix_time__lt=datelimit).delete()
            except:
                pass
            for report in report_data['osquery']:
                unix_time = int(report['unixTime'])
                # Have we already processed this report?
                try:
                    osqueryresult = OSQueryResult.objects.get(hostidentifier=report['hostIdentifier'], machine=machine, unix_time=unix_time, name=report['name'])
                    continue
                except OSQueryResult.DoesNotExist:
                    osqueryresult = OSQueryResult(hostidentifier=report['hostIdentifier'], machine=machine, unix_time=unix_time, name=report['name'])
                    osqueryresult.save()
                for items in report['diffResults']['added']:
                    for column, col_data in items.items():
                        osquerycolumn = OSQueryColumn(osquery_result=osqueryresult, action='added', column_name=column, column_data=col_data)
                        osquerycolumn.save()
                # NOTE(review): this loop binds `item` but reads
                # `items.items()` — the leftover variable from the 'added'
                # loop above — so 'removed' rows are almost certainly built
                # from the wrong dict. Confirm and fix.
                for item in report['diffResults']['removed']:
                    for column, col_data in items.items():
                        osquerycolumn = OSQueryColumn(osquery_result=osqueryresult, action='removed', column_name=column, column_data=col_data)
                        osquerycolumn.save()
    return HttpResponse("Sal report submmitted for %s"
                        % data.get('name'))
@csrf_exempt
def install_log_hash(request, serial):
    """Return the stored SHA-256 hash of a machine's install log.

    Clients compare this against their local log's hash to decide
    whether a resubmission is needed. Unknown serials yield an empty
    response body; a missing serial yields "MACHINE NOT FOUND".
    """
    if not serial:
        return HttpResponse("MACHINE NOT FOUND")
    sha256hash = ''
    machine = None
    try:
        machine = Machine.objects.get(serial=serial)
        sha256hash = machine.install_log_hash
    except (Machine.DoesNotExist, Inventory.DoesNotExist):
        # Unknown machine: fall through with an empty hash.
        pass
    return HttpResponse(sha256hash)
def process_update_item(name, version, update_type, action, recorded, machine, extra=None):
    """Record a single update event in a machine's update history.

    Fetches (or creates) the UpdateHistory row for this
    name/version/type/machine, then fetches (or creates) the
    UpdateHistoryItem for the given timestamp and status. When `extra`
    is supplied it is stored on the item. A completed 'install' or
    'removal' clears the history's pending_recorded flag.
    """
    # Get a parent update history item, or create one.
    try:
        update_history = UpdateHistory.objects.get(
            name=name, version=version, update_type=update_type, machine=machine)
    except UpdateHistory.DoesNotExist:
        update_history = UpdateHistory(
            name=name, version=version, update_type=update_type, machine=machine)
        update_history.save()
    # Now make sure the event itself isn't already recorded.
    try:
        update_history_item = UpdateHistoryItem.objects.get(
            recorded=recorded, status=action, update_history=update_history)
    except UpdateHistoryItem.DoesNotExist:
        update_history_item = UpdateHistoryItem(
            recorded=recorded, status=action, update_history=update_history)
        update_history_item.save()
    if extra:
        update_history_item.extra = extra
        update_history_item.save()
    if action in ('install', 'removal'):
        # The update is no longer pending once it completes either way.
        update_history.pending_recorded = False
        update_history.save()
@csrf_exempt
def install_log_submit(request):
    """Accept a POSTed, base64+bz2-compressed Munki install log.

    Expects 'serial', 'key' (machine group key) and
    'base64bz2installlog' in the POST body. Stores the decoded log on
    the Machine, parses each line into UpdateHistory records via
    process_update_item(), and saves a SHA-256 hash of the log so
    clients (via install_log_hash) can skip resubmitting an unchanged
    log.
    """
    if request.method != 'POST':
        raise Http404
    submission = request.POST
    serial = submission.get('serial')
    key = submission.get('key')
    machine = None
    if serial:
        try:
            machine = Machine.objects.get(serial=serial)
        except Machine.DoesNotExist:
            raise Http404
        # Check the key
        machine_group = get_object_or_404(MachineGroup, key=key)
        if machine_group.id != machine.machine_group.id:
            raise Http404
        compressed_log= submission.get('base64bz2installlog')
        if compressed_log:
            # Form-encoding turns '+' into ' '; restore before decoding.
            compressed_log = compressed_log.replace(" ", "+")
            log_str = utils.decode_to_string(compressed_log)
            machine.install_log = log_str
            machine.save()
            # NOTE(review): every `(name, version) = m.group(2).rsplit('-',1)`
            # below raises ValueError (not IndexError) when no '-' is present,
            # so the IndexError handlers likely never fire — confirm.
            for line in log_str.splitlines():
                # Third party install successes first
                m = re.search('(.+) Install of (.+): (.+)$', line)
                if m:
                    try:
                        if m.group(3) == 'SUCCESSFUL':
                            the_date = dateutil.parser.parse(m.group(1))
                            (name, version) = m.group(2).rsplit('-',1)
                            process_update_item(name, version, 'third_party', 'install', the_date,
                                                machine)
                            # We've processed this line, move on
                            continue
                    except IndexError:
                        pass
                # Third party install failures
                m = re.search('(.+) Install of (.+): FAILED (.+)$', line)
                if m:
                    try:
                        the_date = dateutil.parser.parse(m.group(1))
                        (name, version) = m.group(2).rsplit('-',1)
                        extra = m.group(3)
                        process_update_item(name, version, 'third_party', 'error', the_date,
                                            machine, extra)
                        # We've processed this line, move on
                        continue
                    except IndexError:
                        pass
                # Third party removals
                m = re.search('(.+) Removal of (.+): (.+)$', line)
                if m:
                    try:
                        if m.group(3) == 'SUCCESSFUL':
                            the_date = dateutil.parser.parse(m.group(1))
                            (name, version) = m.group(2).rsplit('-',1)
                            process_update_item(name, version, 'third_party', 'removal', the_date,
                                                machine)
                            # We've processed this line, move on
                            continue
                    except IndexError:
                        pass
                # Third party removal failures
                m = re.search('(.+) Removal of (.+): FAILED (.+)$', line)
                if m:
                    try:
                        the_date = dateutil.parser.parse(m.group(1))
                        (name, version) = m.group(2).rsplit('-',1)
                        extra = m.group(3)
                        process_update_item(name, version, 'third_party', 'error', the_date,
                                            machine, extra)
                        # We've processed this line, move on
                        continue
                    except IndexError:
                        pass
                # Apple update install successes
                m = re.search('(.+) Apple Software Update install of (.+): (.+)$', line)
                if m:
                    try:
                        # NOTE(review): this "successes" branch tests for
                        # 'FAILED' yet records an 'install' success, while the
                        # third-party branch above tests 'SUCCESSFUL'. Looks
                        # like a typo — confirm intent before relying on
                        # Apple install history.
                        if m.group(3) == 'FAILED':
                            the_date = dateutil.parser.parse(m.group(1))
                            (name, version) = m.group(2).rsplit('-',1)
                            process_update_item(name, version, 'apple', 'install', the_date,
                                                machine)
                            # We've processed this line, move on
                            continue
                    except IndexError:
                        pass
                # Apple install failures
                m = re.search('(.+) Apple Software Update install of (.+): FAILED (.+)$', line)
                if m:
                    try:
                        the_date = dateutil.parser.parse(m.group(1))
                        (name, version) = m.group(2).rsplit('-',1)
                        extra = m.group(3)
                        process_update_item(name, version, 'apple', 'error', the_date,
                                            machine, extra)
                        # We've processed this line, move on
                        continue
                    except IndexError:
                        pass
            # Hash the log so install_log_hash() can report it to clients.
            machine.install_log_hash = \
                hashlib.sha256(log_str).hexdigest()
            machine.install_log = log_str
            machine.save()
    return HttpResponse("Install Log processed for %s" % serial)
| apache-2.0 |
abzaloid/maps | django-project/lib/python2.7/site-packages/django/db/backends/mysql/schema.py | 16 | 4157 | from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.models import NOT_PROVIDED
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
    """Schema editor emitting MySQL-flavored DDL, with MySQL-specific
    workarounds for defaults, FK indexes and NULL-ness on type changes."""

    sql_rename_table = "RENAME TABLE %(old_table)s TO %(new_table)s"

    sql_alter_column_null = "MODIFY %(column)s %(type)s NULL"
    sql_alter_column_not_null = "MODIFY %(column)s %(type)s NOT NULL"
    sql_alter_column_type = "MODIFY %(column)s %(type)s"
    sql_rename_column = "ALTER TABLE %(table)s CHANGE %(old_column)s %(new_column)s %(type)s"

    sql_delete_unique = "ALTER TABLE %(table)s DROP INDEX %(name)s"

    sql_create_fk = (
        "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY "
        "(%(column)s) REFERENCES %(to_table)s (%(to_column)s)"
    )
    sql_delete_fk = "ALTER TABLE %(table)s DROP FOREIGN KEY %(name)s"

    sql_delete_index = "DROP INDEX %(name)s ON %(table)s"

    alter_string_set_null = 'MODIFY %(column)s %(type)s NULL;'
    alter_string_drop_null = 'MODIFY %(column)s %(type)s NOT NULL;'

    sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
    sql_delete_pk = "ALTER TABLE %(table)s DROP PRIMARY KEY"

    def quote_value(self, value):
        """Escape a literal value using MySQLdb's conversion table."""
        # Inner import to allow module to fail to load gracefully
        import MySQLdb.converters
        return MySQLdb.escape(value, MySQLdb.converters.conversions)

    def skip_default(self, field):
        """
        MySQL doesn't accept default values for longtext and longblob
        and implicitly treats these columns as nullable.
        """
        return field.db_type(self.connection) in ('longtext', 'longblob')

    def add_field(self, model, field):
        """Add the column, then backfill a one-off default when MySQL
        refused to accept one in the column definition."""
        super(DatabaseSchemaEditor, self).add_field(model, field)
        if self.skip_default(field) and field.default not in (None, NOT_PROVIDED):
            sql = 'UPDATE %(table)s SET %(column)s = %%s' % {
                'table': self.quote_name(model._meta.db_table),
                'column': self.quote_name(field.column),
            }
            self.execute(sql, [self.effective_default(field)])

    def _model_indexes_sql(self, model):
        """Suppress explicit index creation for FKs on InnoDB tables,
        where MySQL creates the index automatically."""
        cursor = self.connection.cursor()
        storage = self.connection.introspection.get_storage_engine(
            cursor, model._meta.db_table
        )
        if storage == "InnoDB":
            for field in model._meta.local_fields:
                # Temporarily set db_index to False (in memory only).
                if field.db_index and not field.unique and field.get_internal_type() == "ForeignKey":
                    field.db_index = False
        return super(DatabaseSchemaEditor, self)._model_indexes_sql(model)

    def _delete_composed_index(self, model, fields, *args):
        """
        MySQL can remove an implicit FK index on a field when that field is
        covered by another index like a unique_together. "covered" here means
        that the more complex index starts like the simpler one.
        http://bugs.mysql.com/bug.php?id=37910 / Django ticket #24757
        We check here before removing the [unique|index]_together if we have to
        recreate a FK index.
        """
        first_field = model._meta.get_field(fields[0])
        if first_field.get_internal_type() == 'ForeignKey':
            if not self._constraint_names(model, fields[0], index=True):
                # Recreate the FK index that MySQL silently dropped.
                self.execute(
                    self._create_index_sql(model, [first_field], suffix="")
                )
        return super(DatabaseSchemaEditor, self)._delete_composed_index(model, fields, *args)

    def _alter_column_type_sql(self, table, old_field, new_field, new_type):
        """Carry the old field's NULL-ness into the MODIFY clause; a changed
        null setting is handled separately by the base editor."""
        nullability = " NULL" if old_field.null else " NOT NULL"
        return super(DatabaseSchemaEditor, self)._alter_column_type_sql(
            table, old_field, new_field, new_type + nullability)
| mit |
dnidever/noaosourcecatalog | python/nsc_instcal_combine_qacuts.py | 1 | 31160 | #!/usr/bin/env python
import os
import sys
import numpy as np
import warnings
from astropy.io import fits
from astropy.utils.exceptions import AstropyWarning
from astropy.table import Table, vstack, Column
from astropy.time import Time
import healpy as hp
from dlnpyutils import utils as dln, coords
#import subprocess
import time
from argparse import ArgumentParser
import socket
#from dustmaps.sfd import SFDQuery
from astropy.coordinates import SkyCoord
#from sklearn.cluster import DBSCAN
#from scipy.optimize import least_squares
#from scipy.interpolate import interp1d
# Combine data for one NSC healpix region
if __name__ == "__main__":
parser = ArgumentParser(description='Combine NSC Instcal Catalogs.')
parser.add_argument('version', type=str, nargs=1, help='Version number')
parser.add_argument('--makelist', action='store_true', help='Make healpix list')
parser.add_argument('-r','--redo', action='store_true', help='Redo this HEALPIX')
parser.add_argument('--nmulti', type=int, default=20, help='Number of jobs to run')
parser.add_argument('--nocuts', action='store_true', help='Do not apply any quality cuts')
args = parser.parse_args()
t0 = time.time()
hostname = socket.gethostname()
host = hostname.split('.')[0]
# Inputs
version = args.version
redo = args.redo
makelist = args.makelist
nmulti = args.nmulti
nocuts = args.nocuts
nside = 128
radeg = 180 / np.pi
# on thing/hulk use
if (host == "thing") or (host == "hulk"):
basedir = "/net/dl1/users/dnidever/nsc/instcal/"+version+"/"
mssdir = "/mss1/"
localdir = "/d0/"
tmproot = localdir+"dnidever/nsc/instcal/"+version+"/tmp/"
# on gp09 use
if (host == "gp09") or (host == "gp08") or (host == "gp07") or (host == "gp06") or (host == "gp05"):
basedir = "/net/dl1/users/dnidever/nsc/instcal/"+version+"/"
mssdir = "/net/mss1/"
localdir = "/data0/"
tmproot = localdir+"dnidever/nsc/instcal/"+version+"/tmp/"
t0 = time.time()
# Combine all of the data
if ~os.path.exists(basedir+'combine'): os.mkdir(basedir+'combine/')
if ~os.path.exists(basedir+'combine/logs/'): os.mkdir(basedir+'combine/logs/')
if ~os.path.exists(localdir+'dnidever/nsc/instcal/'+version+'/'): os.mkdir(localdir+'dnidever/nsc/instcal/'+version+'/')
plotsdir = basedir+'plots/'
if ~os.path.exists(plotsdir): os.mkdir(plotsdir)
# Log file
#------------------
# format is nsc_combine_main.DATETIME.log
ltime = time.localtime()
# time.struct_time(tm_year=2019, tm_mon=7, tm_mday=22, tm_hour=0, tm_min=30, tm_sec=20, tm_wday=0, tm_yday=203, tm_isdst=1)
smonth = str(ltime[1])
if ltime[1]<10: smonth = '0'+smonth
sday = str(ltime[2])
if ltime[2]<10: sday = '0'+sday
syear = str(ltime[0])[2:]
shour = str(ltime[3])
if ltime[3]<10: shour='0'+shour
sminute = str(ltime[4])
if ltime[4]<10: sminute='0'+sminute
ssecond = str(int(ltime[5]))
if ltime[5]<10: ssecond='0'+ssecond
logtime = smonth+sday+syear+shour+sminute+ssecond
logfile = basedir+'combine/logs/nsc_instcal_combine_main.'+logtime+'.log'
#JOURNAL,logfile
print("Combining NOAO InstCal catalogs")
#goto,STARTRUNNING
# Restore the calibration summary file
temp = fits.getdata(basedir+'lists/nsc_instcal_calibrate.fits',1)
schema = dict(temp.dtype.fields)
schema['chipindx'] = (int,0)
schema['ngoodchipwcs'] = (int,0)
schema['wcscal'] = (np.str,50)
schema['telstat'] = (np.str,50)
dt = np.dtype(schema)
calstr = np.zeros(len(temp),dtype=dt)
calstr['chipindx'] = -1
for n in temp.dtype.names: calstr[n]=temp[n]
# Add WCSCAL and TELSTAT information
coords = fits.getdata(basedir+'lists/allcoords.fits',1)
fluxfile = calstr['file']
fluxfile = fluxfile.replace('/net','')
ind1,ind2 = dln.match(fluxfile,coords['file'])
calstr['wcscal'][ind1] = coords['wcscal'][ind2] # Failed (3153), Poor (14), Successful (308190)
calstr['telstat'][ind1] = coords['telstat'][ind2] # NAN (68188), Not (1222), Track (241826), UNKNOWN (116), Unknown (5)
# the 2054 failed exposures did not match b/c no fluxfile info
# Only want exposures with successful SE processing
gd,ncalstr = dln.where(calstr['success']==1)
calstr = calstr[gd]
si = np.argsort(calstr['expdir'])
calstr = calstr[si]
chstr = fits.getdata(basedir+'lists/nsc_instcal_calibrate.fits',2)
nchstr = len(chstr)
# Get indices for CHSTR
siexp = np.argsort(chstr['expdir'])
chstr = chstr[siexp]
expdir = chstr['expdir']
brklo,nbrk = dln.where(expdir != np.roll(expdir,1))
brkhi = [brklo[1:nbrk]-1,len(expdir)-1]
nchexp = brkhi-brklo+1
if ncalstr==len(brklo):
Exception('number of exposures in CALSTR and CHSTR do not match')
calstr['chipindx'] = brklo
calstr['nchips'] = nchexp
# Getting number of good chip WCS for each exposures
for i in range(len(calstr): calstr['ngoodchipwcs'][i] = np.sum(chstr['ngaiamatch']][brklo[i]:brkhi[i]+1]>0)
# Fixing absolute paths of flux filename
cfile = calstr['file']
cfile = cfile.replace('/net/mss1/','')
cfile = cfile.replace('/mss1/','')
# Fixing very negative RAs
print('FIXING NEGATIVE RAs in CALSTR and CHSTR')
#bdra, = np.where(chstr.cenra lt -180,nbdra)
bdra,nbdra = dln.where(chstr['cenra']<0)
dum,uibd = np.unique(chstr['expdir'][bdra],return_indices=True)
ind1,ind2 = dln.match(calstr['expdir'],chstr['expdir'][bdra[uibd]])
nmatch = len(ind1)
for i in range(nmatch):
ind3,ind4 = dln.match(chstr['expdir'][bdra],calstr['expdir'][ind1[i]])
# Fix CALSTR RA
chra = chstr['cenra'][bdra[ind3]]
bd1,nbd1 = dln.where(chra < -180)
if nbd1>0: chra[bd1]+=360
cenra = np.mean(dln.minmax(chra))
if cenra<0: cenra+=360
calstr['ra'][ind1[i]] = cenra
# Fix CHSTR CENRA
bd2,nbd2 = dln.where(chra<0)
if nbd2>0: chra[bd2]+=360
chstr['cenra']][bdra[ind3]] = chra
# Fix CHSTR VRA
vra = chstr['vra'][bdra[ind3]]
bd3,nbd3 = dln.where(vra<0)
if nbd3>0: vra[bd3]+=360
chstr['vra'][bdra[ind3]] = vra
# Fix instrument in STR and CHSTR
print('FIXING INSTRUMENT IN STR AND CHSTR')
type = ['c4d','k4m','ksb']
for i=0,len(type)-1:
gd,ngd = dln.where(stregex(calstr.expdir,'/'+type[i]+'/',/boolean)==1)
if ngd>0: calstr[gd].instrument=type[i]
gd,ngd = dln.where(stregex(chstr.expdir,'/'+type[i]+'/',/boolean)==1)
if ngd>0: chstr[gd].instrument=type[i]
## Fix missing AIRMASS
#bdam, = np.where(str.airmass lt 0.9,nbdam)
#for i=0,nbdam-1 do begin
# type = ['c4d','k4m','ksb']
# obs = ['ctio','kpno','kpno']
# MATCH,str[bdam[i]].instrument,type,ind1,ind2,/sort
# obsname = obs[ind2]
# OBSERVATORY,obsname,obstr
# lat = obstr.latitude
# lon = obstr.longitude
# jd = date2jd(str[bdam[i]].dateobs)
# ra = str[bdam[i]].ra
# dec = str[bdam[i]].dec
# str[bdam[i]].airmass = AIRMASS(jd,ra,dec,lat,lon)
#endfor
# THIS IS STILL RETURNING -1, IS ONE OF THE VALUES WRONG??
# APPLY RELEASE-DATE CUTS
list1 = fits.getdata(basedir+'lists/decam_instcal_list.fits',1)
list2 = fits.getdata(basedir+'lists/mosaic3_instcal_list.fits',1)
list3 = fits.getdata(basedir+'lists/bok90prime_instcal_list.fits',1)
elist = np.hstack((list1,list2,list3))
fluxfile = [f[10:] for f in elist['fluxfile']]
ind1,ind2 = dln.match(fluxfile,cfile)
# some don't match because they were from a previous version
# of the input list
release_date = np.zeros(len(calstr),dtype=(np.str,100))+'2020-01-01 00:00:00'
release_date[ind2] = elist['release_date'][ind1]
release_date = release_date.strip().replace(' ','T')
trelease = Time(release_date, format='isot', scale='utc')
#release_cutoff = [2017,4,24] # v1 - April 24, 2017
#release_cutoff = [2017,10,11] # v2 - Oct 11, 2017
release_cutoff = [2019,7,9] # v3 - July 9, 2019
release_date_cutoff = ('%04d-%02d-%02d' % (release_cutoff[0],release_cutoff[1],release_cutoff[2]))+'T00:00:00'
tcutoff = Time(release_date_cutoff, format='isot', scale='utc')
gdrelease,ngdrelease,bdrelease,nbdrelease = dln.where(trelease.mjd <= tcutoff.mjd,comp=True)
print(str(ngdrelease)+' exposures are PUBLIC')
calstr = calstr[gdrelease] # impose the public data cut
# Zero-point structure
dt_zpstr = np.dtype([('instrument',np.str,10),('filter',np.str,10),('amcoef',float,2),('thresh',0)])
zpstr = np.zeros(10,dtype=dtype_zpstr)
zpstr['thresh'] = 0.5
zpstr['instrument'][0:7] = 'c4d'
zpstr['filter'][0:7] = ['u','g','r','i','z','Y','VR']
zpstr['amcoef'][0] = [-1.60273, -0.375253] # c4d-u
zpstr['amcoef'][1] = [0.277124, -0.198037] # c4d-g
zpstr['amcoef'][2] = [0.516382, -0.115443] # c4d-r
zpstr['amcoef'][3] = [0.380338, -0.067439] # c4d-i
zpstr['amcoef'][4] = [0.123924, -0.096877] # c4d-z
zpstr['amcoef'][5] = [-1.06529, -0.051967] # c4d-Y
zpstr['amcoef'][6] = [1.004357, -0.081105] # c4d-VR
# Mosiac3 z-band
zpstr['instrument'][7] = 'k4m'
zpstr['filter'][7] = 'z'
zpstr['amcoef'][7] = [-2.687201, -0.73573] # k4m-z
# Bok 90Prime, g and r
zpstr['instrument'][8] = 'ksb'
zpstr['filter'][8] = 'g'
zpstr['amcoef'][8] = [-2.859646, -1.40837] # ksb-g
zpstr['instrument'][9] = 'ksb'
zpstr['filter'][9] = 'r'
zpstr['amcoef'][9] = [-4.008771, -0.25718] # ksb-r
nzpstr = len(zpstr)
#STOP,'DOUBLE-CHECK THESE ZERO-POINTS!!!'
# APPLY QA CUTS IN ZEROPOINT AND SEEING
if ~nocuts:
print('APPLYING QA CUTS')
#fwhmthresh = 3.0 # arcsec, v1
fwhmthresh = 2.0 # arcsec, v2
#filters = ['u','g','r','i','z','Y','VR']
#nfilters = len(filters)
#zpthresh = [2.0,2.0,2.0,2.0,2.0,2.0,2.0]
#zpthresh = [0.5,0.5,0.5,0.5,0.5,0.5,0.5]
badzpmask = np.zeros(len(calstr),bool)+True
for i in range(nzpstr):
ind,nind = dln.where((calstr['instrument']==zpstr['instrument']][i]) & (calstr['filter']==zpstr['filter'][i]) & (calstr['success']==1))
print(zpstr['instrument'][i]+'-'+zpstr['filter'][i]+' '+str(nind)+' exposures')
if nind>0:
calstr1 = calstr[ind]
zpterm = calstr1['zpterm']
bdzp,nbdzp = dln.where(~np.isfinite(zpterm)) # fix Infinity/NAN
if nbdzp>0:zpterm[bdzp] = 999999.9
am = calstr1['airmass']
mjd = calstr1['mjd']
bdam,nbdam = dln.where(am < 0.9)
if nbdam>0: am[bdam] = np.median(am)
# I GOT TO HERE IN THE TRANSLATING!!!
glactc,calstr1.ra,calstr1.dec,2000.0,glon,glat,1,/deg
# Measure airmass dependence
gg0,ngg0 = dln.where((np.abs(zpterm)<50) & (am<2.0))
coef0 = dln.poly_fit(am[gg0],zpterm[gg0],1,robust=True)
zpf = dln.poly(am,coef0)
sig0 = np.mad(zpterm[gg0]-zpf[gg0])
gg,ngg = dln.where(np.abs(zpterm-zpf) < (np.maximum(3.5*sig0,0.2)))
coef = dln.poly_fit(am[gg],zpterm[gg],1,robust=True)
print(zpstr['instrument'][i]+'-'+zpstr['filter'][i]+' '+str(coef))
# Trim out bad exposures to determine the correlations and make figures
gg,ngg = dln.where(np.abs(zpterm-zpf) lt (3.5*sig0 > 0.2) and calstr1.airmass lt 2.0 and calstr1.fwhm lt 2.0 and calstr1.rarms lt 0.15 &
calstr1.decrms lt 0.15 and calstr1.success eq 1 and calstr1.wcscal eq 'Successful' and calstr1.zptermerr lt 0.05 &
calstr1.zptermsig lt 0.08 and (calstr1.ngoodchipwcs eq calstr1.nchips) &
(calstr1.instrument ne 'c4d' or calstr1.zpspatialvar_nccd le 5 or (calstr1.instrument eq 'c4d' and calstr1.zpspatialvar_nccd gt 5 and calstr1.zpspatialvar_rms lt 0.1)) and $
np.abs(glat) gt 10 and calstr1.nrefmatch gt 100 and calstr1.exptime ge 30)
# Zpterm with airmass dependence removed
relzpterm = zpterm + 25 # 25 to get "absolute" zpterm
relzpterm -= (zpstr['amcoef'][i])[1]*(am-1)
# CURRENTLY K4M/KSB HAVE EXPTIME-DEPENDENCE IN THE ZEROPOINTS!!
if (zpstr['instrument'][i]=='k4m') | (zpstr['instrument'][i]=='ksb'):
print('REMOVING EXPTIME-DEPENDENCE IN K4M/KSB ZEROPOINTS!!!')
relzpterm += 2.5*np.log10(calstr1['exptime'])
# Fit temporal variation in zpterm
mjd0 = 56200
xx = calstr1['mjd'][gg]-mjd0
yy = relzpterm[gg]
invvar = 1.0/calstr1['zptermerr'][gg]**2
nord = 3
bkspace = 200
sset1 = bspline_iterfit(xx,yy,invvar=invvar,nord=nord,bkspace=bkspace,yfit=yfit1)
sig1 = mad(yy-yfit1)
gd,ngd = dln.where(yy-yfit1 > -3*sig1)
# refit
sset = bspline_iterfit(xx[gd],yy[gd],invvar=invvar[gd],nord=nord,bkspace=bkspace)
yfit = bspline_valu(xx,sset)
allzpfit = bspline_valu(calstr1.mjd-mjd0,sset)
# Make some figures
# ZPterm vs. airmass
pfile = plotsdir+zpstr[i].instrument+'-'+zpstr[i].filter+'_zpterm_airmass'
ps_open,pfile,/color,thick=4,/encap
hess,am[gg],relzpterm[gg],dx=0.01,dy=0.02,xr=[0.9,2.5],yr=[-0.5,0.5]+median(relzpterm[gg]),xtit='Airmass',ytit='Zero-point',$
tit=zpstr[i].instrument+'-'+zpstr[i].filter
x = scale_vector(findgen(100),0.5,2.0)
oplot,x,poly(x,coef),co=250
ps_close
ps2png,pfile+'.eps',/eps
# ZPterm vs. time (density)
pfile = plotsdir+zpstr[i].instrument+'-'+zpstr[i].filter+'_zpterm_time_density'
ps_open,pfile,/color,thick=4,/encap
hess,calstr1[gg].mjd-mjd0,relzpterm[gg],dx=2,dy=0.02,yr=[-0.5,0.5]+median(relzpterm[gg]),xtit='Time (days)',ytit='Zero-point',$
tit=zpstr[i].instrument+'-'+zpstr[i].filter
oplot,calstr1[gg].mjd-mjd0,allzpfit[gg],ps=1,sym=0.3,co=250
xyouts,50,-0.45+median(relzpterm[gg]),'MJD!d0!n = '+str(mjd0,2),align=0,charsize=1.2
ps_close
ps2png,pfile+'.eps',/eps
# ZPterm vs. time (points)
pfile = plotsdir+zpstr[i].instrument+'-'+zpstr[i].filter+'_zpterm_time'
ps_open,pfile,/color,thick=4,/encap
plot,calstr1[gg].mjd-mjd0,relzpterm[gg],ps=1,sym=0.5,yr=[-0.5,0.5]+median(relzpterm[gg]),xs=1,ys=1,xtit='Time (days)',ytit='Zero-point',$
tit=zpstr[i].instrument+'-'+zpstr[i].filter,thick=1
oplot,calstr1[gg].mjd-mjd0,allzpfit[gg],ps=1,sym=0.3,co=250
xyouts,50,-0.45+median(relzpterm[gg]),'MJD!d0!n = '+str(mjd0,2),align=0,charsize=1.2
ps_close
ps2png,pfile+'.eps',/eps
# Remove temporal variations to get residual values
relzpterm -= allzpfit
# Find the GOOD exposures
#------------------------
# We are using ADDITIVE zpterm
# calmag = instmag + zpterm
# if there are clouds then instmag is larger/fainter
# and zpterm is smaller (more negative)
#bdind, = np.where(calstr[ind].zpterm-medzp lt -zpthresh[i],nbdind)
gdmask = (relzpterm >= -zpstr['thresh'][i]) & (relzpterm <= zpstr['thresh'][i])
gdind,ngdind,bdind,nbdind = dln.where(gdmask,comp=True)
print(' '+str(nbdind)+' exposures with ZPTERM below the threshold')
if ngdind>0: badzpmask[ind[gdind]] = 0
# Get bad DECaLS and SMASH exposures
badexp = np.zeros(len(calstr),bool)
READCOL,'/home/dnidever/projects/noaosourcecatalog/obslog/smash_badexposures.txt',smashexpnum,format='A',comment='#',/silent
MATCH,int(calstr.expnum),int(smashexpnum),ind1,ind2,/sort,count=nmatch
if nmatch>0:
badexp[ind1] = 1
badexp[ind1] = badexp[ind1] & (calstr['instrument'][ind1]=='c4d') # make sure they are DECam exposures
READCOL,'/home/dnidever/projects/noaosourcecatalog/obslog/decals_bad_expid.txt',decalsexpnum,format='A',comment='#',/silent
MATCH,int(calstr.expnum),int(decalsexpnum),ind1,ind2,/sort,count=nmatch
if nmatch>0:
badexp[ind1] = 1
badexp[ind1] = badexp[ind1] & (calstr['instrument'][ind1]=='c4d') # make sure they are DECam exposures
READCOL,'/home/dnidever/projects/noaosourcecatalog/obslog/mzls_bad_expid.txt',mzlsexpnum,format='A',comment='#',/silent
MATCH,int(calstr.expnum),int(mzlsexpnum),ind1,ind2,/sort,count=nmatch
if nmatch>0:
badexp[ind1] = 1
badexp[ind1] = badexp[ind1] & (calstr['instrument'][ind1]=='k4m') # make sure they are Mosaic3 exposures
# Final QA cuts
# Many of the short u-band exposures have weird ZPTERMs, not sure why
# There are a few exposures with BAD WCS, RA>360!
bdexp,nbdexp = dln.where((calstr['success']==0) | # SE failure
(calstr['wcscal']!='Successful') | # CP WCS failure
(calstr['fwhm']>fwhmthresh) | # bad seeing
(calstr['ra']>360) | # bad WCS/coords
(calstr['rarms']>0.15) | (calstr['decrms']>0.15) | # bad WCS
(badzpmask==1) | # bad ZPTERM
(calstr['zptermerr']>0.05) | # bad ZPTERMERR
(calstr['nrefmatch']<5) | # few phot ref match
(badexp==1) | # bad SMASH/LS exposure
#(calstr['ngoodchipwcs']<calstr['nchips'] | # not all chips astrom calibrated
((calstr['instrument']=='c4d') & (calstr['zpspatialvar_nccd']>5) & (calstr['zpspatialvar_rms']>0.1)))) # bad spatial zpterm
# rarms/decrms, nrefmatch
print('QA cuts remove '+str(nbdexp)+' exposures')
# Remove
torem = np.zeros(nchstr,bool)
for i in range(nbdexp): torem[calstr[bdexp[i]].chipindx:calstr[bdexp[i]].chipindx+calstr[bdexp[i]].nchips-1]=1
bdchstr,nbdchstr = dln.where(torem==1)
REMOVE,bdchstr,chstr
REMOVE,bdexp,calstr
# Get new CHIPINDEX values
# make two arrays of old and new indices to transfer
# the new index values into an array with the size of
# the old CHSTR
trimoldindex = lindgen(nchstr) # index into original array, but "bad" ones removed/trimed
remove,bdchstr,trimoldindex
trimnewindex = lindgen(len(trimoldindex)) # new index of trimmed array
newindex = lonarr(nchstr)-1
newindex[trimoldindex] = trimnewindex # new index in original array
newchipindex = newindex[calstr.chipindx]
str.chipindx = newchipindex
ncalstr = len(calstr)
# SHOULD INCLUDE CUTS ON ZTERMERR OR NPHOTMATCH
#STOP,'SHOULD INCLUDE CUTS ON ZTERMERR OR NPHOTMATCH'
#STARTRUNNING:
# CREATE LIST OF HEALPIX AND OVERLAPPING EXPOSURES
# Which healpix pixels have data
listfile = basedir+'lists/nsc_instcal_combine_healpix_list.fits'
if makelist | ~os.path.exists(listfile):
print('Finding the Healpix pixels with data')
radius = 1.1
dtype_healstr = np.dtype([('file',np.str,200),('base',np.str,200),('pix',int)])
healstr = np.zeros(100000,dtype=dtype_healstr)
nhealstr = len(healstr)
cnt = 0
for i in range(ncalstr):
if i % 1e3 == 0: print(str(i))
theta = (90-calstr[i].dec)/radeg
phi = calstr[i].ra/radeg
ANG2VEC,theta,phi,vec
QUERY_DISC,nside,vec,radius,listpix,nlistpix,/deg,/inclusive
# Use the chip corners to figure out which ones actually overlap
chstr1 = chstr[calstr['chipindx']][i]:calstr['chipindx'][i]+calstr['nchips'][i].nchips]
# rotate to tangent plane so it can handle RA=0/360 and poles properly
ROTSPHCEN,chstr1.vra,chstr1.vdec,calstr[i].ra,calstr[i].dec,vlon,vlat,/gnomic
# loop over healpix
overlap = np.zeros(nlistpix,bool)
for j in range(nlistpix):
PIX2VEC_RING,nside,listpix[j],vec,vertex
vertex = transpose(reform(vertex)) # [1,3,4] -> [4,3]
VEC2ANG,vertex,hdec,hra,/astro
ROTSPHCEN,hra,hdec,calstr[i].ra,calstr[i].dec,hlon,hlat,/gnomic
# loop over chips
for k in range(calstr['nchips'][i]):
overlap[j] >= coords.doPolygonsOverlap(hlon,hlat,vlon[*,k],vlat[*,k])
# Only keep the healpix with real overlaps
gdlistpix,ngdlistpix = dln.where(overlap==1)
if ngdlistpix>0:
listpix = listpix[gdlistpix]
nlistpix = ngdlistpix
else:
del(listpix)
nlistpix = 0
if nlistpix==0:
Exception('No healpix for this exposure. Something is wrong!')
# Add new elements to array
if (cnt+nlistpix)>nhealstr:
old = healstr
healstr = np.zeros(nhealstr+10000,dtype=dtype_healstr)
healstr[0:nhealstr] = old
nhealstr += 1e4
del(old)
# Add to the structure
healstr['file'][cnt:cnt+nlistpix] = calstr['expdir'][i]+'/'+calstr['base'][i]+'_cat.fits'
healstr['base'][cnt:cnt+nlistpix] = calstr['base'][i]
healstr['pix'][cnt:cnt+nlistpix] = listpix
cnt += nlistpix
# Trim extra elements
healstr = healstr[0:cnt]
nhealstr = len(healstr)
# Get uniq pixels
ui = uniq(healstr.pix,sort(healstr.pix))
upix = healstr[ui].pix
nupix = len(upix)
print(calstr(nupix)+' Healpix pixels have overlapping data')
# Get start/stop indices for each pixel
idx = sort(healstr.pix)
healstr = healstr[idx]
q = healstr.pix
lo,nlo = dln.where(q != np.roll(q,1))
#hi, = np.where(q ne shift(q,-1))
hi = [lo[1:nlo-1]-1,nhealstr-1]
nexp = hi-lo+1
dtype_index = np.dtype([('pix',int),('lo',int),('hi',int),('nexp',int)])
index = np.zeros(nupix,dtype=dtype_index)
index['pix'] = upix
index['lo'] = lo
index['hi'] = hi
index['nexp'] = nexp
npix = len(index)
# Replace /net/dl1/ with /dl1/ so it will work on all machines
healstr['file'] = healstr['file'].replace('/net/dl1/','/dl1/')
# Write the full list plus an index
print('Writing list to '+listfile)
Table(healstr).write(listfile)
# append other fits binary tables
hdulist = fits.open(listfile)
hdu = fits.table_to_hdu(Table(indexj)) # second, catalog
hdulist.append(hdu)
hdulist.writeto(listfile,overwrite=True)
hdulist.close()
if os.path.exists(listfile+'.gz'): os.remove(listfile+'.gz')
ret = subprocess.call(['gzip',listfile]) # compress final catalog
# Copy to local directory for faster reading speed
if os.path.exists(localdir+'dnidever/nsc/instcal/'+version+'/'): os.delete(localdir+'dnidever/nsc/instcal/'+version+'/')
os.copy(listfile+'.gz',localdir+'dnidever/nsc/instcal/'+version+'/')
# PUT NSIDE IN HEADER!!
# Using existing list
else:
print('Reading list from '+listfile)
healstr = fits.getdata(listfile,1)
index = fits.getdata(listfile,2)
upix = index['pix']
npix = len(index)
# Copy to local directory for faster reading speed
file_copy,listfile,localdir+'dnidever/nsc/instcal/'+version+'/',/over
# Load the list of healpix pixels for this server to be run LOCALLY
pixfile = basedir+'lists/combine_pix_'+host+'.txt'
READLINE,pixfile,pixlist,count=npixlist
rnd = sort(randomu(1,npixlist)) # RANDOMIZE!!
pixlist = int(pixlist[rnd])
print('Running '+str(npixlist)+' jobs on '+host+' with nmult='+str(nmulti))
cmd = "nsc_instcal_combine,"+str(pixlist,2)+",nside="+str(nside,2)+",version='"+version+"',/local,/filesexist"
if keyword_set(redo) then cmd+=',/redo'
cmddir = strarr(npixlist)+localdir+'dnidever/nsc/instcal/'+version+'/tmp/'
# Now run the combination program on each healpix pixel
a = '' & read,a,prompt='Press RETURN to start'
PBS_DAEMON,cmd,cmddir,jobs=jobs,/hyperthread,/idle,prefix='nsccmb',nmulti=nmulti,wait=1
## Make the commands
#cmd = "nsc_instcal_combine,"+str(index.pix,2)+",nside="+str(nside,2)+",version='"+version+"'"
#if keyword_set(redo) then cmd+=',/redo'
#cmddir = strarr(npix)+localdir+'dnidever/nsc/instcal/'+version+'/tmp/'
## Check if the output file exists
#if not keyword_set(redo) then begin
# outfiles = dir+'combine/'+str(upix/1000,2)+'/'+str(upix,2)+'.fits.gz'
# test = file_test(outfiles)
# gd, = np.where(test eq 0,ngd,comp=bd,ncomp=nbd)
# if nbd gt 0 then begin
# print,str(nbd,2),' files already exist and /redo not set.'
# endif
# if ngd eq 0 then begin
# print,'No files to process'
# return
# endif
# print,str(ngd,2),' files left to process'
# cmd = cmd[gd]
# cmddir = cmddir[gd]
#endif
## Prioritize longest-running jobs FIRST
## Use prediction program
#PIX2ANG_RING,nside,index.pix,theta,phi
#ra = phi*radeg
#dec = 90-theta*radeg
#glactc,ra,dec,2000.0,glon,glat,1,/deg
#dt = predictcombtime(glon,glat,index.nexp)
## Do the sorting
#hsi = reverse(sort(dt))
#cmd = cmd[hsi]
#cmddir = cmddir[hsi]
#dt = dt[hsi]
#index = index[hsi]
# Divide into three using total times
#tot = total(dt>10)
#totcum = total(dt>10,/cum)
#print,min(where(totcum ge tot/3))
#print,min(where(totcum ge 2*tot/3))
#ncmd = len(cmd)
#nhalf = ncmd/2
## Randomize 1st half for hulk/thing/gp09
#cmd1 = cmd[0:(nhalf-1)]
#cmdadir1 = cmddir[0:(nhalf-1)]
#pix1 = index[0:(nhalf-1)].pix
#index1 = index[0:(nhalf-1)]
## now randomize
#rnd = sort(randomu(1,len(cmd1)))
#cmd1 = cmd1[rnd]
#cmddir1 = cmddir1[rnd]
#pix1 = pix1[rnd]
#index1 = index1[rnd]
# Slice it up
## hulk, 1st
##cmd = cmd[0:(nhalf-1):3]
##cmddir = cmddir[0:(nhalf-1):3]
##pix = index[0:(nhalf-1):3].pix
#cmd = cmd1[0:(nhalf/3)-1]
#cmddir = cmddir1[0:(nhalf/3)-1]
#pix = pix1[0:(nhalf/3)-1]
# thing, 2nd
##cmd = cmd[1:(nhalf-1):3]
##cmddir = cmddir[1:(nhalf-1):3]
##pix = index[1:(nhalf-1):3].pix
#cmd = cmd1[(nhalf/3):(2*nhalf/3)-1]
#cmddir = cmddir1[(nhalf/3):(2*nhalf/3)-1]
#pix = pix1[(nhalf/3):(2*nhalf/3)-1]
# gp09, 3rd
##cmd = cmd[2:(nhalf-1):3]
##cmddir = cmddir[2:(nhalf-1):3]
##pix = index[2:(nhalf-1):3].pix
#cmd = cmd1[(2*nhalf/3):*]
#cmddir = cmddir1[(2*nhalf/3):*]
#pix = pix1[(2*nhalf/3):*]
# gp05
#cmd = cmd[nhalf:*:4]
#cmddir = cmddir[nhalf:*:4]
#pix = index[nhalf:*:4].pix
# gp06
#cmd = cmd[nhalf+1:*:4]
#cmddir = cmddir[nhalf+1:*:4]
#pix = index[nhalf+1:*:4].pix
# gp07
#cmd = cmd[nhalf+2:*:4]
#cmddir = cmddir[nhalf+2:*:4]
#pix = index[nhalf+2:*:4].pix
# gp08
#cmd = cmd[nhalf+3:*:4]
#cmddir = cmddir[nhalf+3:*:4]
#pix = index[nhalf+3:*:4].pix
## Prioritize longest-running jobs FIRST
## Load the DECam run times
#sum1 = mrdfits(dir+'nsccmb_summary_hulk.fits',1)
#sum2 = mrdfits(dir+'nsccmb_summary_thing.fits',1)
#sum3 = mrdfits(dir+'nsccmb_summary_gp09.fits',1)
#sum = [sum1,sum2,sum3]
#si = sort(sum.mtime)
#sum = sum[si]
## only keep fairly recent ones
#gd, = np.where(sum.mtime gt 1.4897704e+09,ngd)
#sum = sum[gd]
## Deal with duplicates
#dbl = doubles(sum.pix,count=ndbl)
#alldbl = doubles(sum.pix,/all,count=nalldbl)
#torem = bytarr(nalldbl)
#for i=0,ndbl-1 do begin
# MATCH,sum[alldbl].pix,sum[dbl[i]].pix,ind1,ind2,/sort,count=nmatch
# torem[ind1[0:nmatch-2]] = 1
#endfor
#bd=where(torem eq 1,nbd)
#remove,alldbl[bd],sum
#dt = lonarr(len(index))-1
#MATCH,index.pix,sum.pix,ind1,ind2,/sort,count=nmatch
#dt[ind1] = sum[ind2].dt
## Do the sorting
#hsi = reverse(sort(dt))
#cmd = cmd[hsi]
#cmddir = cmddir[hsi]
#dt = dt[hsi]
#
## Divide into three using total times
#tot = total(dt>10)
#totcum = total(dt>10,/cum)
#print,min(where(totcum ge tot/3))
#print,min(where(totcum ge 2*tot/3))
## Start with healpix with low NEXP and far from MW midplane, LMC/SMC
#pix2ang_ring,nside,index.pix,theta,phi
#pixra = phi*radeg
#pixdec = 90-theta*radeg
#glactc,pixra,pixdec,2000.0,pixgl,pixgb,1,/deg
#cel2lmc,pixra,pixdec,palmc,radlmc
#cel2smc,pixra,pixdec,rasmc,radsmc
#gdpix, = np.where(index.nexp lt 50 and np.abs(pixgb) gt 10 and radlmc gt 5 and radsmc gt 5,ngdpix)
#
#outfile = dldir+'users/dnidever/nsc/instcal/combine/'+str(index.pix,2)+'.fits'
# Now run the combination program on each healpix pixel
PBS_DAEMON,cmd,cmddir,jobs=jobs,/hyperthread,/idle,prefix='nsccmb',nmulti=nmulti,wait=1
# RUN NSC_COMBINE_SUMMARY WHEN IT'S DONE!!!
## Load all the summary/metadata files
#print,'Creating Healpix summary file'
#sumstr = replicate({pix:0L,nexposures:0L,nobjects:0L,success:0},nupix)
#sumstr.pix = upix
#for i=0,nupix-1 do begin
# if (i+1) mod 5000 eq 0 then print,i+1
# file = dir+'combine/'+str(upix[i],2)+'.fits'
# if file_test(file) eq 1 then begin
# meta = MRDFITS(file,1,/silent)
# sumstr[i].nexposures = len(meta)
# hd = headfits(file,exten=2)
# sumstr[i].nobjects = sxpar(hd,'naxis2')
# sumstr[i].success = 1
# endif else begin
# sumstr[i].success = 0
# endelse
#endfor
#gd, = np.where(sumstr.success eq 1,ngd)
#print,str(ngd,2),' Healpix successfully processed'
#print,'Writing summary file to ',dir+'combine/nsc_instcal_combine.fits'
#MWRFITS,sumstr,dir+'combine/nsc_instcal_combine.fits',/create
# End logfile
#------------
#JOURNAL
| mit |
louyihua/origin | cmd/service-catalog/go/src/github.com/kubernetes-incubator/service-catalog/vendor/k8s.io/kubernetes/cluster/juju/layers/kubernetes-master/lib/charms/kubernetes/flagmanager.py | 182 | 4599 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from charmhelpers.core import unitdata
class FlagManager:
    '''
    FlagManager - A Python class for managing the flags to pass to an
    application without remembering what's been set previously.

    This is a blind class assuming the operator knows what they are doing.
    Each instance of this class should be initialized with the intended
    application to manage flags. Flags are then appended to a data-structure
    and cached in unitdata for later recall.

    The underlying data-provider is backed by a SQLITE database on each unit,
    tracking the dictionary, provided from the 'charmhelpers' python package.

    Summary:
    opts = FlagManager('docker')
    opts.add('bip', '192.168.22.2')
    opts.to_s()
    '''

    def __init__(self, daemon, opts_path=None):
        # Flags are persisted in unitdata, keyed on the daemon name.
        self.db = unitdata.kv()
        self.daemon = daemon
        if not self.db.get(daemon):
            self.data = {}
        else:
            self.data = self.db.get(daemon)

    def __save(self):
        # Persist the current flag dictionary back to unitdata.
        self.db.set(self.daemon, self.data)

    def add(self, key, value, strict=False):
        '''
        Adds data to the map of values for the DockerOpts file.
        Supports single values, or "multiopt variables". If you
        have a flag only option, like --tlsverify, set the value
        to None. To preserve the exact value, pass strict
        eg:
        opts.add('label', 'foo')
        opts.add('label', 'foo, bar, baz')
        opts.add('flagonly', None)
        opts.add('cluster-store', 'consul://a:4001,b:4001,c:4001/swarm',
                 strict=True)
        '''
        if strict:
            # Strict values are stored verbatim under a '-strict' suffix key
            # so commas in the value are not split into multiopt entries.
            self.data['{}-strict'.format(key)] = value
            self.__save()
            return

        if value:
            values = [x.strip() for x in value.split(',')]
            if key in self.data and self.data[key] is not None:
                # Merge the new values into the existing list, skipping
                # duplicates so repeated calls stay idempotent.
                item_data = self.data[key]
                for item in values:
                    if item not in item_data:
                        item_data.append(item)
                self.data[key] = item_data
            else:
                # First value(s) for this key.
                self.data[key] = values
        else:
            # Flag-only option (no value), e.g. --tlsverify.
            self.data[key] = None

        self.__save()

    def remove(self, key, value):
        '''
        Remove a flag value from the DockerOpts manager
        Assuming the data is currently {'foo': ['bar', 'baz']}

        d.remove('foo', 'bar')
        > {'foo': ['baz']}

        :params key: the flag whose value list to edit
        :params value: the single value to remove from that list
        '''
        self.data[key].remove(value)
        self.__save()

    def destroy(self, key, strict=False):
        '''
        Destructively remove all values and key from the FlagManager
        Assuming the data is currently {'foo': ['bar', 'baz']}

        d.destroy('foo')
        >{}

        :params key: the flag to remove entirely
        :params strict: remove the '-strict' variant of the key instead
        '''
        try:
            if strict:
                self.data.pop('{}-strict'.format(key))
            else:
                # Bug fix: this previously popped the literal string 'key',
                # so the requested entry was never actually removed.
                self.data.pop(key)
            # Persist the removal, for consistency with add()/remove().
            self.__save()
        except KeyError:
            pass

    def to_s(self):
        '''
        Render the flags to a single string, prepared for the Docker
        Defaults file. Typically in /etc/default/docker

        d.to_s()
        > "--foo=bar --foo=baz"
        '''
        flags = []
        for key in self.data:
            if self.data[key] is None:
                # handle flagonly
                flags.append("{}".format(key))
            elif '-strict' in key:
                # handle strict values, and do it in 2 steps.
                # If we rstrip -strict it strips a tailing s
                proper_key = key.rstrip('strict').rstrip('-')
                flags.append("{}={}".format(proper_key, self.data[key]))
            else:
                # handle multiopt and typical flags
                for item in self.data[key]:
                    flags.append("{}={}".format(key, item))
        return ' '.join(flags)
| apache-2.0 |
Universal-Model-Converter/UMC3.0a | data/Python/x86/Lib/site-packages/PyQt4/uic/Compiler/qobjectcreator.py | 18 | 5568 | #############################################################################
##
## Copyright (C) 2012 Riverbank Computing Limited.
## Copyright (C) 2006 Thorsten Marek.
## All right reserved.
##
## This file is part of PyQt.
##
## You may use this file under the terms of the GPL v2 or the revised BSD
## license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of the Riverbank Computing Limited nor the names
## of its contributors may be used to endorse or promote products
## derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
#############################################################################
import logging
try:
set()
except NameError:
from sets import Set as set
from PyQt4.uic.Compiler.indenter import write_code
from PyQt4.uic.Compiler.qtproxies import QtGui, Literal, strict_getattr
logger = logging.getLogger(__name__)
DEBUG = logger.debug
class _QtGuiWrapper(object):
    """Creator-policy helper that resolves widget classes from QtGui."""

    def search(clsname):
        """Return the QtGui proxy class named *clsname*, or None if absent."""
        found = None
        try:
            found = strict_getattr(QtGui, clsname)
        except AttributeError:
            pass
        return found
    search = staticmethod(search)
class _ModuleWrapper(object):
def __init__(self, name, classes):
if "." in name:
idx = name.rfind(".")
self._package = name[:idx]
self._module = name[idx + 1:]
else:
self._package = None
self._module = name
self._classes = classes
self._used = False
def search(self, cls):
if cls in self._classes:
self._used = True
return type(cls, (QtGui.QWidget,), {"module": self._module})
else:
return None
def _writeImportCode(self):
if self._used:
if self._package is None:
write_code("import %s" % self._module)
else:
write_code("from %s import %s" % (self._package, self._module))
class _CustomWidgetLoader(object):
def __init__(self):
self._widgets = {}
self._usedWidgets = set()
def addCustomWidget(self, widgetClass, baseClass, module):
assert widgetClass not in self._widgets
self._widgets[widgetClass] = (baseClass, module)
def _resolveBaseclass(self, baseClass):
try:
for x in range(0, 10):
try: return strict_getattr(QtGui, baseClass)
except AttributeError: pass
baseClass = self._widgets[baseClass][0]
else:
raise ValueError("baseclass resolve took too long, check custom widgets")
except KeyError:
raise ValueError("unknown baseclass %s" % baseClass)
def search(self, cls):
try:
baseClass = self._resolveBaseclass(self._widgets[cls][0])
DEBUG("resolved baseclass of %s: %s" % (cls, baseClass))
except KeyError:
return None
self._usedWidgets.add(cls)
return type(cls, (baseClass, ), {"module" : ""})
def _writeImportCode(self):
imports = {}
for widget in self._usedWidgets:
_, module = self._widgets[widget]
imports.setdefault(module, []).append(widget)
for module, classes in imports.items():
write_code("from %s import %s" % (module, ", ".join(classes)))
class CompilerCreatorPolicy(object):
    """Policy object used by the uic compiler to create widget proxies."""

    def __init__(self):
        # Wrappers/loaders created so far, in creation order.
        self._modules = []

    def createQtGuiWrapper(self):
        """Return the wrapper that resolves classes from QtGui."""
        return _QtGuiWrapper

    def createModuleWrapper(self, name, classes):
        """Create, record and return a wrapper for the named module."""
        wrapper = _ModuleWrapper(name, classes)
        self._modules.append(wrapper)
        return wrapper

    def createCustomWidgetLoader(self):
        """Create, record and return a loader for custom widgets."""
        loader = _CustomWidgetLoader()
        self._modules.append(loader)
        return loader

    def instantiate(self, clsObject, objectname, ctor_args, is_attribute=True, no_instantiation=False):
        """Instantiate a proxy class with the given constructor arguments."""
        return clsObject(objectname, is_attribute, ctor_args, no_instantiation)

    def invoke(self, rname, method, args):
        """Call *method* for the object named *rname* with *args*."""
        return method(rname, *args)

    def getSlot(self, object, slotname):
        """Return a literal source reference to a slot of *object*."""
        return Literal("%s.%s" % (object, slotname))

    def _writeOutImports(self):
        """Emit the import code collected by all recorded wrappers."""
        for module in self._modules:
            module._writeImportCode()
| mit |
henaras/horizon | horizon/utils/functions.py | 11 | 4716 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import decimal
import math
import re
from oslo_utils import units
import six
from django.conf import settings
from django.contrib.auth import logout # noqa
from django import http
from django.utils.encoding import force_text
from django.utils.functional import lazy # noqa
from django.utils import translation
def _lazy_join(separator, strings):
    """Join *strings* with *separator*, forcing every item to text first.

    Helper for ``lazy_join`` below; kept as a separate function so it can
    be wrapped by Django's lazy() and evaluated only on first use.
    """
    return separator.join([force_text(s)
                           for s in strings])
# Lazily evaluated join: any lazy translation objects inside *strings*
# are resolved only when the result is actually rendered as text.
lazy_join = lazy(_lazy_join, six.text_type)
def bytes_to_gigabytes(bytes):
    """Round a byte count up to the next whole number of gibibytes.

    For example 5000000 (about 5 MB) yields 1.
    """
    gib = float(units.Gi)
    return int(math.ceil(bytes / gib))
def add_logout_reason(request, response, reason):
    """Attach a short-lived ``logout_reason`` cookie to *response*.

    The login page reads this cookie to explain to the user why they
    were logged out.  The message is translated into the language of
    the current request before being stored.
    """
    # Store the translated string in the cookie
    lang = translation.get_language_from_request(request)
    with translation.override(lang):
        reason = six.text_type(reason).encode('utf-8')
        # max_age=10: the cookie only needs to survive the redirect.
        response.set_cookie('logout_reason', reason, max_age=10)
def logout_with_message(request, msg, redirect=True):
    """Send HttpResponseRedirect to LOGOUT_URL.

    `msg` is a message displayed on the login page after the logout, to explain
    the logout reason.

    When *redirect* is true the current path is preserved in the ``next``
    query parameter so the user can return there after logging back in.
    """
    logout(request)
    if redirect:
        response = http.HttpResponseRedirect(
            '%s?next=%s' % (settings.LOGOUT_URL, request.path))
    else:
        response = http.HttpResponseRedirect(settings.LOGOUT_URL)
    add_logout_reason(request, response, msg)
    return response
def get_page_size(request, default=20):
    """Return the user's preferred table page size as an int.

    Lookup order: session, then cookie, then the API_RESULT_PAGE_SIZE
    setting, then *default*.  A stored value that is not a valid integer
    resets the session preference to *default*.
    """
    session = request.session
    cookies = request.COOKIES
    try:
        page_size = int(session.get('horizon_pagesize',
                                    cookies.get('horizon_pagesize',
                                                getattr(settings,
                                                        'API_RESULT_PAGE_SIZE',
                                                        default))))
    except ValueError:
        page_size = session['horizon_pagesize'] = int(default)
    return page_size
def get_log_length(request, default=35):
    """Return how many console-log lines to fetch for an instance.

    Lookup order: session, then cookie, then the INSTANCE_LOG_LENGTH
    setting, then *default*.  A stored value that is not a valid integer
    resets the session preference to *default*.
    """
    fallback = getattr(settings, 'INSTANCE_LOG_LENGTH', default)
    stored = request.session.get(
        'instance_log_length',
        request.COOKIES.get('instance_log_length', fallback))
    try:
        log_length = int(stored)
    except ValueError:
        log_length = request.session['instance_log_length'] = int(default)
    return log_length
def natural_sort(attr):
    """Return a sort-key callable that orders embedded integers numerically.

    The callable reads attribute *attr* from each object (falling back to
    the object itself when the attribute is missing) and splits the string
    into text/number chunks, so that e.g. "host9" sorts before "host10".
    """
    def key(obj):
        text = getattr(obj, attr, obj)
        chunks = re.split(r'(\d+)', text)
        return [int(chunk) if chunk.isdigit() else chunk for chunk in chunks]
    return key
def get_keys(tuple_of_tuples):
    """Return a tuple holding the first element of every pair in
    *tuple_of_tuples*."""
    return tuple(pair[0] for pair in tuple_of_tuples)
def value_for_key(tuple_of_tuples, key):
    """Return the value paired with *key* in a tuple of 2-element tuples.

    If *key* is not present (including an empty sequence), *key* itself
    is returned.
    """
    # The original used a for/else whose else branch always ran when no
    # return fired (there was no break), which reads as if it only ran on
    # exhaustion of a break-able loop.  A plain fall-through return says
    # the same thing unambiguously.
    for pair in tuple_of_tuples:
        if pair[0] == key:
            return pair[1]
    return key
def next_key(tuple_of_tuples, key):
    """Return the key that follows *key* in a tuple of 2-element tuples.

    Returns None when *key* is the last entry or does not appear at all.
    """
    for index, pair in enumerate(tuple_of_tuples):
        if pair[0] != key:
            continue
        if index + 1 < len(tuple_of_tuples):
            return tuple_of_tuples[index + 1][0]
        return None
    return None
def previous_key(tuple_of_tuples, key):
    """Return the key that precedes *key* in a tuple of 2-element tuples.

    Returns None when *key* is the first entry or does not appear at all.

    Bug fix: the previous implementation indexed ``tuple_of_tuples[i - 1]``
    for the first entry; with i == 0 that is index -1, which wraps around
    (Python negative indexing) and wrongly returned the *last* key instead
    of None, never raising the IndexError the except clause expected.
    """
    for i, t in enumerate(tuple_of_tuples):
        if t[0] == key:
            if i == 0:
                # Negative indexing would wrap to the last element.
                return None
            return tuple_of_tuples[i - 1][0]
    return None
def format_value(value):
    """Return *value* as an int when it is integral, otherwise rounded
    to one decimal place.

    The input is routed through Decimal (via str) so binary float noise
    does not affect the rounding.
    """
    as_decimal = decimal.Decimal(str(value))
    if int(as_decimal) == as_decimal:
        return int(as_decimal)
    return round(as_decimal, 1)
| apache-2.0 |
oposs/check_mk_mirror | web/plugins/wato/builtin_attributes.py | 1 | 7976 | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
# Contact groups: editable on folders only, hidden in host tables.
declare_host_attribute(ContactGroupsAttribute(),
                       show_in_table = False,
                       show_in_folder = True)
# "alias" maps to the Nagios alias field; optional free text per host.
declare_host_attribute(NagiosTextAttribute("alias", "alias", _("Alias"),
                       _("A comment or description of this host"),
                       "", mandatory=False),
                       show_in_table = True,
                       show_in_folder = False)
# Explicit IPv4 address / DNS name; only offered for hosts tagged ip-v4.
declare_host_attribute(TextAttribute("ipaddress", _("IPv4 Address"),
                       _("In case the name of the host is not resolvable via <tt>/etc/hosts</tt> "
                         "or DNS by your monitoring server, you can specify an explicit IP "
                         "address or a resolvable DNS name of the host here.<br> <b>Notes</b>:<br> "
                         "1. If you leave this attribute empty, hostname resolution will be done when "
                         "you activate the configuration. "
                         "Check_MKs builtin DNS cache is activated per default in the global "
                         "configuration to speed up the activation process. The cache is normally "
                         "updated daily with a cron job. You can manually update the cache with the "
                         "command <tt>cmk -v --update-dns-cache</tt>.<br>"
                         "2. If you enter a DNS name here, the DNS resolution will be carried out "
                         "each time the host is checked. Check_MKs DNS cache will NOT be queried. "
                         "Use this only for hosts with dynamic IP addresses."
                         ),
                       allow_empty = False),
                       show_in_table = True,
                       show_in_folder = False,
                       depends_on_tags = ["ip-v4"])
# Explicit IPv6 address / DNS name; only offered for hosts tagged ip-v6.
declare_host_attribute(TextAttribute("ipv6address", _("IPv6 Address"),
                       _("In case the name of the host is not resolvable via <tt>/etc/hosts</tt> "
                         "or DNS by your monitoring server, you can specify an explicit IPv6 "
                         "address or a resolvable DNS name of the host here.<br> <b>Notes</b>:<br> "
                         "1. If you leave this attribute empty, hostname resolution will be done when "
                         "you activate the configuration. "
                         "Check_MKs builtin DNS cache is activated per default in the global "
                         "configuration to speed up the activation process. The cache is normally "
                         "updated daily with a cron job. You can manually update the cache with the "
                         "command <tt>cmk -v --update-dns-cache</tt>.<br>"
                         "2. If you enter a DNS name here, the DNS resolution will be carried out "
                         "each time the host is checked. Check_MKs DNS cache will NOT be queried. "
                         "Use this only for hosts with dynamic IP addresses."
                         ),
                       allow_empty = False),
                       show_in_table = True,
                       show_in_folder = False,
                       depends_on_tags = ["ip-v6"])
# Per-host SNMP community; overrides the "SNMP Communities" ruleset.
declare_host_attribute(TextAttribute("snmp_community", _("SNMP Community"),
                       _("Using this option you can configure the community which should be used when "
                         "contacting this host via SNMP v1 or v2. It is possible to configure the SNMP community by "
                         "using the <a href=\"%s\">SNMP Communities</a> ruleset, but when you configure "
                         "a community here, this will override the community defined by the rules.") % \
                         html.makeuri([('mode', 'edit_ruleset'), ('varname', 'snmp_communities')]),
                       allow_empty = False),
                       show_in_table = False,
                       show_in_folder = True,
                       depends_on_tags = ['snmp'])
# Attribute for configuring parents
class ParentsAttribute(ValueSpecAttribute):
    """Host attribute holding the list of parent host names.

    Parents describe reachability: a host whose parents are all down is
    considered unreachable and is not actively monitored.
    """
    def __init__(self):
        ValueSpecAttribute.__init__(self, "parents",
               ListOfStrings(
                   title = _("Parents"),
                   help = _("Parents are used to configure the reachability of hosts by the "
                      "monitoring server. A host is considered to be <b>unreachable</b> if all "
                      "of its parents are unreachable or down. Unreachable hosts will not be "
                      "actively monitored.<br><br><b>Clusters</b> automatically configure all "
                      "of their nodes as parents, but only if you do not configure parents "
                      "manually.<br><br>In a distributed setup make sure that the host and all "
                      "of its parents are monitored by the same site."),
                   orientation = "horizontal"))
    def to_nagios(self, value):
        # Nagios expects a comma-separated list; an empty list yields None
        # (attribute omitted from the generated config).
        if value:
            return ",".join(value)
    def nagios_name(self):
        # Name of the corresponding Nagios host attribute.
        return "parents"
    def paint(self, value, hostname):
        # Render each parent as a link to its WATO host-edit page.
        parts = [ '<a href="%s">%s</a>' % (
           "wato.py?" + html.urlencode_vars([("mode", "edithost"), ("host", hn)]), hn)
           for hn in value ]
        return "", ", ".join(parts)
# Register the parents attribute; editable per host and per folder.
declare_host_attribute(ParentsAttribute(),
                       show_in_table = True,
                       show_in_folder = True)
def validate_host_parents(effective_host):
    """Validation hook: every configured parent must exist and, in a
    distributed WATO setup, be monitored on the same site as the host.

    Raises MKUserError on the first violation found.
    """
    for parentname in effective_host["parents"]:
        parent_folder = find_host(parentname)
        if not parent_folder:
            raise MKUserError(None, _("You defined the non-existing host '%s' as a parent.") % parentname)
        # In case of distributed wato check also if site of host and parent
        # are the same.
        if is_distributed():
            parent = effective_attributes(parent_folder[".hosts"][parentname], parent_folder)
            if effective_host["site"] != parent["site"]:
                raise MKUserError(None, _("The parent '%s' is monitored on site '%s' while the host itself "
                    "is monitored on site '%s'. Both must be monitored on the same site. Remember: The parent/child "
                    "relation is used to describe the reachability of hosts by one monitoring daemon.") %
                    (parentname, parent["site"], effective_host["site"]))
# Run the check above whenever a host is validated by WATO.
register_hook('validate-host', validate_host_parents)
| gpl-2.0 |
JazzeYoung/VeryDeepAutoEncoder | theano/misc/tests/test_pycuda_utils.py | 1 | 2362 | from __future__ import absolute_import, print_function, division
import numpy
import theano.sandbox.cuda as cuda
import theano.misc.pycuda_init
if not theano.misc.pycuda_init.pycuda_available: # noqa
from nose.plugins.skip import SkipTest
raise SkipTest("Pycuda not installed. Skip test of theano op with pycuda "
"code.")
if cuda.cuda_available is False: # noqa
from nose.plugins.skip import SkipTest
raise SkipTest('Optional theano package cuda disabled')
from theano.misc.pycuda_utils import to_gpuarray, to_cudandarray
import pycuda.gpuarray
def test_to_gpuarray():
    """to_gpuarray() must share memory with a contiguous CudaNdarray,
    copy when copyif=True and the source is strided, and raise
    ValueError for a strided source without copyif."""
    cx = cuda.CudaNdarray.zeros((5, 4))
    px = to_gpuarray(cx)
    assert isinstance(px, pycuda.gpuarray.GPUArray)
    cx[0, 0] = numpy.asarray(1, dtype="float32")
    # Check that they share the same memory space
    assert px.gpudata == cx.gpudata
    assert numpy.asarray(cx[0, 0]) == 1
    assert numpy.allclose(numpy.asarray(cx), px.get())
    assert px.dtype == cx.dtype
    assert px.shape == cx.shape
    # The *4 suggests cx._strides counts float32 elements while
    # px.strides is in bytes -- TODO confirm against pycuda_utils.
    assert all(numpy.asarray(cx._strides) * 4 == px.strides)
    # Test when the CudaNdarray is strided
    cx = cx[::2, ::]
    px = to_gpuarray(cx, copyif=True)
    assert isinstance(px, pycuda.gpuarray.GPUArray)
    cx[0, 0] = numpy.asarray(2, dtype="float32")
    # Check that they do not share the same memory space
    assert px.gpudata != cx.gpudata
    assert numpy.asarray(cx[0, 0]) == 2
    assert not numpy.allclose(numpy.asarray(cx), px.get())
    assert px.dtype == cx.dtype
    assert px.shape == cx.shape
    assert not all(numpy.asarray(cx._strides) * 4 == px.strides)
    # Test that we return an error
    # (strided input without copyif must raise ValueError)
    try:
        px = to_gpuarray(cx)
        assert False
    except ValueError:
        pass
def test_to_cudandarray():
    """to_cudandarray() must wrap a float32 GPUArray without copying and
    raise ValueError for unsupported dtypes or non-GPUArray inputs."""
    px = pycuda.gpuarray.zeros((3, 4, 5), 'float32')
    cx = to_cudandarray(px)
    assert isinstance(cx, cuda.CudaNdarray)
    assert numpy.allclose(px.get(),
                          numpy.asarray(cx))
    assert px.dtype == cx.dtype
    assert px.shape == cx.shape
    # The *4 suggests cx._strides counts float32 elements while
    # px.strides is in bytes -- TODO confirm against pycuda_utils.
    assert all(numpy.asarray(cx._strides) * 4 == px.strides)
    # float64 is rejected (CudaNdarray is float32-only).
    try:
        px = pycuda.gpuarray.zeros((3, 4, 5), 'float64')
        to_cudandarray(px)
        assert False
    except ValueError:
        pass
    # Plain numpy arrays are rejected as well.
    try:
        to_cudandarray(numpy.zeros(4))
        assert False
    except ValueError:
        pass
| bsd-3-clause |
keimlink/django-cms | cms/models/static_placeholder.py | 49 | 3452 | import uuid
from django.contrib.auth import get_permission_codename
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from cms.models.fields import PlaceholderField
from cms.utils.copy_plugins import copy_plugins_to
def static_slotname(instance):
    """Return the slot name for a static placeholder field.

    The slot is simply the instance's ``code`` value.
    """
    slot = instance.code
    return slot
@python_2_unicode_compatible
class StaticPlaceholder(models.Model):
    """A placeholder whose content is shared across all pages showing it.

    Two plugin trees are kept: ``draft`` (edited in the admin) and
    ``public`` (rendered to site visitors).  publish() copies
    draft -> public.
    """
    # How the instance came into existence: referenced from a template,
    # or created programmatically.
    CREATION_BY_TEMPLATE = 'template'
    CREATION_BY_CODE = 'code'
    CREATION_METHODS = (
        (CREATION_BY_TEMPLATE, _('by template')),
        (CREATION_BY_CODE, _('by code')),
    )
    # Human-readable label (admin only, never shown to end users).
    name = models.CharField(
        verbose_name=_(u'static placeholder name'), max_length=255, blank=True, default='',
        help_text=_(u'Descriptive name to identify this static placeholder. Not displayed to users.'))
    # Identifier used to reference this placeholder from templates;
    # auto-generated in clean() when left blank.
    code = models.CharField(
        verbose_name=_(u'placeholder code'), max_length=255, blank=True,
        help_text=_(u'To render the static placeholder in templates.'))
    draft = PlaceholderField(static_slotname, verbose_name=_(u'placeholder content'), related_name='static_draft')
    public = PlaceholderField(static_slotname, editable=False, related_name='static_public')
    # True when draft has unpublished changes; reset by publish().
    dirty = models.BooleanField(default=False, editable=False)
    creation_method = models.CharField(
        verbose_name=_('creation_method'), choices=CREATION_METHODS,
        default=CREATION_BY_CODE, max_length=20, blank=True,
    )
    # Optional site binding; a NULL site means "all sites" (see clean()).
    site = models.ForeignKey(Site, null=True, blank=True)
    class Meta:
        verbose_name = _(u'static placeholder')
        verbose_name_plural = _(u'static placeholders')
        app_label = 'cms'
        unique_together = (('code', 'site'),)
    def __str__(self):
        return self.name
    def clean(self):
        # TODO: check for clashes if the random code is already taken
        if not self.code:
            self.code = u'static-%s' % uuid.uuid4()
        if not self.site:
            # The DB unique_together does not cover NULL sites, so
            # enforce code uniqueness among site-less placeholders here.
            placeholders = StaticPlaceholder.objects.filter(code=self.code, site__isnull=True)
            if self.pk:
                placeholders = placeholders.exclude(pk=self.pk)
            if placeholders.exists():
                raise ValidationError(_("A static placeholder with the same site and code already exists"))
    def publish(self, request, language, force=False):
        """Copy the draft plugins for *language* to the public placeholder.

        Returns True when publishing happened, False when the user lacks
        permission and *force* is not set.
        """
        if force or self.has_publish_permission(request):
            self.public.clear(language=language)
            plugins = self.draft.get_plugins_list(language=language)
            copy_plugins_to(plugins, self.public, no_signals=True)
            self.dirty = False
            self.save()
            return True
        return False
    def has_change_permission(self, request):
        # Superusers may always edit; others need the model's change perm.
        if request.user.is_superuser:
            return True
        opts = self._meta
        return request.user.has_perm(opts.app_label + '.' + get_permission_codename('change', opts))
    def has_publish_permission(self, request):
        # Publishing requires the change permission plus publish_page.
        if request.user.is_superuser:
            return True
        opts = self._meta
        return request.user.has_perm(opts.app_label + '.' + get_permission_codename('change', opts)) and \
               request.user.has_perm(opts.app_label + '.' + 'publish_page')
| bsd-3-clause |
perkinslr/pypyjs | addedLibraries/twisted/plugin.py | 1 | 8349 | # -*- test-case-name: twisted.test.test_plugin -*-
# Copyright (c) 2005 Divmod, Inc.
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Plugin system for Twisted.
@author: Jp Calderone
@author: Glyph Lefkowitz
"""
import os
import sys
from zope.interface import Interface, providedBy
def _determinePickleModule():
"""
Determine which 'pickle' API module to use.
"""
try:
import cPickle
return cPickle
except ImportError:
import pickle
return pickle
pickle = _determinePickleModule()
from twisted.python.components import getAdapterFactory
from twisted.python.reflect import namedAny
from twisted.python import log
from twisted.python.modules import getModule
class IPlugin(Interface):
    """
    Marker interface that must be implemented by all plugins.

    Only objects which implement this interface will be considered for return
    by C{getPlugins}.  To be useful, plugins should also implement some other
    application-specific interface.
    """
class CachedPlugin(object):
    """One plugin entry recorded in a dropin cache.

    Instantiation registers the new object with its owning dropin's
    plugin list as a side effect.
    """

    def __init__(self, dropin, name, description, provided):
        self.dropin = dropin
        self.name = name
        self.description = description
        self.provided = provided
        # Register ourselves with the dropin we belong to.
        self.dropin.plugins.append(self)

    def __repr__(self):
        providedNames = ', '.join([iface.__name__ for iface in self.provided])
        return '<CachedPlugin %r/%r (provides %r)>' % (
            self.name, self.dropin.moduleName, providedNames)

    def load(self):
        """Import and return the actual plugin object."""
        fqn = '%s.%s' % (self.dropin.moduleName, self.name)
        return namedAny(fqn)

    def __conform__(self, interface, registry=None, default=None):
        """Adapt this cached entry to *interface*, loading it on demand."""
        for providedInterface in self.provided:
            if providedInterface.isOrExtends(interface):
                return self.load()
            factory = getAdapterFactory(providedInterface, interface, None)
            if factory is not None:
                return interface(self.load(), default)
        return default

    # backwards compat HOORJ
    getComponent = __conform__
class CachedDropin(object):
    """
    The collection of cached plugins found in one module of a plugin
    package.

    @type moduleName: C{str}
    @ivar moduleName: Fully qualified name of the plugin module this
        entry describes.

    @type description: C{str} or C{NoneType}
    @ivar description: Short explanation of this module's plugins
        (usually the plugin module's docstring).

    @type plugins: C{list}
    @ivar plugins: The L{CachedPlugin} instances loaded from this dropin.
    """
    def __init__(self, moduleName, description):
        self.moduleName = moduleName
        self.description = description
        # Populated by CachedPlugin.__init__ as plugins are discovered.
        self.plugins = []
def _generateCacheEntry(provider):
    """Build a L{CachedDropin} describing every IPlugin found in the
    *provider* module's namespace."""
    dropin = CachedDropin(provider.__name__,
                          provider.__doc__)
    for k, v in provider.__dict__.iteritems():
        # IPlugin(v, None) adapts v, yielding None for non-plugins.
        plugin = IPlugin(v, None)
        if plugin is not None:
            # Instantiated for its side-effects.
            CachedPlugin(dropin, k, v.__doc__, list(providedBy(plugin)))
    return dropin
# Compatibility shim: use dict.fromkeys where the running Python provides
# it, otherwise fall back to a hand-rolled equivalent.
try:
    fromkeys = dict.fromkeys
except AttributeError:
    def fromkeys(keys, value=None):
        d = {}
        for k in keys:
            d[k] = value
        return d
def getCache(module):
    """
    Compute all the possible loadable plugins, while loading as few as
    possible and hitting the filesystem as little as possible.

    Each directory contributing modules to the package gets its own
    dropin.cache pickle; a module is (re)loaded only when it is missing
    from the cache or newer than the cache file.

    @param module: a Python module object. This represents a package to search
    for plugins.

    @return: a dictionary mapping module names to L{CachedDropin} instances.
    """
    allCachesCombined = {}
    mod = getModule(module.__name__)
    # don't want to walk deep, only immediate children.
    buckets = {}
    # Fill buckets with modules by related entry on the given package's
    # __path__. There's an abstraction inversion going on here, because this
    # information is already represented internally in twisted.python.modules,
    # but it's simple enough that I'm willing to live with it. If anyone else
    # wants to fix up this iteration so that it's one path segment at a time,
    # be my guest. --glyph
    for plugmod in mod.iterModules():
        fpp = plugmod.filePath.parent()
        if fpp not in buckets:
            buckets[fpp] = []
        bucket = buckets[fpp]
        bucket.append(plugmod)
    for pseudoPackagePath, bucket in buckets.iteritems():
        dropinPath = pseudoPackagePath.child('dropin.cache')
        try:
            lastCached = dropinPath.getModificationTime()
            dropinDotCache = pickle.load(dropinPath.open('r'))
        # NOTE(review): bare except -- any missing, unreadable or corrupt
        # cache file silently forces a full rebuild below.
        except:
            dropinDotCache = {}
            lastCached = 0
        needsWrite = False
        existingKeys = {}
        for pluginModule in bucket:
            pluginKey = pluginModule.name.split('.')[-1]
            existingKeys[pluginKey] = True
            # Reload when uncached or when the module file is at least as
            # new as the cache file.
            if ((pluginKey not in dropinDotCache) or
                (pluginModule.filePath.getModificationTime() >= lastCached)):
                needsWrite = True
                try:
                    provider = pluginModule.load()
                except:
                    # dropinDotCache.pop(pluginKey, None)
                    log.err()
                else:
                    entry = _generateCacheEntry(provider)
                    dropinDotCache[pluginKey] = entry
        # Make sure that the cache doesn't contain any stale plugins.
        for pluginKey in dropinDotCache.keys():
            if pluginKey not in existingKeys:
                del dropinDotCache[pluginKey]
                needsWrite = True
        # Cache writing is best-effort; failures are logged, never raised.
        if needsWrite and os.environ.get("TWISTED_DISABLE_WRITING_OF_PLUGIN_CACHE") is None:
            try:
                dropinPath.setContent(pickle.dumps(dropinDotCache))
            except OSError, e:
                log.msg(
                    format=(
                        "Unable to write to plugin cache %(path)s: error "
                        "number %(errno)d"),
                    path=dropinPath.path, errno=e.errno)
            except:
                log.err(None, "Unexpected error while writing cache file")
        allCachesCombined.update(dropinDotCache)
    return allCachesCombined
def getPlugins(interface, package=None):
    """
    Retrieve all plugins implementing the given interface beneath the given module.

    @param interface: An interface class. Only plugins which implement this
    interface will be returned.

    @param package: A package beneath which plugins are installed. For
    most uses, the default value is correct.

    @return: An iterator of plugins.
    """
    if package is None:
        import twisted.plugins as package
    allDropins = getCache(package)
    for dropin in allDropins.itervalues():
        for plugin in dropin.plugins:
            try:
                adapted = interface(plugin, None)
            # NOTE(review): bare except -- adaption errors are logged and
            # the offending plugin is skipped rather than aborting the scan.
            except:
                log.err()
            else:
                if adapted is not None:
                    yield adapted
# Old, backwards compatible name. Don't use this.
getPlugIns = getPlugins
def pluginPackagePaths(name):
    """
    Return the additional directories which should be searched for modules
    belonging to the named plugin package.

    @type name: C{str}
    @param name: The fully-qualified Python name of a plugin package, eg
        C{'twisted.plugins'}.

    @rtype: C{list} of C{str}
    @return: The absolute paths to other directories which may contain plugin
        modules for the named plugin package.
    """
    segments = name.split('.')
    # A sys.path entry qualifies only when it does NOT already contain the
    # real package (i.e. no __init__.py there); such a directory may still
    # supply supplementary plugin modules.  Note that directories which do
    # not exist are kept in the result, and that only '__init__.py' marks
    # a directory as the package proper -- a package with only an
    # __init__.pyc would wrongly be treated as a plugin directory.
    marker = segments + ['__init__.py']
    paths = []
    for entry in sys.path:
        if os.path.exists(os.path.join(entry, *marker)):
            continue
        paths.append(os.path.abspath(os.path.join(entry, *segments)))
    return paths
__all__ = ['getPlugins', 'pluginPackagePaths']
| mit |
40223246/w16b_test | static/Brython3.1.3-20150514-095342/Lib/os.py | 635 | 35582 | r"""OS routines for Mac, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, os2, or ce, e.g. unlink, stat, etc.
- os.path is either posixpath or ntpath
- os.name is either 'posix', 'nt', 'os2' or 'ce'.
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator (always '.')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
import sys, errno
import stat as st
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
"defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR",
"SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen",
"popen", "extsep"]
def _exists(name):
return name in globals()
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
# Any new dependencies of the os module and/or changes in path separator
# requires updating importlib as well.
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
__all__.append('_exit')
except ImportError:
pass
import posixpath as path
try:
from posix import _have_functions
except ImportError:
pass
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
__all__.append('_exit')
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
try:
from nt import _have_functions
except ImportError:
pass
elif 'os2' in _names:
name = 'os2'
linesep = '\r\n'
from os2 import *
try:
from os2 import _exit
__all__.append('_exit')
except ImportError:
pass
if sys.version.find('EMX GCC') == -1:
import ntpath as path
else:
import os2emxpath as path
from _emx_link import link
import os2
__all__.extend(_get_exports_list(os2))
del os2
try:
from os2 import _have_functions
except ImportError:
pass
elif 'ce' in _names:
name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
__all__.append('_exit')
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
try:
from ce import _have_functions
except ImportError:
pass
else:
raise ImportError('no os specific module found')
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
if _exists("_have_functions"):
_globals = globals()
def _add(str, fn):
if (fn in _globals) and (str in _have_functions):
_set.add(_globals[fn])
_set = set()
_add("HAVE_FACCESSAT", "access")
_add("HAVE_FCHMODAT", "chmod")
_add("HAVE_FCHOWNAT", "chown")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_FUTIMESAT", "utime")
_add("HAVE_LINKAT", "link")
_add("HAVE_MKDIRAT", "mkdir")
_add("HAVE_MKFIFOAT", "mkfifo")
_add("HAVE_MKNODAT", "mknod")
_add("HAVE_OPENAT", "open")
_add("HAVE_READLINKAT", "readlink")
_add("HAVE_RENAMEAT", "rename")
_add("HAVE_SYMLINKAT", "symlink")
_add("HAVE_UNLINKAT", "unlink")
_add("HAVE_UNLINKAT", "rmdir")
_add("HAVE_UTIMENSAT", "utime")
supports_dir_fd = _set
_set = set()
_add("HAVE_FACCESSAT", "access")
supports_effective_ids = _set
_set = set()
_add("HAVE_FCHDIR", "chdir")
_add("HAVE_FCHMOD", "chmod")
_add("HAVE_FCHOWN", "chown")
_add("HAVE_FDOPENDIR", "listdir")
_add("HAVE_FEXECVE", "execve")
_set.add(stat) # fstat always works
_add("HAVE_FTRUNCATE", "truncate")
_add("HAVE_FUTIMENS", "utime")
_add("HAVE_FUTIMES", "utime")
_add("HAVE_FPATHCONF", "pathconf")
if _exists("statvfs") and _exists("fstatvfs"): # mac os x10.3
_add("HAVE_FSTATVFS", "statvfs")
supports_fd = _set
_set = set()
_add("HAVE_FACCESSAT", "access")
# Some platforms don't support lchmod(). Often the function exists
# anyway, as a stub that always returns ENOSUP or perhaps EOPNOTSUPP.
# (No, I don't know why that's a good design.) ./configure will detect
# this and reject it--so HAVE_LCHMOD still won't be defined on such
# platforms. This is Very Helpful.
#
# However, sometimes platforms without a working lchmod() *do* have
# fchmodat(). (Examples: Linux kernel 3.2 with glibc 2.15,
# OpenIndiana 3.x.) And fchmodat() has a flag that theoretically makes
# it behave like lchmod(). So in theory it would be a suitable
# replacement for lchmod(). But when lchmod() doesn't work, fchmodat()'s
# flag doesn't work *either*. Sadly ./configure isn't sophisticated
# enough to detect this condition--it only determines whether or not
# fchmodat() minimally works.
#
# Therefore we simply ignore fchmodat() when deciding whether or not
# os.chmod supports follow_symlinks. Just checking lchmod() is
# sufficient. After all--if you have a working fchmodat(), your
# lchmod() almost certainly works too.
#
# _add("HAVE_FCHMODAT", "chmod")
_add("HAVE_FCHOWNAT", "chown")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_LCHFLAGS", "chflags")
_add("HAVE_LCHMOD", "chmod")
if _exists("lchown"): # mac os x10.3
_add("HAVE_LCHOWN", "chown")
_add("HAVE_LINKAT", "link")
_add("HAVE_LUTIMES", "utime")
_add("HAVE_LSTAT", "stat")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_UTIMENSAT", "utime")
_add("MS_WINDOWS", "stat")
supports_follow_symlinks = _set
del _set
del _have_functions
del _globals
del _add
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
# Other possible SEEK values are directly imported from posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
def _get_masked_mode(mode):
mask = umask(0)
umask(mask)
return mode & ~mask
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0o777, exist_ok=False):
    """makedirs(path [, mode=0o777][, exist_ok=False])

    Super-mkdir; create a leaf directory and all intermediate ones.
    Works like mkdir, except that any intermediate path segment (not
    just the rightmost) will be created if it does not exist.  If the
    target directory already exists, raise an OSError if exist_ok is
    False.  Otherwise no exception is raised.  This is recursive.

    Note: unlike the previous implementation, exist_ok=True no longer
    compares the existing directory's permission bits against *mode*
    (CPython issue #13498): the mode comparison raised spurious errors
    because of umask and S_ISGID interactions, and was removed upstream.
    """
    head, tail = path.split(name)
    if not tail:
        # Path ended with a separator; split again for a real component.
        head, tail = path.split(head)
    if head and tail and not path.exists(head):
        try:
            makedirs(head, mode, exist_ok=exist_ok)
        except OSError as e:
            # be happy if someone already created the path
            if e.errno != errno.EEXIST:
                raise
        cdir = curdir
        if isinstance(tail, bytes):
            cdir = bytes(curdir, 'ASCII')
        if tail == cdir:        # xxx/newdir/. exists if xxx/newdir exists
            return
    try:
        mkdir(name, mode)
    except OSError as e:
        # exist_ok only suppresses "already exists" for a real directory.
        if not exist_ok or e.errno != errno.EEXIST or not path.isdir(name):
            raise
def removedirs(name):
    """removedirs(path)

    Remove a leaf directory, then prune every ancestor that becomes
    empty as a result.

    The leaf itself must be removable (any error there propagates);
    errors while pruning ancestors are swallowed, since they generally
    just mean a directory was not empty.
    """
    rmdir(name)
    parent, leaf = path.split(name)
    if not leaf:
        # Path ended with a separator; split again for a real component.
        parent, leaf = path.split(parent)
    while parent and leaf:
        try:
            rmdir(parent)
        except error:
            break
        parent, leaf = path.split(parent)
def renames(old, new):
    """renames(old, new)

    Rename *old* to *new*, creating any directories missing from *new*'s
    path first and pruning directories left empty along *old*'s path
    afterwards (errors during pruning are ignored).

    Note: this function can fail with the new directory structure made
    if you lack permissions needed to unlink the leaf directory or file.
    """
    new_dir, new_leaf = path.split(new)
    if new_dir and new_leaf and not path.exists(new_dir):
        makedirs(new_dir)
    rename(old, new)
    old_dir, old_leaf = path.split(old)
    if old_dir and old_leaf:
        try:
            removedirs(old_dir)
        except error:
            pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
    """Directory tree generator yielding (dirpath, dirnames, filenames).

    For every directory in the tree rooted at *top* (including *top*
    itself, excluding '.' and '..') one 3-tuple is produced.  dirnames
    and filenames hold bare names; join them with dirpath for full
    paths.

    With topdown true (the default) a directory is yielded before its
    subdirectories, and the caller may prune or reorder the dirnames
    list in place to control which subdirectories are visited and in
    what order.  With topdown false the directory is yielded after all
    of its subdirectories, so modifying dirnames has no effect.

    Errors from the listdir() call are ignored by default.  If *onerror*
    is given it is called with the os.error instance; it may report the
    error and return to continue, or raise to abort the walk.  The
    failing name is available as the exception's filename attribute.

    Symbolic links to directories are not followed unless *followlinks*
    is true.  If *top* is relative, do not change the current working
    directory between resumptions of the generator.
    """
    islink, join, isdir = path.islink, path.join, path.isdir
    # We may lack read permission on top; mirror the historical
    # behaviour of delegating the error rather than blowing up when
    # many readable directories are still left to visit.
    try:
        names = listdir(top)
    except error as err:
        if onerror is not None:
            onerror(err)
        return
    subdirs = []
    regular = []
    for entry in names:
        if isdir(join(top, entry)):
            subdirs.append(entry)
        else:
            regular.append(entry)
    if topdown:
        yield top, subdirs, regular
    # Iterate subdirs only after yielding, so topdown callers can prune
    # or reorder the very list object we handed out.
    for entry in subdirs:
        child = join(top, entry)
        if followlinks or not islink(child):
            yield from walk(child, topdown, onerror, followlinks)
    if not topdown:
        yield top, subdirs, regular
__all__.append("walk")
# fwalk() needs dir_fd support in open()/stat() and fd support in
# listdir()/stat(); only define it when the platform provides all of that.
if {open, stat} <= supports_dir_fd and {listdir, stat} <= supports_fd:

    def fwalk(top=".", topdown=True, onerror=None, *, follow_symlinks=False, dir_fd=None):
        """Directory tree generator.

        This behaves exactly like walk(), except that it yields a 4-tuple

            dirpath, dirnames, filenames, dirfd

        `dirpath`, `dirnames` and `filenames` are identical to walk() output,
        and `dirfd` is a file descriptor referring to the directory `dirpath`.

        The advantage of fwalk() over walk() is that it's safe against symlink
        races (when follow_symlinks is False).

        If dir_fd is not None, it should be a file descriptor open to a
        directory, and top should be relative; top will then be relative to
        that directory.  (dir_fd is always supported for fwalk.)

        Caution:
        Since fwalk() yields file descriptors, those are only valid until the
        next iteration step, so you should dup() them if you want to keep them
        for a longer period.

        Example:

        import os
        for root, dirs, files, rootfd in os.fwalk('python/Lib/email'):
            print(root, "consumes", end="")
            print(sum([os.stat(name, dir_fd=rootfd).st_size for name in files]),
                  end="")
            print("bytes in", len(files), "non-directory files")
            if 'CVS' in dirs:
                dirs.remove('CVS')  # don't visit CVS directories
        """
        # Note: To guard against symlink races, we use the standard
        # lstat()/open()/fstat() trick: stat first without following links,
        # open, then verify the open fd still refers to the same inode.
        orig_st = stat(top, follow_symlinks=False, dir_fd=dir_fd)
        topfd = open(top, O_RDONLY, dir_fd=dir_fd)
        try:
            # Only descend when top is (still) a real directory, unless the
            # caller explicitly asked to follow symlinks.
            if (follow_symlinks or (st.S_ISDIR(orig_st.st_mode) and
                                    path.samestat(orig_st, stat(topfd)))):
                yield from _fwalk(topfd, top, topdown, onerror, follow_symlinks)
        finally:
            close(topfd)

    def _fwalk(topfd, toppath, topdown, onerror, follow_symlinks):
        # Note: This uses O(depth of the directory tree) file descriptors: if
        # necessary, it can be adapted to only require O(1) FDs, see issue
        # #13734.

        names = listdir(topfd)
        dirs, nondirs = [], []
        for name in names:
            try:
                # Here, we don't use AT_SYMLINK_NOFOLLOW to be consistent with
                # walk() which reports symlinks to directories as directories.
                # We do however check for symlinks before recursing into
                # a subdirectory.
                if st.S_ISDIR(stat(name, dir_fd=topfd).st_mode):
                    dirs.append(name)
                else:
                    nondirs.append(name)
            except FileNotFoundError:
                try:
                    # Add dangling symlinks, ignore disappeared files
                    if st.S_ISLNK(stat(name, dir_fd=topfd, follow_symlinks=False)
                                  .st_mode):
                        nondirs.append(name)
                except FileNotFoundError:
                    continue

        if topdown:
            yield toppath, dirs, nondirs, topfd

        for name in dirs:
            try:
                # Re-stat and re-open relative to topfd, then compare inode
                # identity below to defeat symlink races.
                orig_st = stat(name, dir_fd=topfd, follow_symlinks=follow_symlinks)
                dirfd = open(name, O_RDONLY, dir_fd=topfd)
            except error as err:
                if onerror is not None:
                    onerror(err)
                return
            try:
                if follow_symlinks or path.samestat(orig_st, stat(dirfd)):
                    dirpath = path.join(toppath, name)
                    yield from _fwalk(dirfd, dirpath, topdown, onerror, follow_symlinks)
            finally:
                close(dirfd)

        if not topdown:
            yield toppath, dirs, nondirs, topfd

    __all__.append("fwalk")
# Make sure os.environ exists, at least: the posix/nt extension module may
# not have provided one, and the code below wraps it unconditionally.
try:
    environ
except NameError:
    environ = {}
def execl(file, *args):
    """execl(file, *args)

    Execute the executable file with argument list args, replacing the
    current process.  Does not return on success. """
    # Positional-argument convenience wrapper around execv().
    execv(file, args)
def execle(file, *args):
    """execle(file, *args, env)

    Execute the executable file with argument list args and
    environment env, replacing the current process. """
    # The environment mapping travels as the last positional argument.
    env = args[-1]
    execve(file, args[:-1], env)
def execlp(file, *args):
    """execlp(file, *args)

    Execute the executable file (which is searched for along $PATH)
    with argument list args, replacing the current process. """
    # Positional-argument convenience wrapper around execvp().
    execvp(file, args)
def execlpe(file, *args):
    """execlpe(file, *args, env)

    Execute the executable file (which is searched for along $PATH)
    with argument list args and environment env, replacing the current
    process. """
    # The environment mapping travels as the last positional argument.
    env = args[-1]
    execvpe(file, args[:-1], env)
def execvp(file, args):
    """execvp(file, args)

    Execute the executable file (which is searched for along $PATH)
    with argument list args, replacing the current process.
    args may be a list or tuple of strings. """
    # Delegates the PATH search to the shared _execvpe() helper.
    _execvpe(file, args)
def execvpe(file, args, env):
    """execvpe(file, args, env)

    Execute the executable file (which is searched for along $PATH)
    with argument list args and environment env, replacing the
    current process.
    args may be a list or tuple of strings. """
    # Delegates the PATH search to the shared _execvpe() helper.
    _execvpe(file, args, env)
# Export the exec* convenience wrappers defined above.
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
    # Shared implementation behind execvp()/execvpe(): search the PATH
    # directories for *file* and exec the first candidate that works.
    if env is not None:
        exec_func = execve
        argrest = (args, env)
    else:
        exec_func = execv
        argrest = (args,)
        env = environ

    # An explicit directory component means no PATH search at all.
    head, tail = path.split(file)
    if head:
        exec_func(file, *argrest)
        return
    last_exc = saved_exc = None
    saved_tb = None
    path_list = get_exec_path(env)
    if name != 'nt':
        # POSIX exec wants bytes paths; Windows keeps str.
        file = fsencode(file)
        path_list = map(fsencode, path_list)
    for dir in path_list:
        fullname = path.join(dir, file)
        try:
            exec_func(fullname, *argrest)
        except error as e:
            last_exc = e
            tb = sys.exc_info()[2]
            # Remember the first "interesting" failure; plain ENOENT/ENOTDIR
            # only means this PATH entry did not contain the file.
            if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
                and saved_exc is None):
                saved_exc = e
                saved_tb = tb
    if saved_exc:
        raise saved_exc.with_traceback(saved_tb)
    # NOTE(review): if path_list is empty, last_exc/tb are unbound here and
    # this raises NameError instead — presumably PATH is never empty in
    # practice; confirm upstream.
    raise last_exc.with_traceback(tb)
def get_exec_path(env=None):
    """Returns the sequence of directories that will be searched for the
    named executable (similar to a shell) when launching a process.

    *env* must be an environment variable dict or None.  If *env* is None,
    os.environ will be used.

    Returns a list of str directory names (split on os.pathsep); falls back
    to os.defpath when no PATH entry is found.
    """
    # Use a local import instead of a global import to limit the number of
    # modules loaded at startup: the os module is always loaded at startup by
    # Python. It may also avoid a bootstrap issue.
    import warnings

    if env is None:
        env = environ

    # {b'PATH': ...}.get('PATH') and {'PATH': ...}.get(b'PATH') emit a
    # BytesWarning when using python -b or python -bb: ignore the warning
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", BytesWarning)

        try:
            path_list = env.get('PATH')
        except TypeError:
            path_list = None

        if supports_bytes_environ:
            try:
                path_listb = env[b'PATH']
            except (KeyError, TypeError):
                pass
            else:
                # Refuse ambiguous input: both str and bytes PATH keys given.
                if path_list is not None:
                    raise ValueError(
                        "env cannot contain 'PATH' and b'PATH' keys")
                path_list = path_listb

            if path_list is not None and isinstance(path_list, bytes):
                path_list = fsdecode(path_list)

    if path_list is None:
        path_list = defpath
    return path_list.split(pathsep)
# Change environ to automatically call putenv(), unsetenv if they exist.
from collections.abc import MutableMapping
class _Environ(MutableMapping):
def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue, putenv, unsetenv):
self.encodekey = encodekey
self.decodekey = decodekey
self.encodevalue = encodevalue
self.decodevalue = decodevalue
self.putenv = putenv
self.unsetenv = unsetenv
self._data = data
def __getitem__(self, key):
try:
value = self._data[self.encodekey(key)]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
return self.decodevalue(value)
def __setitem__(self, key, value):
key = self.encodekey(key)
value = self.encodevalue(value)
self.putenv(key, value)
self._data[key] = value
def __delitem__(self, key):
encodedkey = self.encodekey(key)
self.unsetenv(encodedkey)
try:
del self._data[encodedkey]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
def __iter__(self):
for key in self._data:
yield self.decodekey(key)
def __len__(self):
return len(self._data)
def __repr__(self):
return 'environ({{{}}})'.format(', '.join(
('{!r}: {!r}'.format(self.decodekey(key), self.decodevalue(value))
for key, value in self._data.items())))
def copy(self):
return dict(self)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
# Fall back gracefully when the platform module lacks putenv()/unsetenv();
# the no-op stand-ins keep _Environ working without touching the real
# environment.
try:
    _putenv = putenv
except NameError:
    _putenv = lambda key, value: None
else:
    __all__.append("putenv")

try:
    _unsetenv = unsetenv
except NameError:
    # Emulate deletion by setting the variable to the empty string.
    _unsetenv = lambda key: _putenv(key, "")
else:
    __all__.append("unsetenv")
def _createenviron():
    # Build the os.environ mapping with the key/value codecs appropriate to
    # this platform.
    if name in ('os2', 'nt'):
        # Where Env Var Names Must Be UPPERCASE
        def check_str(value):
            if not isinstance(value, str):
                raise TypeError("str expected, not %s" % type(value).__name__)
            return value
        encode = check_str
        decode = str
        def encodekey(key):
            # Normalize so FOO and foo address the same variable.
            return encode(key).upper()
        data = {}
        for key, value in environ.items():
            data[encodekey(key)] = value
    else:
        # Where Env Var Names Can Be Mixed Case
        encoding = sys.getfilesystemencoding()
        def encode(value):
            if not isinstance(value, str):
                raise TypeError("str expected, not %s" % type(value).__name__)
            return value.encode(encoding, 'surrogateescape')
        def decode(value):
            return value.decode(encoding, 'surrogateescape')
        encodekey = encode
        # POSIX: the native bytes dict stays the single backing store.
        data = environ
    return _Environ(data,
        encodekey, decode,
        encode, decode,
        _putenv, _unsetenv)

# unicode environ
environ = _createenviron()
del _createenviron
def getenv(key, default=None):
    """Get an environment variable, return None if it doesn't exist.
    The optional second argument can specify an alternate default.
    key, default and the result are str."""
    return environ.get(key, default)

# A separate bytes environment (environb) only exists off Windows/OS2.
supports_bytes_environ = name not in ('os2', 'nt')
__all__.extend(("getenv", "supports_bytes_environ"))
if supports_bytes_environ:
    def _check_bytes(value):
        # Identity function that enforces bytes keys/values for environb.
        if not isinstance(value, bytes):
            raise TypeError("bytes expected, not %s" % type(value).__name__)
        return value

    # bytes environ: shares the same backing dict as os.environ so the two
    # views never go out of sync.
    environb = _Environ(environ._data,
        _check_bytes, bytes,
        _check_bytes, bytes,
        _putenv, _unsetenv)
    del _check_bytes

    def getenvb(key, default=None):
        """Get an environment variable, return None if it doesn't exist.
        The optional second argument can specify an alternate default.
        key, default and the result are bytes."""
        return environb.get(key, default)

    __all__.extend(("environb", "getenvb"))
def _fscodec():
    """Build the fsencode()/fsdecode() pair bound to the current
    filesystem encoding."""
    encoding = sys.getfilesystemencoding()
    # 'mbcs' (the Windows ANSI codepage) cannot round-trip surrogates, so it
    # keeps the strict handler; everything else uses surrogateescape.
    errors = 'strict' if encoding == 'mbcs' else 'surrogateescape'

    def fsencode(filename):
        """
        Encode filename to the filesystem encoding with 'surrogateescape' error
        handler, return bytes unchanged. On Windows, use 'strict' error handler if
        the file system encoding is 'mbcs' (which is the default encoding).
        """
        if isinstance(filename, str):
            return filename.encode(encoding, errors)
        if isinstance(filename, bytes):
            return filename
        raise TypeError("expect bytes or str, not %s" % type(filename).__name__)

    def fsdecode(filename):
        """
        Decode filename from the filesystem encoding with 'surrogateescape' error
        handler, return str unchanged. On Windows, use 'strict' error handler if
        the file system encoding is 'mbcs' (which is the default encoding).
        """
        if isinstance(filename, bytes):
            return filename.decode(encoding, errors)
        if isinstance(filename, str):
            return filename
        raise TypeError("expect bytes or str, not %s" % type(filename).__name__)

    return fsencode, fsdecode

# Bind the module-level names and drop the factory.
fsencode, fsdecode = _fscodec()
del _fscodec
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):

    P_WAIT = 0
    P_NOWAIT = P_NOWAITO = 1

    __all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"])

    # XXX Should we support P_DETACH?  I suppose it could fork()**2
    # and close the std I/O streams.  Also, P_OVERLAY is the same
    # as execv*()?

    def _spawnvef(mode, file, args, env, func):
        # Internal helper; func is the exec*() function to use
        pid = fork()
        if not pid:
            # Child
            try:
                if env is None:
                    func(file, args)
                else:
                    func(file, args, env)
            except:
                # Nothing in the child may return to the caller's code;
                # exit with the conventional "exec failed" status.
                _exit(127)
        else:
            # Parent
            if mode == P_NOWAIT:
                return pid  # Caller is responsible for waiting!
            while 1:
                wpid, sts = waitpid(pid, 0)
                if WIFSTOPPED(sts):
                    # Ignore job-control stops; keep waiting for exit.
                    continue
                elif WIFSIGNALED(sts):
                    return -WTERMSIG(sts)
                elif WIFEXITED(sts):
                    return WEXITSTATUS(sts)
                else:
                    raise error("Not stopped, signaled or exited???")

    def spawnv(mode, file, args):
        """spawnv(mode, file, args) -> integer

        Execute file with arguments from args in a subprocess.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, None, execv)

    def spawnve(mode, file, args, env):
        """spawnve(mode, file, args, env) -> integer

        Execute file with arguments from args in a subprocess with the
        specified environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, env, execve)

    # Note: spawnvp[e] isn't currently supported on Windows

    def spawnvp(mode, file, args):
        """spawnvp(mode, file, args) -> integer

        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, None, execvp)

    def spawnvpe(mode, file, args, env):
        """spawnvpe(mode, file, args, env) -> integer

        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess with the supplied environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, env, execvpe)
if _exists("spawnv"):
    # These aren't supplied by the basic Windows code
    # but can be easily implemented in Python

    def spawnl(mode, file, *args):
        """spawnl(mode, file, *args) -> integer

        Execute file with arguments from args in a subprocess.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return spawnv(mode, file, args)

    def spawnle(mode, file, *args):
        """spawnle(mode, file, *args, env) -> integer

        Execute file with arguments from args in a subprocess with the
        supplied environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        # The trailing positional argument is the environment mapping.
        env = args[-1]
        return spawnve(mode, file, args[:-1], env)

    __all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",])
if _exists("spawnvp"):
    # At the moment, Windows doesn't implement spawnvp[e],
    # so it won't have spawnlp[e] either.

    def spawnlp(mode, file, *args):
        """spawnlp(mode, file, *args) -> integer

        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return spawnvp(mode, file, args)

    def spawnlpe(mode, file, *args):
        """spawnlpe(mode, file, *args, env) -> integer

        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess with the supplied environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        # The trailing positional argument is the environment mapping.
        env = args[-1]
        return spawnvpe(mode, file, args[:-1], env)

    __all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",])
import copyreg as _copyreg

# Register pickle support for stat()/statvfs() results.  The factory
# functions must live at module level so pickles can locate them by name.

def _make_stat_result(tup, dict):
    return stat_result(tup, dict)

def _pickle_stat_result(sr):
    (type, args) = sr.__reduce__()
    return (_make_stat_result, args)

try:
    _copyreg.pickle(stat_result, _pickle_stat_result, _make_stat_result)
except NameError:  # stat_result may not exist
    pass

def _make_statvfs_result(tup, dict):
    return statvfs_result(tup, dict)

def _pickle_statvfs_result(sr):
    (type, args) = sr.__reduce__()
    return (_make_statvfs_result, args)

try:
    _copyreg.pickle(statvfs_result, _pickle_statvfs_result,
                    _make_statvfs_result)
except NameError:  # statvfs_result may not exist
    pass
# Supply os.popen()
def popen(cmd, mode="r", buffering=-1):
    """Open a pipe to or from the shell command *cmd*.

    Returns a file-like object connected to the child's stdout
    (mode "r") or stdin (mode "w"); closing it waits for the process
    to finish and reports its status.
    """
    if not isinstance(cmd, str):
        raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
    if mode not in ("r", "w"):
        raise ValueError("invalid mode %r" % mode)
    if buffering == 0 or buffering is None:
        # The TextIOWrapper below requires a buffered binary pipe.
        raise ValueError("popen() does not support unbuffered streams")
    import subprocess, io
    reading = (mode == "r")
    pipe_arg = "stdout" if reading else "stdin"
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=buffering,
                            **{pipe_arg: subprocess.PIPE})
    pipe = proc.stdout if reading else proc.stdin
    return _wrap_close(io.TextIOWrapper(pipe), proc)
# Helper for popen() -- a proxy for a file whose close waits for the process
class _wrap_close:
def __init__(self, stream, proc):
self._stream = stream
self._proc = proc
def close(self):
self._stream.close()
returncode = self._proc.wait()
if returncode == 0:
return None
if name == 'nt':
return returncode
else:
return returncode << 8 # Shift left to match old behavior
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getattr__(self, name):
return getattr(self._stream, name)
def __iter__(self):
return iter(self._stream)
# Supply os.fdopen()
def fdopen(fd, *args, **kwargs):
    """Return an open file object connected to the file descriptor *fd*.

    Thin wrapper around io.open(); see that function for the remaining
    arguments.
    """
    if isinstance(fd, int):
        import io
        return io.open(fd, *args, **kwargs)
    raise TypeError("invalid fd type (%s, expected integer)" % type(fd))
| agpl-3.0 |
sundhaug92/binwalk | src/binwalk/modules/hexdiff.py | 1 | 7229 | import sys
import string
import binwalk.core.common as common
from binwalk.core.compat import *
from binwalk.core.module import Module, Option, Kwarg
class HexDiff(Module):
    # Binwalk module that hexdumps one file, or diffs several files side by
    # side with per-byte ANSI coloring (green = same in all files,
    # red = different in all files, blue = different in some files).

    # ANSI SGR color codes used for the three byte-equality classes.
    COLORS = {
        'red': '31',
        'green': '32',
        'blue': '34',
    }

    # Alternating separators drawn between per-file hexdump columns.
    SEPERATORS = ['\\', '/']
    DEFAULT_BLOCK_SIZE = 16

    # Marker emitted for runs of lines suppressed by the color filter.
    SKIPPED_LINE = "*"
    CUSTOM_DISPLAY_FORMAT = "0x%.8X %s"

    TITLE = "Binary Diffing"

    CLI = [
        Option(short='W',
               long='hexdump',
               kwargs={'enabled': True},
               description='Perform a hexdump / diff of a file or files'),
        Option(short='G',
               long='green',
               kwargs={'show_green': True},
               description='Only show lines containing bytes that are the same among all files'),
        Option(short='i',
               long='red',
               kwargs={'show_red': True},
               description='Only show lines containing bytes that are different among all files'),
        Option(short='U',
               long='blue',
               kwargs={'show_blue': True},
               description='Only show lines containing bytes that are different among some files'),
        Option(short='w',
               long='terse',
               kwargs={'terse': True},
               description='Diff all files, but only display a hex dump of the first file'),
    ]

    KWARGS = [
        Kwarg(name='show_red', default=False),
        Kwarg(name='show_blue', default=False),
        Kwarg(name='show_green', default=False),
        Kwarg(name='terse', default=False),
        Kwarg(name='enabled', default=False),
    ]

    RESULT_FORMAT = "%s\n"
    RESULT = ['display']

    def _no_colorize(self, c, color="red", bold=True):
        # Pass-through used when the terminal does not support color.
        return c

    def _colorize(self, c, color="red", bold=True):
        # Wrap *c* in an ANSI escape sequence for *color* (optionally bold).
        attr = []
        attr.append(self.COLORS[color])
        if bold:
            attr.append('1')
        return "\x1b[%sm%s\x1b[0m" % (';'.join(attr), c)

    def _color_filter(self, data):
        # Decide whether a rendered line should be displayed, based on which
        # color codes it contains and which --red/--green/--blue filters are
        # enabled.
        red = '\x1b[' + self.COLORS['red'] + ';'
        green = '\x1b[' + self.COLORS['green'] + ';'
        blue = '\x1b[' + self.COLORS['blue'] + ';'

        if self.show_blue and blue in data:
            return True
        elif self.show_green and green in data:
            return True
        elif self.show_red and red in data:
            return True

        return False

    def hexascii(self, target_data, byte, offset):
        # Classify *byte* at *offset* across all files and return its
        # colorized (hex, ascii) representation.
        color = "green"

        for (fp_i, data_i) in iterator(target_data):
            diff_count = 0

            for (fp_j, data_j) in iterator(target_data):
                if fp_i == fp_j:
                    continue

                try:
                    if data_i[offset] != data_j[offset]:
                        diff_count += 1
                except IndexError as e:
                    # A shorter file counts as a difference at this offset.
                    diff_count += 1

            if diff_count == len(target_data) - 1:
                # Different from every other file.
                color = "red"
            elif diff_count > 0:
                # Different from only some files; this verdict is final.
                color = "blue"
                break

        hexbyte = self.colorize("%.2X" % ord(byte), color)

        # Non-printable (and whitespace) bytes render as '.' in the ASCII
        # column.
        if byte not in string.printable or byte in string.whitespace:
            byte = "."

        asciibyte = self.colorize(byte, color)

        return (hexbyte, asciibyte)

    def diff_files(self, target_files):
        # Walk all files in lockstep, one self.block-sized line at a time,
        # emitting one result per rendered line.
        last_line = None
        loop_count = 0
        sep_count = 0

        # Figure out the maximum diff size (largest file size)
        self.status.total = 0
        for i in range(0, len(target_files)):
            if target_files[i].size > self.status.total:
                self.status.total = target_files[i].size
                self.status.fp = target_files[i]

        while True:
            line = ""
            done_files = 0
            block_data = {}
            seperator = self.SEPERATORS[sep_count % 2]

            for fp in target_files:
                block_data[fp] = fp.read(self.block)
                if not block_data[fp]:
                    done_files += 1

            # No more data from any of the target files? Done.
            if done_files == len(target_files):
                break

            for fp in target_files:
                hexline = ""
                asciiline = ""

                for i in range(0, self.block):
                    if i >= len(block_data[fp]):
                        # Past EOF for this file: pad the column.
                        hexbyte = "XX"
                        asciibyte = "."
                    else:
                        (hexbyte, asciibyte) = self.hexascii(
                            block_data, block_data[fp][i], i)

                    hexline += "%s " % hexbyte
                    asciiline += "%s" % asciibyte

                line += "%s |%s|" % (hexline, asciiline)

                # In terse mode only the first file's column is rendered.
                if self.terse:
                    break

                if fp != target_files[-1]:
                    line += " %s " % seperator

            offset = fp.offset + (self.block * loop_count)

            if not self._color_filter(line):
                # Filtered out: collapse into the '*' skip marker.
                display = line = self.SKIPPED_LINE
            else:
                display = self.CUSTOM_DISPLAY_FORMAT % (offset, line)
                sep_count += 1

            # Suppress consecutive duplicate skip markers.
            if line != self.SKIPPED_LINE or last_line != line:
                self.result(offset=offset, description=line, display=display)

            last_line = line
            loop_count += 1
            self.status.completed += self.block

    def init(self):
        # To mimic expected behavior, if all options are False, we show
        # everything
        if not any([self.show_red, self.show_green, self.show_blue]):
            self.show_red = self.show_green = self.show_blue = True

        # Always disable terminal formatting, as it won't work properly with
        # colorized output
        self.config.display.fit_to_screen = False

        # Set the block size (aka, hexdump line size)
        self.block = self.config.block
        if not self.block:
            self.block = self.DEFAULT_BLOCK_SIZE

        # Build a list of files to hexdiff
        self.hex_target_files = []
        while True:
            f = self.next_file(close_previous=False)
            if not f:
                break
            else:
                self.hex_target_files.append(f)

        # Build the header format string
        header_width = (self.block * 4) + 2
        if self.terse:
            file_count = 1
        else:
            file_count = len(self.hex_target_files)
        self.HEADER_FORMAT = "OFFSET " + \
            (("%%-%ds " % header_width) * file_count) + "\n"

        # Build the header argument list
        self.HEADER = [fp.name for fp in self.hex_target_files]
        if self.terse and len(self.HEADER) > 1:
            self.HEADER = self.HEADER[0]

        # Set up the tty for colorization, if it is supported
        if hasattr(sys.stderr, 'isatty') and sys.stderr.isatty(
        ) and not common.MSWindows():
            import curses
            curses.setupterm()
            self.colorize = self._colorize
        else:
            self.colorize = self._no_colorize

    def run(self):
        # Entry point: print the header, diff, then the footer.
        if self.hex_target_files:
            self.header()
            self.diff_files(self.hex_target_files)
            self.footer()
| mit |
mdanielwork/intellij-community | python/helpers/coveragepy/coverage/misc.py | 39 | 7819 | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Miscellaneous stuff for coverage.py."""
import errno
import hashlib
import inspect
import locale
import os
import sys
import types
from coverage import env
from coverage.backward import string_class, to_bytes, unicode_class
# Cache of already-copied modules, keyed by the original module object.
ISOLATED_MODULES = {}


def isolate_module(mod):
    """Copy a module so that we are isolated from aggressive mocking.

    If a test suite mocks os.path.exists (for example), and then we need to use
    it during the test, everything will get tangled up if we use their mock.
    Making a copy of the module when we import it will isolate coverage.py from
    those complications.
    """
    if mod not in ISOLATED_MODULES:
        clone = types.ModuleType(mod.__name__)
        # Register the clone before copying attributes so recursive
        # references (e.g. os.path.os -> os) terminate.
        ISOLATED_MODULES[mod] = clone
        for attr_name in dir(mod):
            attr = getattr(mod, attr_name)
            # Submodules are isolated recursively (os.path inside os).
            if isinstance(attr, types.ModuleType):
                attr = isolate_module(attr)
            setattr(clone, attr_name, attr)
    return ISOLATED_MODULES[mod]

os = isolate_module(os)
# Use PyContracts for assertion testing on parameters and returns, but only if
# we are running our own test suite.
if env.TESTING:
    from contracts import contract                              # pylint: disable=unused-import
    from contracts import new_contract as raw_new_contract

    def new_contract(*args, **kwargs):
        """A proxy for contracts.new_contract that doesn't mind happening twice."""
        try:
            return raw_new_contract(*args, **kwargs)
        except ValueError:
            # During meta-coverage, this module is imported twice, and
            # PyContracts doesn't like redefining contracts. It's OK.
            pass

    # Define contract words that PyContract doesn't have.
    new_contract('bytes', lambda v: isinstance(v, bytes))
    if env.PY3:
        new_contract('unicode', lambda v: isinstance(v, unicode_class))
else:                                               # pragma: not covered
    # We aren't using real PyContracts, so just define a no-op decorator as a
    # stunt double.
    def contract(**unused):
        """Dummy no-op implementation of `contract`."""
        return lambda func: func

    def new_contract(*args_unused, **kwargs_unused):
        """Dummy no-op implementation of `new_contract`."""
        pass
def nice_pair(pair):
    """Make a nice string representation of a pair of numbers.

    If the numbers are equal, just return the number, otherwise return the
    pair with a dash between them, indicating the range.

    """
    lo, hi = pair
    return "%d" % lo if lo == hi else "%d-%d" % (lo, hi)


def format_lines(statements, lines):
    """Nicely format a list of line numbers.

    Format a list of line numbers for printing by coalescing groups of lines
    as long as the lines represent consecutive statements.  This will
    coalesce even if there are gaps between statements.

    For example, if `statements` is [1,2,3,4,5,10,11,12,13,14] and
    `lines` is [1,2,5,10,11,13,14] then the result will be "1-2, 5-11, 13-14".

    """
    line_list = sorted(lines)
    pairs = []
    run_start = run_end = None
    li = 0
    for stmt in sorted(statements):
        if li < len(line_list) and stmt == line_list[li]:
            # This statement is in `lines`: extend (or open) the current run.
            if run_start is None:
                run_start = stmt
            run_end = stmt
            li += 1
        elif run_start is not None:
            # A statement not in `lines` closes the current run.
            pairs.append((run_start, run_end))
            run_start = None
    if run_start is not None:
        pairs.append((run_start, run_end))
    return ', '.join(map(nice_pair, pairs))
def expensive(fn):
    """A decorator to indicate that a method shouldn't be called more than once.

    Normally, this does nothing.  During testing, this raises an exception if
    called more than once.

    """
    if not env.TESTING:
        # Production: no overhead at all.
        return fn

    attr = "_once_" + fn.__name__

    def _wrapped(self):
        """Inner function that checks the cache."""
        if hasattr(self, attr):
            raise Exception("Shouldn't have called %s more than once" % fn.__name__)
        setattr(self, attr, True)
        return fn(self)

    return _wrapped
def bool_or_none(b):
    """Return bool(b), but preserve None."""
    return None if b is None else bool(b)
def join_regex(regexes):
    """Combine a list of regexes into one that matches any of them."""
    # Each part is wrapped in a non-capturing group so alternation can't
    # bleed into a pattern's own structure.
    return "|".join(["(?:%s)" % r for r in regexes])
def file_be_gone(path):
    """Remove a file, and don't get annoyed if it doesn't exist."""
    try:
        os.remove(path)
    except OSError as e:
        # A missing file is fine; anything else is a real error.
        if e.errno == errno.ENOENT:
            return
        raise
def output_encoding(outfile=None):
    """Determine the encoding to use for output written to `outfile` or stdout."""
    if outfile is None:
        outfile = sys.stdout
    # Prefer the stream's own encoding, then the real stdout's, and fall
    # back to the locale default.
    for candidate in (getattr(outfile, "encoding", None),
                      getattr(sys.__stdout__, "encoding", None)):
        if candidate:
            return candidate
    return locale.getpreferredencoding()
class Hasher(object):
    """Hashes Python data into md5."""

    def __init__(self):
        self.md5 = hashlib.md5()

    def update(self, v):
        """Add `v` to the hash, recursively if needed."""
        # Fold the type name in first so that, e.g., 1 and "1" hash
        # differently.
        self.md5.update(to_bytes(str(type(v))))
        if isinstance(v, string_class):
            self.md5.update(to_bytes(v))
        elif isinstance(v, bytes):
            self.md5.update(v)
        elif v is None:
            pass
        elif isinstance(v, (int, float)):
            self.md5.update(to_bytes(str(v)))
        elif isinstance(v, (tuple, list)):
            for item in v:
                self.update(item)
        elif isinstance(v, dict):
            # Sort keys so the digest is independent of dict ordering.
            for k in sorted(v.keys()):
                self.update(k)
                self.update(v[k])
        else:
            # Arbitrary object: hash its public, non-callable attributes.
            for attr_name in dir(v):
                if attr_name.startswith('__'):
                    continue
                attr = getattr(v, attr_name)
                if inspect.isroutine(attr):
                    continue
                self.update(attr_name)
                self.update(attr)

    def hexdigest(self):
        """Retrieve the hex digest of the hash."""
        return self.md5.hexdigest()
def _needs_to_implement(that, func_name):
"""Helper to raise NotImplementedError in interface stubs."""
if hasattr(that, "_coverage_plugin_name"):
thing = "Plugin"
name = that._coverage_plugin_name
else:
thing = "Class"
klass = that.__class__
name = "{klass.__module__}.{klass.__name__}".format(klass=klass)
raise NotImplementedError(
"{thing} {name!r} needs to implement {func_name}()".format(
thing=thing, name=name, func_name=func_name
)
)
class SimpleRepr(object):
    """A mixin implementing a simple __repr__."""

    def __repr__(self):
        # Show the class name, a short id, and all instance attributes.
        attrs = " ".join(
            "{}={!r}".format(k, v) for k, v in self.__dict__.items()
        )
        return "<{klass} @{id:x} {attrs}>".format(
            klass=self.__class__.__name__,
            id=id(self) & 0xFFFFFF,
            attrs=attrs,
        )
class CoverageException(Exception):
    """An exception specific to coverage.py."""


class NoSource(CoverageException):
    """We couldn't find the source for a module."""


class NoCode(NoSource):
    """We couldn't find any code at all."""


class NotPython(CoverageException):
    """A source file turned out not to be parsable Python."""


class ExceptionDuringRun(CoverageException):
    """An exception happened while running customer code.

    Construct it with three arguments, the values from `sys.exc_info`.

    """
| apache-2.0 |
emonty/ansible | lib/ansible/module_utils/yumdnf.py | 34 | 7354 | # -*- coding: utf-8 -*-
#
# # Copyright: (c) 2012, Red Hat, Inc
# Written by Seth Vidal <skvidal at fedoraproject.org>
# Contributing Authors:
# - Ansible Core Team
# - Eduard Snesarev (@verm666)
# - Berend De Schouwer (@berenddeschouwer)
# - Abhijeet Kasurde (@Akasurde)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import os
import time
import glob
import tempfile
from abc import ABCMeta, abstractmethod
from ansible.module_utils._text import to_native
from ansible.module_utils.six import with_metaclass
# Argument spec shared by the yum and dnf modules so both package managers
# present an identical Ansible interface; passed to AnsibleModule(**...).
yumdnf_argument_spec = dict(
    argument_spec=dict(
        allow_downgrade=dict(type='bool', default=False),
        autoremove=dict(type='bool', default=False),
        bugfix=dict(required=False, type='bool', default=False),
        conf_file=dict(type='str'),
        disable_excludes=dict(type='str', default=None),
        disable_gpg_check=dict(type='bool', default=False),
        disable_plugin=dict(type='list', default=[]),
        disablerepo=dict(type='list', default=[]),
        download_only=dict(type='bool', default=False),
        download_dir=dict(type='str', default=None),
        enable_plugin=dict(type='list', default=[]),
        enablerepo=dict(type='list', default=[]),
        exclude=dict(type='list', default=[]),
        installroot=dict(type='str', default="/"),
        install_repoquery=dict(type='bool', default=True),
        install_weak_deps=dict(type='bool', default=True),
        list=dict(type='str'),
        name=dict(type='list', aliases=['pkg'], default=[]),
        releasever=dict(default=None),
        security=dict(type='bool', default=False),
        skip_broken=dict(type='bool', default=False),
        # removed==absent, installed==present, these are accepted as aliases
        state=dict(type='str', default=None, choices=['absent', 'installed', 'latest', 'present', 'removed']),
        update_cache=dict(type='bool', default=False, aliases=['expire-cache']),
        update_only=dict(required=False, default="no", type='bool'),
        validate_certs=dict(type='bool', default=True),
        lock_timeout=dict(type='int', default=30),
    ),
    # A run must name packages, request a list, or refresh the cache.
    required_one_of=[['name', 'list', 'update_cache']],
    mutually_exclusive=[['name', 'list']],
    supports_check_mode=True,
)
class YumDnf(with_metaclass(ABCMeta, object)):
    """
    Abstract class that handles the population of instance variables that should
    be identical between both YUM and DNF modules because of the feature parity
    and shared argument spec
    """

    def __init__(self, module):
        self.module = module

        # Mirror every shared module parameter onto the instance so the
        # concrete yum/dnf implementations can use plain attribute access.
        self.allow_downgrade = self.module.params['allow_downgrade']
        self.autoremove = self.module.params['autoremove']
        self.bugfix = self.module.params['bugfix']
        self.conf_file = self.module.params['conf_file']
        self.disable_excludes = self.module.params['disable_excludes']
        self.disable_gpg_check = self.module.params['disable_gpg_check']
        self.disable_plugin = self.module.params['disable_plugin']
        self.disablerepo = self.module.params.get('disablerepo', [])
        self.download_only = self.module.params['download_only']
        self.download_dir = self.module.params['download_dir']
        self.enable_plugin = self.module.params['enable_plugin']
        self.enablerepo = self.module.params.get('enablerepo', [])
        self.exclude = self.module.params['exclude']
        self.installroot = self.module.params['installroot']
        self.install_repoquery = self.module.params['install_repoquery']
        self.install_weak_deps = self.module.params['install_weak_deps']
        self.list = self.module.params['list']
        # Strip surrounding whitespace from every requested package name.
        self.names = [p.strip() for p in self.module.params['name']]
        self.releasever = self.module.params['releasever']
        self.security = self.module.params['security']
        self.skip_broken = self.module.params['skip_broken']
        self.state = self.module.params['state']
        self.update_only = self.module.params['update_only']
        self.update_cache = self.module.params['update_cache']
        self.validate_certs = self.module.params['validate_certs']
        self.lock_timeout = self.module.params['lock_timeout']

        # It's possible someone passed a comma separated string since it used
        # to be a string type, so we should handle that
        self.names = self.listify_comma_sep_strings_in_list(self.names)
        self.disablerepo = self.listify_comma_sep_strings_in_list(self.disablerepo)
        self.enablerepo = self.listify_comma_sep_strings_in_list(self.enablerepo)
        self.exclude = self.listify_comma_sep_strings_in_list(self.exclude)

        # Fail if someone passed a space separated string
        # (names containing @, >, <, or = are version/group specs and are
        # allowed to contain spaces)
        # https://github.com/ansible/ansible/issues/46301
        for name in self.names:
            if ' ' in name and not any(spec in name for spec in ['@', '>', '<', '=']):
                module.fail_json(
                    msg='It appears that a space separated string of packages was passed in '
                        'as an argument. To operate on several packages, pass a comma separated '
                        'string of packages or a list of packages.'
                )

        # Sanity checking for autoremove
        if self.state is None:
            if self.autoremove:
                self.state = "absent"
            else:
                self.state = "present"

        if self.autoremove and (self.state != "absent"):
            self.module.fail_json(
                msg="Autoremove should be used alone or with state=absent",
                results=[],
            )

        # This should really be redefined by both the yum and dnf module but a
        # default isn't a bad idea
        self.lockfile = '/var/run/yum.pid'

    @abstractmethod
    def is_lockfile_pid_valid(self):
        # Subclasses decide whether the PID recorded in the lockfile still
        # belongs to a live package-manager process.
        return

    def _is_lockfile_present(self):
        # The lockfile path may be a literal file or a glob pattern; either
        # way the lock only counts when the recorded PID is still valid.
        return (os.path.isfile(self.lockfile) or glob.glob(self.lockfile)) and self.is_lockfile_pid_valid()

    def wait_for_lock(self):
        '''Poll until the lock is removed if timeout is a positive number'''
        if not self._is_lockfile_present():
            return

        if self.lock_timeout > 0:
            # Poll once per second for up to lock_timeout seconds.
            for iteration in range(0, self.lock_timeout):
                time.sleep(1)
                if not self._is_lockfile_present():
                    return

        # Still locked (or timeout <= 0 and the lock was held): give up.
        self.module.fail_json(msg='{0} lockfile is held by another process'.format(self.pkg_mgr_name))

    def listify_comma_sep_strings_in_list(self, some_list):
        """
        method to accept a list of strings as the parameter, find any strings
        in that list that are comma separated, remove them from the list and add
        their comma separated elements to the original list
        """
        new_list = []
        remove_from_original_list = []
        for element in some_list:
            if ',' in element:
                remove_from_original_list.append(element)
                new_list.extend([e.strip() for e in element.split(',')])

        # Mutate after iteration so the loop above sees a stable list.
        for element in remove_from_original_list:
            some_list.remove(element)

        some_list.extend(new_list)

        # A single empty-string parameter normalizes to an empty list.
        if some_list == [""]:
            return []

        return some_list

    @abstractmethod
    def run(self):
        # Entry point implemented by the concrete yum/dnf modules.
        raise NotImplementedError
| gpl-3.0 |
nilnvoid/wagtail | wagtail/project_template/home/migrations/0002_create_homepage.py | 21 | 1690 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def create_homepage(apps, schema_editor):
    """Forward migration: replace the stock welcome page with a HomePage."""
    content_type_model = apps.get_model('contenttypes.ContentType')
    page_model = apps.get_model('wagtailcore.Page')
    site_model = apps.get_model('wagtailcore.Site')
    homepage_model = apps.get_model('home.HomePage')

    # Wagtail ships a default welcome page with pk=2; remove it first.
    # filter().delete() is a no-op if a rerun already removed the row.
    page_model.objects.filter(id=2).delete()

    # Make sure a ContentType row exists for the HomePage model.
    homepage_ctype, __ = content_type_model.objects.get_or_create(
        model='homepage', app_label='home')

    # Insert the new root page of the site tree.
    new_home = homepage_model.objects.create(
        title="Homepage",
        slug='home',
        content_type=homepage_ctype,
        path='00010001',
        depth=2,
        numchild=0,
        url_path='/home/',
    )

    # Point a Site record at the freshly created homepage.
    site_model.objects.create(
        hostname='localhost', root_page=new_home, is_default_site=True)
def remove_homepage(apps, schema_editor):
    """Reverse migration: drop the HomePage row and its content type."""
    content_type_model = apps.get_model('contenttypes.ContentType')
    homepage_model = apps.get_model('home.HomePage')

    # Deleting the HomePage cascades to its related Page and Site rows.
    homepage_model.objects.filter(slug='home', depth=2).delete()

    # Remove the content type created for the homepage model.
    content_type_model.objects.filter(model='homepage', app_label='home').delete()
class Migration(migrations.Migration):
    # Requires the initial migration that created the HomePage model.
    dependencies = [
        ('home', '0001_initial'),
    ]

    # RunPython pairs the forward operation with its reverse so the
    # migration can also be unapplied cleanly.
    operations = [
        migrations.RunPython(create_homepage, remove_homepage),
    ]
| bsd-3-clause |
open-synergy/stock-logistics-warehouse | stock_available_unreserved/models/product.py | 3 | 5310 | # -*- coding: utf-8 -*-
# Copyright 2016 ACSONE SA/NV (<http://acsone.eu>)
# Copyright 2016 Eficent Business and IT Consulting Services S.L.
# (http://www.eficent.com)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from openerp import api, fields, models, _
from openerp.tools.float_utils import float_round
from openerp.addons import decimal_precision as dp
# Shared decimal-precision spec for the quantity fields declared below.
UNIT = dp.get_precision('Product Unit of Measure')
class ProductTemplate(models.Model):
    """Adds unreserved on-hand quantity fields to product templates."""
    _inherit = "product.template"

    # Sum of the unreserved quantities of all variants.
    qty_available_not_res = fields.Float(
        string='Quantity On Hand Unreserved', digits=UNIT,
        compute='_compute_product_available_not_res')
    # Human-readable rendering of the unreserved quantity.
    qty_available_stock_text = fields.Char(
        compute='_compute_product_available_not_res',
        string='Unreserved stock quantity')

    @api.multi
    def _compute_product_available_not_res(self):
        """Populate both computed fields from _product_available().

        Records that are still NewId (unsaved onchange records) are
        skipped because they cannot be looked up in the result dict.
        """
        no_new = self.filtered(
            lambda x: not isinstance(x.id, models.NewId))
        res = no_new._product_available()
        for tmpl in no_new:
            qty = res[tmpl.id]['qty_available_not_res']
            tmpl.qty_available_not_res = qty
            text = res[tmpl.id]['qty_available_stock_text']
            tmpl.qty_available_stock_text = text

    @api.multi
    def _product_available(self, name=None, arg=False):
        """Extend the standard availability dict with unreserved figures.

        Aggregates 'qty_available_not_res' over each template's variants;
        the display text is taken from the last variant iterated.
        """
        prod_available = super(ProductTemplate, self)._product_available(name,
                                                                         arg)
        # Collect every variant of every template in one recordset so the
        # variant-level computation runs in a single call.
        variants = self.env['product.product']
        for product in self:
            variants += product.product_variant_ids
        variant_available = variants._product_available()
        for product in self:
            if isinstance(product.id, models.NewId):
                continue
            qty_available_not_res = 0.0
            text = ''
            for p in product.product_variant_ids:
                qty = variant_available[p.id]["qty_available_not_res"]
                qty_available_not_res += qty
                text = variant_available[p.id]["qty_available_stock_text"]
            prod_available[product.id].update({
                "qty_available_not_res": qty_available_not_res,
                "qty_available_stock_text": text,
            })
        return prod_available

    @api.multi
    def action_open_quants_unreserved(self):
        """Open the stock-quants view filtered to unreserved quants of
        this template's variants."""
        products = self._get_products()
        result = self._get_act_window_dict('stock.product_open_quants')
        # Restrict the action's domain to these products' unreserved quants.
        result['domain'] = "[('product_id','in',[" + ','.join(
            map(str, products)) + "]), ('reservation_id', '=', False)]"
        result[
            'context'] = "{'search_default_locationgroup': 1, " \
            "'search_default_internal_loc': 1}"
        return result
class ProductProduct(models.Model):
    """Adds unreserved on-hand quantity fields to product variants."""
    _inherit = 'product.product'

    # Quantity on hand that is not held by any stock reservation.
    qty_available_not_res = fields.Float(
        string='Qty Available Not Reserved', digits=UNIT,
        compute='_compute_qty_available_not_res')
    # Human-readable rendering of the unreserved quantity.
    qty_available_stock_text = fields.Char(
        compute='_compute_qty_available_not_res', string='Available per stock')

    @api.multi
    def _compute_qty_available_not_res(self):
        """Populate both computed fields from _product_available()."""
        res = self._product_available()
        for prod in self:
            qty = res[prod.id]['qty_available_not_res']
            text = res[prod.id]['qty_available_stock_text']
            prod.qty_available_not_res = qty
            prod.qty_available_stock_text = text

    @api.model
    def _prepare_domain_available_not_res(self, products):
        """Build the stock.quant search domain: quants of these products,
        unreserved, restricted to the relevant locations."""
        domain_products = [('product_id', 'in', products.mapped('id'))]
        domain_quant = []
        # NOTE(review): the throwaway names here shadow the imported
        # gettext `_` inside this method only; harmless, but renaming the
        # placeholders (e.g. `dummy1, dummy2`) would be cleaner.
        domain_quant_loc, _, _ = products._get_domain_locations()
        domain_quant += domain_products
        domain_quant.append(('reservation_id', '=', False))
        domain_quant += domain_quant_loc
        return domain_quant

    @api.multi
    def _product_available_not_res_hook(self, quants):
        """Hook used to introduce possible variations"""
        return False

    @api.multi
    def _product_available(self, field_names=None, arg=False):
        """Extend the standard availability dict with the unreserved
        quantity and its display text, computed from unreserved quants."""
        res = super(ProductProduct, self).\
            _product_available(field_names=field_names,
                               arg=arg)
        domain_quant = self._prepare_domain_available_not_res(self)
        # Aggregate unreserved quant quantities grouped by product/location.
        quants = self.env['stock.quant'].read_group(
            domain_quant,
            ['product_id', 'location_id', 'qty'],
            ['product_id', 'location_id'],
            lazy=False)
        values_prod = {}
        for quant in quants:
            # create a dictionary with the total value per products
            values_prod.setdefault(quant['product_id'][0], 0)
            values_prod[quant['product_id'][0]] += quant['qty']
        for product in self:
            # get total qty for the product
            qty = float_round(values_prod.get(product.id, 0.0),
                              precision_rounding=product.uom_id.rounding)
            qty_available_not_res = qty
            res[product.id].update({'qty_available_not_res':
                                    qty_available_not_res})
            text = str(qty_available_not_res) + _(" On Hand")
            res[product.id].update({'qty_available_stock_text': text})
        self._product_available_not_res_hook(quants)
        return res
| agpl-3.0 |
Scille/parsec-cloud | tests/core/test_bootstrap_organization.py | 1 | 3990 | # Parsec Cloud (https://parsec.cloud) Copyright (c) AGPLv3 2016-2021 Scille SAS
import pytest
from parsec.api.data import UserProfile
from parsec.api.protocol import OrganizationID, HumanHandle
from parsec.core.backend_connection import apiv1_backend_anonymous_cmds_factory
from parsec.core.types import BackendOrganizationBootstrapAddr
from parsec.core.invite import bootstrap_organization, InviteNotFoundError, InviteAlreadyUsedError
@pytest.mark.trio
@pytest.mark.parametrize("with_labels", [False, True])
async def test_good(
    running_backend, backend, alice, bob, alice_backend_cmds, user_fs_factory, with_labels
):
    """Bootstrap a fresh organization (with and without human handle /
    device label) and verify the resulting device and backend state."""
    org_id = OrganizationID("NewOrg")
    org_token = "123456"
    await backend.organization.create(org_id, org_token)

    organization_addr = BackendOrganizationBootstrapAddr.build(
        running_backend.addr, org_id, org_token
    )

    # Human handle and device label are optional; exercise both paths.
    if with_labels:
        human_handle = HumanHandle(email="zack@example.com", label="Zack")
        device_label = "PC1"
    else:
        human_handle = None
        device_label = None

    async with apiv1_backend_anonymous_cmds_factory(addr=organization_addr) as cmds:
        new_device = await bootstrap_organization(
            cmds, human_handle=human_handle, device_label=device_label
        )

    assert new_device is not None
    assert new_device.organization_id == org_id
    assert new_device.device_label == device_label
    assert new_device.human_handle == human_handle
    # The bootstrapping user gets the ADMIN profile.
    assert new_device.profile == UserProfile.ADMIN

    # Test the behavior of this new device
    async with user_fs_factory(new_device, initialize_in_v0=True) as newfs:
        await newfs.workspace_create("wa")
        await newfs.sync()

    # Test the device in correct in the backend
    backend_user, backend_device = await backend.user.get_user_with_device(
        org_id, new_device.device_id
    )
    assert backend_user.user_id == new_device.user_id
    assert backend_user.human_handle == new_device.human_handle
    assert backend_user.profile == new_device.profile
    # Bootstrapped user/device are self-signed: no certifier recorded.
    assert backend_user.user_certifier is None
    if with_labels:
        # With labels the redacted certificates must differ from the full ones.
        assert backend_user.user_certificate != backend_user.redacted_user_certificate
    else:
        assert backend_user.user_certificate == backend_user.redacted_user_certificate

    assert backend_device.device_id == new_device.device_id
    assert backend_device.device_label == new_device.device_label
    assert backend_device.device_certifier is None
    if with_labels:
        assert backend_device.device_certificate != backend_device.redacted_device_certificate
    else:
        assert backend_device.device_certificate == backend_device.redacted_device_certificate
@pytest.mark.trio
async def test_invalid_token(running_backend, backend):
    """Bootstrapping with an outdated token must raise InviteNotFoundError."""
    org_id = OrganizationID("NewOrg")
    old_token = "123456"
    new_token = "abcdef"
    await backend.organization.create(org_id, old_token)
    # Creating the organization again presumably supersedes the first
    # bootstrap token — confirm against backend.organization.create.
    await backend.organization.create(org_id, new_token)

    # Build the bootstrap address with the stale token.
    organization_addr = BackendOrganizationBootstrapAddr.build(
        running_backend.addr, org_id, old_token
    )

    async with apiv1_backend_anonymous_cmds_factory(addr=organization_addr) as cmds:
        with pytest.raises(InviteNotFoundError):
            await bootstrap_organization(cmds, human_handle=None, device_label=None)
@pytest.mark.trio
async def test_already_bootstrapped(
    running_backend, backend, alice, bob, alice_backend_cmds, user_fs_factory
):
    """A second bootstrap attempt with the same token must raise
    InviteAlreadyUsedError."""
    org_id = OrganizationID("NewOrg")
    org_token = "123456"
    await backend.organization.create(org_id, org_token)

    organization_addr = BackendOrganizationBootstrapAddr.build(
        running_backend.addr, org_id, org_token
    )

    async with apiv1_backend_anonymous_cmds_factory(addr=organization_addr) as cmds:
        # First bootstrap succeeds and consumes the token...
        await bootstrap_organization(cmds, human_handle=None, device_label=None)
        # ...so replaying it must be rejected.
        with pytest.raises(InviteAlreadyUsedError):
            await bootstrap_organization(cmds, human_handle=None, device_label=None)
| agpl-3.0 |
themoken/Canto | canto/cfg/links.py | 1 | 1098 | # -*- coding: utf-8 -*-
#Canto - ncurses RSS reader
# Copyright (C) 2008 Jack Miller <jack@codezen.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
def register(c):
    """Install empty link/image handler tables on the config object and
    expose link_handler()/image_handler() helpers through its locals."""
    c.handlers = {"link": {}, "image": {}}

    def _store(table, path, **kwargs):
        # Missing options fall back to their defaults: a non-text handler,
        # no pre-fetching, and the catch-all extension None.
        ext = kwargs.get("ext", None)
        text = kwargs.get("text", False)
        fetch = kwargs.get("fetch", False)
        table[ext] = (path, text, fetch)

    def image_handler(path, **kwargs):
        _store(c.handlers["image"], path, **kwargs)

    def link_handler(path, **kwargs):
        _store(c.handlers["link"], path, **kwargs)

    c.locals.update({
        "link_handler": link_handler,
        "image_handler": image_handler,
    })
def post_parse(c):
    # Link/image handler settings need no post-parse processing.
    pass
def validate(c):
    # Handler entries are accepted as-is; nothing to validate.
    pass
def test(c):
    # No self-test for the link handler configuration module.
    pass
| gpl-2.0 |
xfournet/intellij-community | plugins/hg4idea/testData/bin/mercurial/repo.py | 88 | 1379 | # repo.py - repository base classes for mercurial
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from i18n import _
import error
class repository(object):
    """Base class for repository objects, providing capability queries."""

    def capable(self, name):
        '''tell whether repo supports named capability.
        return False if not supported.
        if boolean capability, return True.
        if string capability, return string.'''
        if name in self.capabilities:
            return True
        prefix = name + '='
        for cap in self.capabilities:
            if cap.startswith(prefix):
                return cap[len(prefix):]
        return False

    def requirecap(self, name, purpose):
        '''raise an exception if the given capability is not present'''
        if self.capable(name):
            return
        raise error.CapabilityError(
            _('cannot %s; remote repository does not '
              'support the %r capability') % (purpose, name))

    def local(self):
        # Local repository types override this with something truthy.
        return False

    def cancopy(self):
        # By default, a repository can be copied iff it is local.
        return self.local()

    def rjoin(self, path):
        # Join path onto the repository URL, adding '/' only when needed.
        base = self.url()
        if base.endswith('/'):
            return base + path
        return base + '/' + path
| apache-2.0 |
onceuponatimeforever/oh-mainline | vendor/packages/python-social-auth/social/tests/backends/test_disqus.py | 92 | 2166 | import json
from social.tests.backends.oauth import OAuth2Test
class DisqusOAuth2Test(OAuth2Test):
    """Exercises the Disqus OAuth2 backend against canned HTTP responses."""
    backend_path = 'social.backends.disqus.DisqusOAuth2'
    user_data_url = 'https://disqus.com/api/3.0/users/details.json'
    expected_username = 'foobar'
    # Canned token-endpoint response.
    access_token_body = json.dumps({
        'access_token': 'foobar',
        'token_type': 'bearer'
    })
    # Canned users/details.json payload in the Disqus API v3.0 envelope
    # (code 0 == success; the user record sits under 'response').
    user_data_body = json.dumps({
        'code': 0,
        'response': {
            'username': 'foobar',
            'numFollowers': 0,
            'isFollowing': False,
            'numFollowing': 0,
            'name': 'Foo Bar',
            'numPosts': 0,
            'url': '',
            'isAnonymous': False,
            'rep': 1.231755,
            'about': '',
            'isFollowedBy': False,
            'connections': {},
            'emailHash': '5280f14cedf530b544aecc31fcfe0240',
            'reputation': 1.231755,
            'avatar': {
                'small': {
                    'permalink': 'https://disqus.com/api/users/avatars/'
                                 'foobar.jpg',
                    'cache': 'https://securecdn.disqus.com/uploads/'
                             'users/453/4556/avatar32.jpg?1285535379'
                },
                'isCustom': False,
                'permalink': 'https://disqus.com/api/users/avatars/foobar.jpg',
                'cache': 'https://securecdn.disqus.com/uploads/users/453/'
                         '4556/avatar92.jpg?1285535379',
                'large': {
                    'permalink': 'https://disqus.com/api/users/avatars/'
                                 'foobar.jpg',
                    'cache': 'https://securecdn.disqus.com/uploads/users/'
                             '453/4556/avatar92.jpg?1285535379'
                }
            },
            'profileUrl': 'http://disqus.com/foobar/',
            'numLikesReceived': 0,
            'isPrimary': True,
            'joinedAt': '2010-09-26T21:09:39',
            'id': '1010101',
            'location': ''
        }
    })

    def test_login(self):
        # Full login flow against the mocked endpoints.
        self.do_login()

    def test_partial_pipeline(self):
        # Login flow interrupted and resumed mid-pipeline.
        self.do_partial_pipeline()
| agpl-3.0 |
gjr80/weewx | bin/weedb/tests/test_weedb.py | 3 | 10159 | #
# Copyright (c) 2009-2015 Tom Keffer <tkeffer@gmail.com>
#
# See the file LICENSE.txt for your full rights.
#
"""Test the weedb package.
For this test to work, user 'weewx' must have full access to database 'test':
mysql> grant select, update, create, delete, drop, insert on test.* to weewx@localhost;
"""
from __future__ import with_statement
from __future__ import absolute_import
import unittest
import weedb
import weedb.sqlite
from six.moves import map
# Connection settings exercised by the test matrix below.  The sqlite
# database lives in /tmp; the MySQL settings require a pre-created
# 'test_weewx1' database reachable by user 'weewx1' (see module docstring).
sqlite_db_dict = {'database_name': '/tmp/test.sdb', 'driver':'weedb.sqlite', 'timeout': '2'}
mysql_db_dict = {'database_name': 'test_weewx1', 'user':'weewx1', 'password':'weewx1', 'driver':'weedb.mysql'}
# Expected schema of the test tables, as reported by genSchemaOf():
# (col_number, col_name, col_type, can_be_null, default_value, part_of_primary)
schema = [(0, 'dateTime', 'INTEGER', False, None, True),
          (1, 'min', 'REAL', True, None, False),
          (2, 'mintime', 'INTEGER', True, None, False),
          (3, 'max', 'REAL', True, None, False),
          (4, 'maxtime', 'INTEGER', True, None, False),
          (5, 'sum', 'REAL', True, None, False),
          (6, 'count', 'INTEGER', True, None, False),
          (7, 'descript', 'STR', True, None, False)]
class Common(unittest.TestCase):
    """Test cases shared by all database drivers.

    Concrete subclasses must set ``self.db_dict`` (see TestSqlite and
    TestMySQL below) before the base class methods run.
    """

    def setUp(self):
        """Start each test from a clean slate by dropping any leftover DB."""
        try:
            weedb.drop(self.db_dict)
        except:
            pass

    def tearDown(self):
        # Dropping here is disabled so a failed test's database can be
        # inspected after the run.
        try:
            # weedb.drop(self.db_dict)
            pass
        except:
            pass

    def populate_db(self):
        """Create the database with tables test1/test2 and 20 rows in test1."""
        weedb.create(self.db_dict)
        # A second create of the same database must fail.
        self.assertRaises(weedb.DatabaseExists, weedb.create, self.db_dict)
        with weedb.connect(self.db_dict) as _connect:
            with weedb.Transaction(_connect) as _cursor:
                _cursor.execute("""CREATE TABLE test1 ( dateTime INTEGER NOT NULL UNIQUE PRIMARY KEY,
                    min REAL, mintime INTEGER, max REAL, maxtime INTEGER, sum REAL, count INTEGER, descript CHAR(20));""")
                _cursor.execute("""CREATE TABLE test2 ( dateTime INTEGER NOT NULL UNIQUE PRIMARY KEY,
                    min REAL, mintime INTEGER, max REAL, maxtime INTEGER, sum REAL, count INTEGER, descript CHAR(20));""")
                for irec in range(20):
                    _cursor.execute("INSERT INTO test1 (dateTime, min, mintime) VALUES (?, ?, ?)", (irec, 10*irec, irec))

    def test_drop(self):
        """Dropping a non-existent database must raise NoDatabase."""
        self.assertRaises(weedb.NoDatabase, weedb.drop, self.db_dict)

    def test_double_create(self):
        """Creating the same database twice must raise DatabaseExists."""
        weedb.create(self.db_dict)
        self.assertRaises(weedb.DatabaseExists, weedb.create, self.db_dict)

    def test_no_db(self):
        """Connecting to a non-existent database must raise NoDatabaseError."""
        self.assertRaises(weedb.NoDatabaseError, weedb.connect, self.db_dict)

    def test_no_tables(self):
        """A freshly created database has no tables and no columns."""
        weedb.create(self.db_dict)
        with weedb.connect(self.db_dict) as _connect:
            self.assertEqual(_connect.tables(), [])
            self.assertRaises(weedb.ProgrammingError, _connect.columnsOf, 'test1')
            self.assertRaises(weedb.ProgrammingError, _connect.columnsOf, 'foo')

    def test_create(self):
        """Table list, column list and schema must match what was created."""
        self.populate_db()
        with weedb.connect(self.db_dict) as _connect:
            self.assertEqual(sorted(_connect.tables()), ['test1', 'test2'])
            self.assertEqual(_connect.columnsOf('test1'), ['dateTime', 'min', 'mintime', 'max', 'maxtime', 'sum', 'count', 'descript'])
            self.assertEqual(_connect.columnsOf('test2'), ['dateTime', 'min', 'mintime', 'max', 'maxtime', 'sum', 'count', 'descript'])
            for icol, col in enumerate(_connect.genSchemaOf('test1')):
                self.assertEqual(schema[icol], col)
            for icol, col in enumerate(_connect.genSchemaOf('test2')):
                self.assertEqual(schema[icol], col)
            # Make sure an IntegrityError gets raised in the case of a duplicate key:
            with weedb.Transaction(_connect) as _cursor:
                self.assertRaises(weedb.IntegrityError, _cursor.execute,
                                  "INSERT INTO test1 (dateTime, min, mintime) VALUES (0, 10, 0)")

    def test_bad_table(self):
        """Asking for columns of an unknown table must raise."""
        self.populate_db()
        with weedb.connect(self.db_dict) as _connect:
            self.assertRaises(weedb.ProgrammingError, _connect.columnsOf, 'foo')

    def test_select(self):
        """SELECT via cursor iteration, result sets and fetchone()."""
        self.populate_db()
        with weedb.connect(self.db_dict) as _connect:
            with _connect.cursor() as _cursor:
                _cursor.execute("SELECT dateTime, min FROM test1")
                for i, _row in enumerate(_cursor):
                    self.assertEqual(_row[0], i)

                # SELECT with wild card, using a result set
                _result = _cursor.execute("SELECT * from test1")
                for i, _row in enumerate(_result):
                    self.assertEqual(_row[0], i)

                # Find a matching result set
                _cursor.execute("SELECT dateTime, min FROM test1 WHERE dateTime = 5")
                _row = _cursor.fetchone()
                self.assertEqual(_row[0], 5)
                self.assertEqual(_row[1], 50)

                # Now test where there is no matching result:
                _cursor.execute("SELECT dateTime, min FROM test1 WHERE dateTime = -1")
                _row = _cursor.fetchone()
                self.assertEqual(_row, None)

    def test_bad_select(self):
        """SELECT on bad table/column names must raise."""
        self.populate_db()
        with weedb.connect(self.db_dict) as _connect:
            with _connect.cursor() as _cursor:
                # Test SELECT on a bad table name
                self.assertRaises(weedb.ProgrammingError, _cursor.execute, "SELECT dateTime, min FROM foo")
                # Test SELECT on a bad column name
                self.assertRaises(weedb.OperationalError, _cursor.execute, "SELECT dateTime, foo FROM test1")

    def test_rollback(self):
        """An explicit rollback must discard all inserts of the transaction."""
        # Create the database and schema
        weedb.create(self.db_dict)
        with weedb.connect(self.db_dict) as _connect:
            with _connect.cursor() as _cursor:
                _cursor.execute("""CREATE TABLE test1 ( dateTime INTEGER NOT NULL UNIQUE PRIMARY KEY, x REAL );""")
                # Now start the transaction
                _connect.begin()
                for i in range(10):
                    _cursor.execute("""INSERT INTO test1 (dateTime, x) VALUES (?, ?)""", (i, i+1))
                # Roll it back
                _connect.rollback()
        # Make sure nothing is in the database
        with weedb.connect(self.db_dict) as _connect:
            with _connect.cursor() as _cursor:
                _cursor.execute("SELECT dateTime, x from test1")
                _row = _cursor.fetchone()
                self.assertEqual(_row, None)

    def test_transaction(self):
        """An exception inside a Transaction context must roll everything back."""
        # Create the database and schema
        weedb.create(self.db_dict)
        with weedb.connect(self.db_dict) as _connect:
            # With sqlite, a rollback can roll back a table creation. With MySQL, it does not. So,
            # create the table outside of the transaction. We're not as concerned about a transaction failing
            # when creating a table, because it only happens the first time weewx starts up.
            _connect.execute("""CREATE TABLE test1 ( dateTime INTEGER NOT NULL UNIQUE PRIMARY KEY, x REAL );""")
            # We're going to trigger the rollback by raising a bogus exception. Be prepared to catch it.
            try:
                with weedb.Transaction(_connect) as _cursor:
                    for i in range(10):
                        _cursor.execute("""INSERT INTO test1 (dateTime, x) VALUES (?, ?)""", (i, i+1))
                    # Raise an exception:
                    raise Exception("Bogus exception")
            except Exception:
                pass
        # Now make sure nothing is in the database
        with weedb.connect(self.db_dict) as _connect:
            with _connect.cursor() as _cursor:
                _cursor.execute("SELECT dateTime, x from test1")
                _row = _cursor.fetchone()
                self.assertEqual(_row, None)
class TestSqlite(Common):
    """Runs the Common test suite against the sqlite driver."""

    def __init__(self, *args, **kwargs):
        self.db_dict = sqlite_db_dict
        super(TestSqlite, self).__init__(*args, **kwargs)

    def test_variable(self):
        """get_variable() must report pragmas and return None for unknowns."""
        weedb.create(self.db_dict)
        with weedb.connect(self.db_dict) as _connect:
            if weedb.sqlite.sqlite_version > '3.4.2':
                # Early versions of sqlite did not support journal modes. Not sure exactly when it started,
                # but I know that v3.4.2 did not have it.
                _v = _connect.get_variable('journal_mode')
                self.assertEqual(_v[1].lower(), 'delete')
            _v = _connect.get_variable('foo')
            self.assertEqual(_v, None)
        # NOTE(review): the context manager above already closed the
        # connection; this extra close() looks redundant — confirm it is a
        # no-op for the sqlite driver.
        _connect.close()
class TestMySQL(Common):
    """Runs the Common test suite against the MySQL driver."""

    def setUp(self):
        # Skip the whole test when no MySQL client library is installed
        # (try MySQLdb first, then the pymysql drop-in).
        try:
            import MySQLdb
        except ImportError:
            try:
                import pymysql as MySQLdb
            except ImportError as e:
                raise unittest.case.SkipTest(e)
        super(TestMySQL, self).setUp()

    def __init__(self, *args, **kwargs):
        self.db_dict = mysql_db_dict
        super(TestMySQL, self).__init__(*args, **kwargs)

    def test_variable(self):
        """get_variable() must report server vars and return None for unknowns."""
        weedb.create(self.db_dict)
        with weedb.connect(self.db_dict) as _connect:
            _v = _connect.get_variable('lower_case_table_names')
            self.assertTrue(_v[1] in ['0', '1', '2'], "Unknown lower_case_table_names value")
            _v = _connect.get_variable('foo')
            self.assertEqual(_v, None)
def suite():
    """Build the combined suite: every test case against sqlite, then MySQL."""
    tests = ['test_drop', 'test_double_create', 'test_no_db', 'test_no_tables',
             'test_create', 'test_bad_table', 'test_select', 'test_bad_select',
             'test_rollback', 'test_transaction', 'test_variable']
    sqlite_cases = [TestSqlite(name) for name in tests]
    mysql_cases = [TestMySQL(name) for name in tests]
    return unittest.TestSuite(sqlite_cases + mysql_cases)
if __name__ == '__main__':
    # Run the combined sqlite + MySQL suite when executed as a script.
    unittest.TextTestRunner(verbosity=2).run(suite())
| gpl-3.0 |
saurabh6790/frappe | frappe/integrations/doctype/token_cache/token_cache.py | 1 | 1988 | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from datetime import datetime, timedelta
import frappe
from frappe import _
from frappe.utils import cstr, cint
from frappe.model.document import Document
class TokenCache(Document):
    """Cached OAuth 2.0 token data (access/refresh token, expiry, scopes)
    for a connected app / user pair."""

    def get_auth_header(self):
        """Return a Bearer ``Authorization`` header built from the cached
        access token.

        Raises:
            frappe.exceptions.DoesNotExistError: if no access token is cached.
        """
        if self.access_token:
            headers = {'Authorization': 'Bearer ' + self.get_password('access_token')}
            return headers

        raise frappe.exceptions.DoesNotExistError

    def update_data(self, data):
        """
        Store data returned by authorization flow.

        Params:
        data - Dict with access_token, refresh_token, expires_in and scope.
        """
        token_type = cstr(data.get('token_type', '')).lower()
        if token_type not in ['bearer', 'mac']:
            frappe.throw(_('Received an invalid token type.'))
        # Normalize capitalization: 'Bearer' or 'MAC'
        token_type = token_type.title() if token_type == 'bearer' else token_type.upper()

        self.token_type = token_type
        self.access_token = cstr(data.get('access_token', ''))
        self.refresh_token = cstr(data.get('refresh_token', ''))
        self.expires_in = cint(data.get('expires_in', 0))

        # Replace the stored scopes with whatever the provider returned
        # (a space-separated string or a list, per OAuth conventions).
        new_scopes = data.get('scope')
        if new_scopes:
            if isinstance(new_scopes, str):
                new_scopes = new_scopes.split(' ')
            if isinstance(new_scopes, list):
                self.scopes = None
                for scope in new_scopes:
                    self.append('scopes', {'scope': scope})

        # The one-time CSRF state is consumed once tokens arrive.
        self.state = None
        self.save(ignore_permissions=True)
        frappe.db.commit()
        return self

    def get_expires_in(self):
        """Return the number of seconds until the access token expires;
        negative once it has already expired.

        Bug fix: ``expires_in`` is a lifetime in *seconds* (OAuth 2.0,
        RFC 6749), so it must be applied as ``timedelta(seconds=...)`` —
        the previous ``timedelta(self.expires_in)`` interpreted it as days.
        The subtraction order is also corrected to ``expiry - now`` so
        that ``is_expired()`` is True only after the expiry moment.
        """
        expiry_time = frappe.utils.get_datetime(self.modified) + timedelta(seconds=self.expires_in)
        return (expiry_time - datetime.now()).total_seconds()

    def is_expired(self):
        """True once the access token's lifetime has elapsed."""
        return self.get_expires_in() < 0

    def get_json(self):
        """Return the cached token as a plain dict (token endpoint shape)."""
        return {
            'access_token': self.get_password('access_token', ''),
            'refresh_token': self.get_password('refresh_token', ''),
            'expires_in': self.get_expires_in(),
            'token_type': self.token_type
        }
| mit |
willzhang05/postgrestesting1 | postgrestesting1/lib/python3.5/site-packages/django/contrib/gis/geos/__init__.py | 61 | 1176 | """
The GeoDjango GEOS module. Please consult the GeoDjango documentation
for more details: https://docs.djangoproject.com/en/dev/ref/contrib/gis/geos/
"""
__all__ = ['HAS_GEOS']
try:
from .libgeos import geos_version, geos_version_info # NOQA: flake8 detects only the last __all__
HAS_GEOS = True
__all__ += ['geos_version', 'geos_version_info']
except ImportError:
HAS_GEOS = False
if HAS_GEOS:
from .geometry import GEOSGeometry, wkt_regex, hex_regex
from .point import Point
from .linestring import LineString, LinearRing
from .polygon import Polygon
from .collections import GeometryCollection, MultiPoint, MultiLineString, MultiPolygon
from .error import GEOSException, GEOSIndexError
from .io import WKTReader, WKTWriter, WKBReader, WKBWriter
from .factory import fromfile, fromstr
__all__ += [
'GEOSGeometry', 'wkt_regex', 'hex_regex', 'Point', 'LineString',
'LinearRing', 'Polygon', 'GeometryCollection', 'MultiPoint',
'MultiLineString', 'MultiPolygon', 'GEOSException', 'GEOSIndexError',
'WKTReader', 'WKTWriter', 'WKBReader', 'WKBWriter', 'fromfile',
'fromstr',
]
| mit |
Mitchkoens/sympy | sympy/printing/latex.py | 7 | 69866 | """
A Printer which converts an expression into its LaTeX equivalent.
"""
from __future__ import print_function, division
from sympy.core import S, Add, Symbol
from sympy.core.function import _coeff_isneg
from sympy.core.sympify import SympifyError
from sympy.core.alphabets import greeks
from sympy.core.operations import AssocOp
from sympy.logic.boolalg import true
## sympy.printing imports
from .printer import Printer
from .conventions import split_super_sub, requires_partial
from .precedence import precedence, PRECEDENCE
import mpmath.libmp as mlib
from mpmath.libmp import prec_to_dps
from sympy.core.compatibility import default_sort_key, range
from sympy.utilities.iterables import has_variety
import re
# Hand-picked functions which can be used directly in both LaTeX and MathJax
# Complete list at http://www.mathjax.org/docs/1.1/tex.html#supported-latex-commands
# This variable only contains those functions which sympy uses.
# Function names that may be emitted as bare LaTeX commands (\sin, \log, ...)
# instead of \operatorname{...}; all are supported by both LaTeX and MathJax.
accepted_latex_functions = ['arcsin', 'arccos', 'arctan', 'sin', 'cos', 'tan',
                            'sinh', 'cosh', 'tanh', 'sqrt', 'ln', 'log', 'sec', 'csc',
                            'cot', 'coth', 're', 'im', 'frac', 'root', 'arg',
                            ]
# Overrides for Greek-letter symbol names whose LaTeX form is not simply
# a backslash plus the name: capital Greek letters that look identical to
# Latin letters map to the plain Latin letter, plus assorted variants.
tex_greek_dictionary = {
    'Alpha': 'A',
    'Beta': 'B',
    'Epsilon': 'E',
    'Zeta': 'Z',
    'Eta': 'H',
    'Iota': 'I',
    'Kappa': 'K',
    'Mu': 'M',
    'Nu': 'N',
    'omicron': 'o',
    'Omicron': 'O',
    'Rho': 'P',
    'Tau': 'T',
    'Chi': 'X',
    'lamda': r'\lambda',
    'Lamda': r'\Lambda',
    'khi': r'\chi',
    'Khi': r'X',
    'varepsilon': r'\varepsilon',
    'varkappa': r'\varkappa',
    'varphi': r'\varphi',
    'varpi': r'\varpi',
    'varrho': r'\varrho',
    'varsigma': r'\varsigma',
    'vartheta': r'\vartheta',
}
# Symbol names rendered verbatim as a backslashed LaTeX command (\hbar, ...).
other_symbols = {'aleph', 'beth', 'daleth', 'gimel', 'ell', 'eth', 'hbar',
                 'hslash', 'mho', 'wp'}
# Variable name modifiers
modifier_dict = {
# Accents
'mathring': lambda s: r'\mathring{'+s+r'}',
'ddddot': lambda s: r'\ddddot{'+s+r'}',
'dddot': lambda s: r'\dddot{'+s+r'}',
'ddot': lambda s: r'\ddot{'+s+r'}',
'dot': lambda s: r'\dot{'+s+r'}',
'check': lambda s: r'\check{'+s+r'}',
'breve': lambda s: r'\breve{'+s+r'}',
'acute': lambda s: r'\acute{'+s+r'}',
'grave': lambda s: r'\grave{'+s+r'}',
'tilde': lambda s: r'\tilde{'+s+r'}',
'hat': lambda s: r'\hat{'+s+r'}',
'bar': lambda s: r'\bar{'+s+r'}',
'vec': lambda s: r'\vec{'+s+r'}',
'prime': lambda s: "{"+s+"}'",
'prm': lambda s: "{"+s+"}'",
# Faces
'bold': lambda s: r'\boldsymbol{'+s+r'}',
'bm': lambda s: r'\boldsymbol{'+s+r'}',
'cal': lambda s: r'\mathcal{'+s+r'}',
'scr': lambda s: r'\mathscr{'+s+r'}',
'frak': lambda s: r'\mathfrak{'+s+r'}',
# Brackets
'norm': lambda s: r'\left\|{'+s+r'}\right\|',
'avg': lambda s: r'\left\langle{'+s+r'}\right\rangle',
'abs': lambda s: r'\left|{'+s+r'}\right|',
'mag': lambda s: r'\left|{'+s+r'}\right|',
}
greek_letters_set = frozenset(greeks)
class LatexPrinter(Printer):
printmethod = "_latex"
_default_settings = {
"order": None,
"mode": "plain",
"itex": False,
"fold_frac_powers": False,
"fold_func_brackets": False,
"fold_short_frac": None,
"long_frac_ratio": 2,
"mul_symbol": None,
"inv_trig_style": "abbreviated",
"mat_str": None,
"mat_delim": "[",
"symbol_names": {},
}
    def __init__(self, settings=None):
        """Validate settings and precompute derived option values.

        Raises ValueError for an unknown 'mode'. Also resolves the two
        multiplication separators ('mul_symbol_latex' for symbols,
        'mul_symbol_latex_numbers' for adjacent numbers) from the
        'mul_symbol' setting.
        """
        Printer.__init__(self, settings)
        if 'mode' in self._settings:
            valid_modes = ['inline', 'plain', 'equation',
                           'equation*']
            if self._settings['mode'] not in valid_modes:
                raise ValueError("'mode' must be one of 'inline', 'plain', "
                    "'equation' or 'equation*'")
        # Inline math defaults to the compact a / b fraction form unless
        # the caller chose explicitly.
        if self._settings['fold_short_frac'] is None and \
                self._settings['mode'] == 'inline':
            self._settings['fold_short_frac'] = True
        mul_symbol_table = {
            None: r" ",
            "ldot": r" \,.\, ",
            "dot": r" \cdot ",
            "times": r" \times "
        }
        self._settings['mul_symbol_latex'] = \
            mul_symbol_table[self._settings['mul_symbol']]
        # Numbers always need a visible separator; fall back to \cdot.
        self._settings['mul_symbol_latex_numbers'] = \
            mul_symbol_table[self._settings['mul_symbol'] or 'dot']
        self._delim_dict = {'(': ')', '[': ']'}
def parenthesize(self, item, level):
if precedence(item) <= level:
return r"\left(%s\right)" % self._print(item)
else:
return self._print(item)
def doprint(self, expr):
tex = Printer.doprint(self, expr)
if self._settings['mode'] == 'plain':
return tex
elif self._settings['mode'] == 'inline':
return r"$%s$" % tex
elif self._settings['itex']:
return r"$$%s$$" % tex
else:
env_str = self._settings['mode']
return r"\begin{%s}%s\end{%s}" % (env_str, tex, env_str)
    def _needs_brackets(self, expr):
        """
        Returns True if the expression needs to be wrapped in brackets when
        printed, False otherwise. For example: a + b => True; a => False;
        10 => False; -10 => True.
        """
        # Negative Rationals are Atoms but still need brackets; every other
        # non-Rational Atom (except -1 via S.NegativeOne) does not.
        return not ((expr.is_Integer and expr.is_nonnegative)
                    or (expr.is_Atom and (expr is not S.NegativeOne
                                          and expr.is_Rational is False)))
def _needs_function_brackets(self, expr):
"""
Returns True if the expression needs to be wrapped in brackets when
passed as an argument to a function, False otherwise. This is a more
liberal version of _needs_brackets, in that many expressions which need
to be wrapped in brackets when added/subtracted/raised to a power do
not need them when passed to a function. Such an example is a*b.
"""
if not self._needs_brackets(expr):
return False
else:
# Muls of the form a*b*c... can be folded
if expr.is_Mul and not self._mul_is_clean(expr):
return True
# Pows which don't need brackets can be folded
elif expr.is_Pow and not self._pow_is_clean(expr):
return True
# Add and Function always need brackets
elif expr.is_Add or expr.is_Function:
return True
else:
return False
    def _needs_mul_brackets(self, expr, first=False, last=False):
        """
        Returns True if the expression needs to be wrapped in brackets when
        printed as part of a Mul, False otherwise. This is True for Add,
        but also for some container objects that would not need brackets
        when appearing last in a Mul, e.g. an Integral. ``last=True``
        specifies that this expr is the last to appear in a Mul.
        ``first=True`` specifies that this expr is the first to appear in a Mul.
        """
        from sympy import Integral, Piecewise, Product, Sum
        if expr.is_Add:
            return True
        elif expr.is_Relational:
            return True
        elif expr.is_Mul:
            # A leading minus sign is only unambiguous on the first factor.
            if not first and _coeff_isneg(expr):
                return True
        # Big operators (sums, integrals, ...) swallow everything to their
        # right, so they only go bare when they are the last factor.
        if (not last and
                any([expr.has(x) for x in (Integral, Piecewise, Product, Sum)])):
            return True
        return False
def _needs_add_brackets(self, expr):
"""
Returns True if the expression needs to be wrapped in brackets when
printed as part of an Add, False otherwise. This is False for most
things.
"""
if expr.is_Relational:
return True
return False
def _mul_is_clean(self, expr):
for arg in expr.args:
if arg.is_Function:
return False
return True
    def _pow_is_clean(self, expr):
        # A Pow is "clean" (safe to fold) when its base needs no brackets.
        return not self._needs_brackets(expr.base)
def _do_exponent(self, expr, exp):
if exp is not None:
return r"\left(%s\right)^{%s}" % (expr, exp)
else:
return expr
    def _print_bool(self, e):
        # Booleans render upright: \mathrm{True} / \mathrm{False}.
        return r"\mathrm{%s}" % e
    _print_BooleanTrue = _print_bool
    _print_BooleanFalse = _print_bool
    def _print_NoneType(self, e):
        # None renders upright as \mathrm{None}.
        return r"\mathrm{%s}" % e
    def _print_Add(self, expr, order=None):
        """Print an Add as " + "/" - "-joined terms, negating negative
        terms so the sign becomes the separator."""
        if self.order == 'none':
            terms = list(expr.args)
        else:
            terms = self._as_ordered_terms(expr, order=order)
        tex = ""
        for i, term in enumerate(terms):
            if i == 0:
                # First term carries its own sign via self._print.
                pass
            elif _coeff_isneg(term):
                tex += " - "
                term = -term
            else:
                tex += " + "
            term_tex = self._print(term)
            # Relationals must be parenthesized inside a sum.
            if self._needs_add_brackets(term):
                term_tex = r"\left(%s\right)" % term_tex
            tex += term_tex
        return tex
    def _print_Float(self, expr):
        """Print a Float, converting 'e' notation to m \\cdot 10^{e}."""
        # Based off of that in StrPrinter
        dps = prec_to_dps(expr._prec)
        str_real = mlib.to_str(expr._mpf_, dps, strip_zeros=True)
        # Must always have a mul symbol (as 2.5 10^{20} just looks odd)
        # thus we use the number separator
        separator = self._settings['mul_symbol_latex_numbers']
        if 'e' in str_real:
            (mant, exp) = str_real.split('e')
            # Drop an explicit leading '+' on the exponent.
            if exp[0] == '+':
                exp = exp[1:]
            return r"%s%s10^{%s}" % (mant, separator, exp)
        elif str_real == "+inf":
            return r"\infty"
        elif str_real == "-inf":
            return r"- \infty"
        else:
            return str_real
    def _print_Mul(self, expr):
        """Print a Mul, choosing between an inline product, a short
        ``a / b`` fraction, or a ``\\frac`` depending on settings and on
        the relative size of numerator and denominator."""
        # Pull a leading minus sign out front and print the positive part.
        if _coeff_isneg(expr):
            expr = -expr
            tex = "- "
        else:
            tex = ""
        from sympy.simplify import fraction
        numer, denom = fraction(expr, exact=True)
        separator = self._settings['mul_symbol_latex']
        numbersep = self._settings['mul_symbol_latex_numbers']
        def convert(expr):
            # Render a (possibly non-Mul) factor sequence with the right
            # separator between each pair of factors.
            if not expr.is_Mul:
                return str(self._print(expr))
            else:
                _tex = last_term_tex = ""
                if self.order not in ('old', 'none'):
                    args = expr.as_ordered_factors()
                else:
                    args = expr.args
                for i, term in enumerate(args):
                    term_tex = self._print(term)
                    if self._needs_mul_brackets(term, first=(i == 0),
                                                last=(i == len(args) - 1)):
                        term_tex = r"\left(%s\right)" % term_tex
                    # Adjacent numeric factors always get an explicit
                    # multiplication sign, never just a thin space.
                    if re.search("[0-9][} ]*$", last_term_tex) and \
                            re.match("[{ ]*[-+0-9]", term_tex):
                        # between two numbers
                        _tex += numbersep
                    elif _tex:
                        _tex += separator
                    _tex += term_tex
                    last_term_tex = term_tex
                return _tex
        if denom is S.One:
            # use the original expression here, since fraction() may have
            # altered it when producing numer and denom
            tex += convert(expr)
        else:
            snumer = convert(numer)
            sdenom = convert(denom)
            ldenom = len(sdenom.split())
            ratio = self._settings['long_frac_ratio']
            if self._settings['fold_short_frac'] \
                    and ldenom <= 2 and not "^" in sdenom:
                # handle short fractions
                if self._needs_mul_brackets(numer, last=False):
                    tex += r"\left(%s\right) / %s" % (snumer, sdenom)
                else:
                    tex += r"%s / %s" % (snumer, sdenom)
            elif len(snumer.split()) > ratio*ldenom:
                # handle long fractions
                if self._needs_mul_brackets(numer, last=True):
                    tex += r"\frac{1}{%s}%s\left(%s\right)" \
                        % (sdenom, separator, snumer)
                elif numer.is_Mul:
                    # split a long numerator: keep the part that fits (a)
                    # inside the \frac and push the rest (b) after it.
                    a = S.One
                    b = S.One
                    for x in numer.args:
                        if self._needs_mul_brackets(x, last=False) or \
                                len(convert(a*x).split()) > ratio*ldenom or \
                                (b.is_commutative is x.is_commutative is False):
                            b *= x
                        else:
                            a *= x
                    if self._needs_mul_brackets(b, last=True):
                        tex += r"\frac{%s}{%s}%s\left(%s\right)" \
                            % (convert(a), sdenom, separator, convert(b))
                    else:
                        tex += r"\frac{%s}{%s}%s%s" \
                            % (convert(a), sdenom, separator, convert(b))
                else:
                    tex += r"\frac{1}{%s}%s%s" % (sdenom, separator, snumer)
            else:
                tex += r"\frac{%s}{%s}" % (snumer, sdenom)
        return tex
    def _print_Pow(self, expr):
        """Print a Pow, with special handling for roots (x**(1/n)),
        folded fractional powers, and negative rational exponents."""
        # Treat x**Rational(1,n) as special case
        if expr.exp.is_Rational and abs(expr.exp.p) == 1 and expr.exp.q != 1:
            base = self._print(expr.base)
            expq = expr.exp.q
            if expq == 2:
                tex = r"\sqrt{%s}" % base
            elif self._settings['itex']:
                # itex has no \sqrt[n]{}; it uses \root{n}{} instead.
                tex = r"\root{%d}{%s}" % (expq, base)
            else:
                tex = r"\sqrt[%d]{%s}" % (expq, base)
            if expr.exp.is_negative:
                return r"\frac{1}{%s}" % tex
            else:
                return tex
        elif self._settings['fold_frac_powers'] \
                and expr.exp.is_Rational \
                and expr.exp.q != 1:
            base, p, q = self._print(expr.base), expr.exp.p, expr.exp.q
            if expr.base.is_Function:
                return self._print(expr.base, "%s/%s" % (p, q))
            if self._needs_brackets(expr.base):
                return r"\left(%s\right)^{%s/%s}" % (base, p, q)
            return r"%s^{%s/%s}" % (base, p, q)
        elif expr.exp.is_Rational and expr.exp.is_negative and expr.base.is_commutative:
            # Things like 1/x
            return self._print_Mul(expr)
        else:
            if expr.base.is_Function:
                return self._print(expr.base, self._print(expr.exp))
            else:
                if expr.is_commutative and expr.exp == -1:
                    #solves issue 4129
                    #As Mul always simplify 1/x to x**-1
                    #The objective is achieved with this hack
                    #first we get the latex for -1 * expr,
                    #which is a Mul expression
                    tex = self._print(S.NegativeOne * expr).strip()
                    #the result comes with a minus and a space, so we remove
                    if tex[:1] == "-":
                        return tex[1:].strip()
                if self._needs_brackets(expr.base):
                    tex = r"\left(%s\right)^{%s}"
                else:
                    tex = r"%s^{%s}"
                return tex % (self._print(expr.base),
                              self._print(expr.exp))
def _print_Sum(self, expr):
if len(expr.limits) == 1:
tex = r"\sum_{%s=%s}^{%s} " % \
tuple([ self._print(i) for i in expr.limits[0] ])
else:
def _format_ineq(l):
return r"%s \leq %s \leq %s" % \
tuple([self._print(s) for s in (l[1], l[0], l[2])])
tex = r"\sum_{\substack{%s}} " % \
str.join('\\\\', [ _format_ineq(l) for l in expr.limits ])
if isinstance(expr.function, Add):
tex += r"\left(%s\right)" % self._print(expr.function)
else:
tex += self._print(expr.function)
return tex
def _print_Product(self, expr):
if len(expr.limits) == 1:
tex = r"\prod_{%s=%s}^{%s} " % \
tuple([ self._print(i) for i in expr.limits[0] ])
else:
def _format_ineq(l):
return r"%s \leq %s \leq %s" % \
tuple([self._print(s) for s in (l[1], l[0], l[2])])
tex = r"\prod_{\substack{%s}} " % \
str.join('\\\\', [ _format_ineq(l) for l in expr.limits ])
if isinstance(expr.function, Add):
tex += r"\left(%s\right)" % self._print(expr.function)
else:
tex += self._print(expr.function)
return tex
    def _print_BasisDependent(self, expr):
        """Print a vector/dyadic as a signed sum of basis components."""
        from sympy.vector import Vector
        o1 = []
        if expr == expr.zero:
            return expr.zero._latex_form
        if isinstance(expr, Vector):
            items = expr.separate().items()
        else:
            items = [(0, expr)]
        for system, vect in items:
            inneritems = list(vect.components.items())
            # Sort by basis-vector name for a deterministic order.
            inneritems.sort(key = lambda x:x[0].__str__())
            for k, v in inneritems:
                if v == 1:
                    o1.append(' + ' + k._latex_form)
                elif v == -1:
                    o1.append(' - ' + k._latex_form)
                else:
                    arg_str = '(' + LatexPrinter().doprint(v) + ')'
                    o1.append(' + ' + arg_str + k._latex_form)
        outstr = (''.join(o1))
        # Strip the leading ' + ' entirely, but keep a bare '-' sign.
        if outstr[1] != '-':
            outstr = outstr[3:]
        else:
            outstr = outstr[1:]
        return outstr
def _print_Indexed(self, expr):
tex = self._print(expr.base)+'_{%s}' % ','.join(
map(self._print, expr.indices))
return tex
    def _print_IndexedBase(self, expr):
        # An IndexedBase prints as its label alone.
        return self._print(expr.label)
    def _print_Derivative(self, expr):
        """Print a Derivative as a Leibniz fraction, using \\partial when
        the expression has several free symbols."""
        dim = len(expr.variables)
        if requires_partial(expr):
            diff_symbol = r'\partial'
        else:
            diff_symbol = r'd'
        if dim == 1:
            tex = r"\frac{%s}{%s %s}" % (diff_symbol, diff_symbol,
                self._print(expr.variables[0]))
        else:
            # Run-length encode consecutive identical variables so that
            # d/dx d/dx becomes d x^{2}.
            multiplicity, i, tex = [], 1, ""
            current = expr.variables[0]
            for symbol in expr.variables[1:]:
                if symbol == current:
                    i = i + 1
                else:
                    multiplicity.append((current, i))
                    current, i = symbol, 1
            else:
                # for-else: always flush the last run after the loop ends.
                multiplicity.append((current, i))
            for x, i in multiplicity:
                if i == 1:
                    tex += r"%s %s" % (diff_symbol, self._print(x))
                else:
                    tex += r"%s %s^{%s}" % (diff_symbol, self._print(x), i)
            tex = r"\frac{%s^{%s}}{%s} " % (diff_symbol, dim, tex)
        # Parenthesize sums/products so the derivative binds correctly.
        if isinstance(expr.expr, AssocOp):
            return r"%s\left(%s\right)" % (tex, self._print(expr.expr))
        else:
            return r"%s %s" % (tex, self._print(expr.expr))
def _print_Subs(self, subs):
expr, old, new = subs.args
latex_expr = self._print(expr)
latex_old = (self._print(e) for e in old)
latex_new = (self._print(e) for e in new)
latex_subs = r'\\ '.join(
e[0] + '=' + e[1] for e in zip(latex_old, latex_new))
return r'\left. %s \right|_{\substack{ %s }}' % (latex_expr, latex_subs)
    def _print_Integral(self, expr):
        """Print an Integral, collapsing up to four unbounded integrals
        into \\iint-style symbols, otherwise nesting \\int with limits."""
        tex, symbols = "", []
        # Only up to \iiiint exists
        if len(expr.limits) <= 4 and all(len(lim) == 1 for lim in expr.limits):
            # Use len(expr.limits)-1 so that syntax highlighters don't think
            # \" is an escaped quote
            tex = r"\i" + "i"*(len(expr.limits) - 1) + "nt"
            symbols = [r"\, d%s" % self._print(symbol[0])
                       for symbol in expr.limits]
        else:
            # Innermost limit first, so reverse to print outermost first.
            for lim in reversed(expr.limits):
                symbol = lim[0]
                tex += r"\int"
                if len(lim) > 1:
                    if self._settings['mode'] in ['equation', 'equation*'] \
                            and not self._settings['itex']:
                        tex += r"\limits"
                    if len(lim) == 3:
                        tex += "_{%s}^{%s}" % (self._print(lim[1]),
                                               self._print(lim[2]))
                    if len(lim) == 2:
                        tex += "^{%s}" % (self._print(lim[1]))
                symbols.insert(0, r"\, d%s" % self._print(symbol))
        return r"%s %s%s" % (tex,
            str(self._print(expr.function)), "".join(symbols))
    def _print_Limit(self, expr):
        """Print a Limit as \\lim_{z \\to z0^dir} e; the direction
        superscript is omitted at +/- infinity."""
        e, z, z0, dir = expr.args
        tex = r"\lim_{%s \to " % self._print(z)
        if z0 in (S.Infinity, S.NegativeInfinity):
            tex += r"%s}" % self._print(z0)
        else:
            tex += r"%s^%s}" % (self._print(z0), self._print(dir))
        # Parenthesize sums/products so the limit binds over all of e.
        if isinstance(e, AssocOp):
            return r"%s\left(%s\right)" % (tex, self._print(e))
        else:
            return r"%s %s" % (tex, self._print(e))
    def _hprint_Function(self, func):
        r'''
        Logic to decide how to render a function to latex
          - if it is a recognized latex name, use the appropriate latex command
          - if it is a single letter, just use that letter
          - if it is a longer name, then put \operatorname{} around it and be
            mindful of underscores in the name
        '''
        func = self._deal_with_super_sub(func)
        if func in accepted_latex_functions:
            name = r"\%s" % func
        elif len(func) == 1 or func.startswith('\\'):
            # Already a LaTeX command (or a bare letter): use as-is.
            name = func
        else:
            name = r"\operatorname{%s}" % func
        return name
    def _print_Function(self, expr, exp=None):
        r'''
        Render functions to LaTeX, handling functions that LaTeX knows about
        e.g., sin, cos, ... by using the proper LaTeX command (\sin, \cos, ...).
        For single-letter function names, render them as regular LaTeX math
        symbols. For multi-letter function names that LaTeX does not know
        about, (e.g., Li, sech) use \operatorname{} so that the function name
        is rendered in Roman font and LaTeX handles spacing properly.

        expr is the expression involving the function
        exp is an exponent
        '''
        func = expr.func.__name__
        # Dedicated printers (e.g. _print_gamma) take precedence.
        if hasattr(self, '_print_' + func):
            return getattr(self, '_print_' + func)(expr, exp)
        else:
            args = [ str(self._print(arg)) for arg in expr.args ]
            # How inverse trig functions should be displayed, formats are:
            # abbreviated: asin, full: arcsin, power: sin^-1
            inv_trig_style = self._settings['inv_trig_style']
            # If we are dealing with a power-style inverse trig function
            inv_trig_power_case = False
            # If it is applicable to fold the argument brackets
            can_fold_brackets = self._settings['fold_func_brackets'] and \
                len(args) == 1 and \
                not self._needs_function_brackets(expr.args[0])
            inv_trig_table = ["asin", "acos", "atan", "acot"]
            # If the function is an inverse trig function, handle the style
            if func in inv_trig_table:
                if inv_trig_style == "abbreviated":
                    func = func
                elif inv_trig_style == "full":
                    func = "arc" + func[1:]
                elif inv_trig_style == "power":
                    func = func[1:]
                    inv_trig_power_case = True
                    # Can never fold brackets if we're raised to a power
                    if exp is not None:
                        can_fold_brackets = False
            if inv_trig_power_case:
                if func in accepted_latex_functions:
                    name = r"\%s^{-1}" % func
                else:
                    name = r"\operatorname{%s}^{-1}" % func
            elif exp is not None:
                name = r'%s^{%s}' % (self._hprint_Function(func), exp)
            else:
                name = self._hprint_Function(func)
            if can_fold_brackets:
                if func in accepted_latex_functions:
                    # Wrap argument safely to avoid parse-time conflicts
                    # with the function name itself
                    name += r" {%s}"
                else:
                    name += r"%s"
            else:
                name += r"{\left (%s \right )}"
            if inv_trig_power_case and exp is not None:
                name += r"^{%s}" % exp
            return name % ",".join(args)
    def _print_UndefinedFunction(self, expr):
        # An undefined function prints via its (possibly scripted) name.
        return self._hprint_Function(str(expr))
    def _print_FunctionClass(self, expr):
        # An unapplied function class may supply its own no-argument LaTeX
        # form via a ``_latex_no_arg`` attribute.
        if hasattr(expr, '_latex_no_arg'):
            return expr._latex_no_arg(self)
        return self._hprint_Function(str(expr))
def _print_Lambda(self, expr):
symbols, expr = expr.args
if len(symbols) == 1:
symbols = self._print(symbols[0])
else:
symbols = self._print(tuple(symbols))
args = (symbols, self._print(expr))
tex = r"\left( %s \mapsto %s \right)" % (symbols, self._print(expr))
return tex
def _print_Min(self, expr, exp=None):
args = sorted(expr.args, key=default_sort_key)
texargs = [r"%s" % self._print(symbol) for symbol in args]
tex = r"\min\left(%s\right)" % ", ".join(texargs)
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_Max(self, expr, exp=None):
args = sorted(expr.args, key=default_sort_key)
texargs = [r"%s" % self._print(symbol) for symbol in args]
tex = r"\max\left(%s\right)" % ", ".join(texargs)
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_floor(self, expr, exp=None):
tex = r"\lfloor{%s}\rfloor" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_ceiling(self, expr, exp=None):
tex = r"\lceil{%s}\rceil" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_Abs(self, expr, exp=None):
tex = r"\left|{%s}\right|" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
_print_Determinant = _print_Abs
    def _print_re(self, expr, exp=None):
        """Print the real part as \\Re, parenthesizing compound args."""
        if self._needs_brackets(expr.args[0]):
            tex = r"\Re {\left (%s \right )}" % self._print(expr.args[0])
        else:
            tex = r"\Re{%s}" % self._print(expr.args[0])
        return self._do_exponent(tex, exp)
    def _print_im(self, expr, exp=None):
        """Print the imaginary part as \\Im, parenthesizing compound args."""
        if self._needs_brackets(expr.args[0]):
            tex = r"\Im {\left ( %s \right )}" % self._print(expr.args[0])
        else:
            tex = r"\Im{%s}" % self._print(expr.args[0])
        return self._do_exponent(tex, exp)
    def _print_Not(self, e):
        """Print logical negation: negated Equivalent/Implies get the
        slashed connective; other Booleans get \\neg (...)."""
        from sympy import Equivalent, Implies
        if isinstance(e.args[0], Equivalent):
            return self._print_Equivalent(e.args[0], r"\not\equiv")
        if isinstance(e.args[0], Implies):
            return self._print_Implies(e.args[0], r"\not\Rightarrow")
        if (e.args[0].is_Boolean):
            return r"\neg (%s)" % self._print(e.args[0])
        else:
            return r"\neg %s" % self._print(e.args[0])
def _print_LogOp(self, args, char):
arg = args[0]
if arg.is_Boolean and not arg.is_Not:
tex = r"\left(%s\right)" % self._print(arg)
else:
tex = r"%s" % self._print(arg)
for arg in args[1:]:
if arg.is_Boolean and not arg.is_Not:
tex += r" %s \left(%s\right)" % (char, self._print(arg))
else:
tex += r" %s %s" % (char, self._print(arg))
return tex
def _print_And(self, e):
args = sorted(e.args, key=default_sort_key)
return self._print_LogOp(args, r"\wedge")
def _print_Or(self, e):
args = sorted(e.args, key=default_sort_key)
return self._print_LogOp(args, r"\vee")
def _print_Xor(self, e):
args = sorted(e.args, key=default_sort_key)
return self._print_LogOp(args, r"\veebar")
    def _print_Implies(self, e, altchar=None):
        # Argument order (antecedent, consequent) is preserved; *altchar*
        # lets _print_Not render a negated implication.
        return self._print_LogOp(e.args, altchar or r"\Rightarrow")
def _print_Equivalent(self, e, altchar=None):
args = sorted(e.args, key=default_sort_key)
return self._print_LogOp(args, altchar or r"\equiv")
def _print_conjugate(self, expr, exp=None):
tex = r"\overline{%s}" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_polar_lift(self, expr, exp=None):
func = r"\operatorname{polar\_lift}"
arg = r"{\left (%s \right )}" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}%s" % (func, exp, arg)
else:
return r"%s%s" % (func, arg)
    def _print_ExpBase(self, expr, exp=None):
        """Print exp-family functions as e^{arg}."""
        # TODO should exp_polar be printed differently?
        #      what about exp_polar(0), exp_polar(1)?
        tex = r"e^{%s}" % self._print(expr.args[0])
        return self._do_exponent(tex, exp)
def _print_elliptic_k(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"K^{%s}%s" % (exp, tex)
else:
return r"K%s" % tex
def _print_elliptic_f(self, expr, exp=None):
tex = r"\left(%s\middle| %s\right)" % \
(self._print(expr.args[0]), self._print(expr.args[1]))
if exp is not None:
return r"F^{%s}%s" % (exp, tex)
else:
return r"F%s" % tex
def _print_elliptic_e(self, expr, exp=None):
if len(expr.args) == 2:
tex = r"\left(%s\middle| %s\right)" % \
(self._print(expr.args[0]), self._print(expr.args[1]))
else:
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"E^{%s}%s" % (exp, tex)
else:
return r"E%s" % tex
def _print_elliptic_pi(self, expr, exp=None):
if len(expr.args) == 3:
tex = r"\left(%s; %s\middle| %s\right)" % \
(self._print(expr.args[0]), self._print(expr.args[1]), \
self._print(expr.args[2]))
else:
tex = r"\left(%s\middle| %s\right)" % \
(self._print(expr.args[0]), self._print(expr.args[1]))
if exp is not None:
return r"\Pi^{%s}%s" % (exp, tex)
else:
return r"\Pi%s" % tex
def _print_gamma(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"\Gamma^{%s}%s" % (exp, tex)
else:
return r"\Gamma%s" % tex
def _print_uppergamma(self, expr, exp=None):
tex = r"\left(%s, %s\right)" % (self._print(expr.args[0]),
self._print(expr.args[1]))
if exp is not None:
return r"\Gamma^{%s}%s" % (exp, tex)
else:
return r"\Gamma%s" % tex
def _print_lowergamma(self, expr, exp=None):
tex = r"\left(%s, %s\right)" % (self._print(expr.args[0]),
self._print(expr.args[1]))
if exp is not None:
return r"\gamma^{%s}%s" % (exp, tex)
else:
return r"\gamma%s" % tex
def _print_expint(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[1])
nu = self._print(expr.args[0])
if exp is not None:
return r"\operatorname{E}_{%s}^{%s}%s" % (nu, exp, tex)
else:
return r"\operatorname{E}_{%s}%s" % (nu, tex)
def _print_fresnels(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"S^{%s}%s" % (exp, tex)
else:
return r"S%s" % tex
def _print_fresnelc(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"C^{%s}%s" % (exp, tex)
else:
return r"C%s" % tex
def _print_subfactorial(self, expr, exp=None):
x = expr.args[0]
if self._needs_brackets(x):
tex = r"!\left(%s\right)" % self._print(x)
else:
tex = "!" + self._print(x)
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_factorial(self, expr, exp=None):
x = expr.args[0]
if self._needs_brackets(x):
tex = r"\left(%s\right)!" % self._print(x)
else:
tex = self._print(x) + "!"
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_factorial2(self, expr, exp=None):
x = expr.args[0]
if self._needs_brackets(x):
tex = r"\left(%s\right)!!" % self._print(x)
else:
tex = self._print(x) + "!!"
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_binomial(self, expr, exp=None):
tex = r"{\binom{%s}{%s}}" % (self._print(expr.args[0]),
self._print(expr.args[1]))
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_RisingFactorial(self, expr, exp=None):
n, k = expr.args
if self._needs_brackets(n):
base = r"\left(%s\right)" % self._print(n)
else:
base = self._print(n)
tex = r"{%s}^{\left(%s\right)}" % (base, self._print(k))
return self._do_exponent(tex, exp)
def _print_FallingFactorial(self, expr, exp=None):
n, k = expr.args
if self._needs_brackets(k):
sub = r"\left(%s\right)" % self._print(k)
else:
sub = self._print(k)
tex = r"{\left(%s\right)}_{%s}" % (self._print(n), sub)
return self._do_exponent(tex, exp)
    def _hprint_BesselBase(self, expr, exp, sym):
        """Shared printer for Bessel-type functions: sym_{order}(argument).

        If *sym* already carries a superscript (e.g. 'H^{(1)}'), the
        exponent cannot be merged in and is applied around the whole
        expression via _do_exponent instead.
        """
        tex = r"%s" % (sym)
        need_exp = False
        if exp is not None:
            if tex.find('^') == -1:
                tex = r"%s^{%s}" % (tex, self._print(exp))
            else:
                need_exp = True
        tex = r"%s_{%s}\left(%s\right)" % (tex, self._print(expr.order),
                                           self._print(expr.argument))
        if need_exp:
            tex = self._do_exponent(tex, exp)
        return tex
def _hprint_vec(self, vec):
if len(vec) == 0:
return ""
s = ""
for i in vec[:-1]:
s += "%s, " % self._print(i)
s += self._print(vec[-1])
return s
    def _print_besselj(self, expr, exp=None):
        # Bessel function of the first kind: J_nu(z).
        return self._hprint_BesselBase(expr, exp, 'J')
    def _print_besseli(self, expr, exp=None):
        # Modified Bessel function of the first kind: I_nu(z).
        return self._hprint_BesselBase(expr, exp, 'I')
    def _print_besselk(self, expr, exp=None):
        # Modified Bessel function of the second kind: K_nu(z).
        return self._hprint_BesselBase(expr, exp, 'K')
    def _print_bessely(self, expr, exp=None):
        # Bessel function of the second kind: Y_nu(z).
        return self._hprint_BesselBase(expr, exp, 'Y')
    def _print_yn(self, expr, exp=None):
        # Spherical Bessel function of the second kind: y_n(z).
        return self._hprint_BesselBase(expr, exp, 'y')
    def _print_jn(self, expr, exp=None):
        # Spherical Bessel function of the first kind: j_n(z).
        return self._hprint_BesselBase(expr, exp, 'j')
    def _print_hankel1(self, expr, exp=None):
        # Hankel function of the first kind: H^{(1)}_nu(z).
        return self._hprint_BesselBase(expr, exp, 'H^{(1)}')
    def _print_hankel2(self, expr, exp=None):
        # Hankel function of the second kind: H^{(2)}_nu(z).
        return self._hprint_BesselBase(expr, exp, 'H^{(2)}')
def _hprint_airy(self, expr, exp=None, notation=""):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}%s" % (notation, exp, tex)
else:
return r"%s%s" % (notation, tex)
def _hprint_airy_prime(self, expr, exp=None, notation=""):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"{%s^\prime}^{%s}%s" % (notation, exp, tex)
else:
return r"%s^\prime%s" % (notation, tex)
    def _print_airyai(self, expr, exp=None):
        # Airy function of the first kind: Ai(z).
        return self._hprint_airy(expr, exp, 'Ai')
    def _print_airybi(self, expr, exp=None):
        # Airy function of the second kind: Bi(z).
        return self._hprint_airy(expr, exp, 'Bi')
    def _print_airyaiprime(self, expr, exp=None):
        # Derivative of the Airy function of the first kind: Ai'(z).
        return self._hprint_airy_prime(expr, exp, 'Ai')
    def _print_airybiprime(self, expr, exp=None):
        # Derivative of the Airy function of the second kind: Bi'(z).
        return self._hprint_airy_prime(expr, exp, 'Bi')
    def _print_hyper(self, expr, exp=None):
        """Print the generalized hypergeometric function pFq with its
        parameter lists stacked in a matrix environment."""
        tex = r"{{}_{%s}F_{%s}\left(\begin{matrix} %s \\ %s \end{matrix}" \
              r"\middle| {%s} \right)}" % \
            (self._print(len(expr.ap)), self._print(len(expr.bq)),
              self._hprint_vec(expr.ap), self._hprint_vec(expr.bq),
              self._print(expr.argument))
        if exp is not None:
            tex = r"{%s}^{%s}" % (tex, self._print(exp))
        return tex
    def _print_meijerg(self, expr, exp=None):
        """Print the Meijer G-function with its four parameter lists
        arranged in a 2x2 matrix layout."""
        tex = r"{G_{%s, %s}^{%s, %s}\left(\begin{matrix} %s & %s \\" \
              r"%s & %s \end{matrix} \middle| {%s} \right)}" % \
            (self._print(len(expr.ap)), self._print(len(expr.bq)),
              self._print(len(expr.bm)), self._print(len(expr.an)),
              self._hprint_vec(expr.an), self._hprint_vec(expr.aother),
              self._hprint_vec(expr.bm), self._hprint_vec(expr.bother),
              self._print(expr.argument))
        if exp is not None:
            tex = r"{%s}^{%s}" % (tex, self._print(exp))
        return tex
def _print_dirichlet_eta(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"\eta^{%s}%s" % (self._print(exp), tex)
return r"\eta%s" % tex
def _print_zeta(self, expr, exp=None):
if len(expr.args) == 2:
tex = r"\left(%s, %s\right)" % tuple(map(self._print, expr.args))
else:
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"\zeta^{%s}%s" % (self._print(exp), tex)
return r"\zeta%s" % tex
def _print_lerchphi(self, expr, exp=None):
tex = r"\left(%s, %s, %s\right)" % tuple(map(self._print, expr.args))
if exp is None:
return r"\Phi%s" % tex
return r"\Phi^{%s}%s" % (self._print(exp), tex)
def _print_polylog(self, expr, exp=None):
s, z = map(self._print, expr.args)
tex = r"\left(%s\right)" % z
if exp is None:
return r"\operatorname{Li}_{%s}%s" % (s, tex)
return r"\operatorname{Li}_{%s}^{%s}%s" % (s, self._print(exp), tex)
def _print_jacobi(self, expr, exp=None):
n, a, b, x = map(self._print, expr.args)
tex = r"P_{%s}^{\left(%s,%s\right)}\left(%s\right)" % (n, a, b, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
return tex
def _print_gegenbauer(self, expr, exp=None):
n, a, x = map(self._print, expr.args)
tex = r"C_{%s}^{\left(%s\right)}\left(%s\right)" % (n, a, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
return tex
def _print_chebyshevt(self, expr, exp=None):
n, x = map(self._print, expr.args)
tex = r"T_{%s}\left(%s\right)" % (n, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
return tex
def _print_chebyshevu(self, expr, exp=None):
n, x = map(self._print, expr.args)
tex = r"U_{%s}\left(%s\right)" % (n, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
return tex
def _print_legendre(self, expr, exp=None):
n, x = map(self._print, expr.args)
tex = r"P_{%s}\left(%s\right)" % (n, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
return tex
def _print_assoc_legendre(self, expr, exp=None):
n, a, x = map(self._print, expr.args)
tex = r"P_{%s}^{\left(%s\right)}\left(%s\right)" % (n, a, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
return tex
def _print_hermite(self, expr, exp=None):
n, x = map(self._print, expr.args)
tex = r"H_{%s}\left(%s\right)" % (n, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
return tex
def _print_laguerre(self, expr, exp=None):
n, x = map(self._print, expr.args)
tex = r"L_{%s}\left(%s\right)" % (n, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
return tex
def _print_assoc_laguerre(self, expr, exp=None):
n, a, x = map(self._print, expr.args)
tex = r"L_{%s}^{\left(%s\right)}\left(%s\right)" % (n, a, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
return tex
def _print_Ynm(self, expr, exp=None):
n, m, theta, phi = map(self._print, expr.args)
tex = r"Y_{%s}^{%s}\left(%s,%s\right)" % (n, m, theta, phi)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
return tex
def _print_Znm(self, expr, exp=None):
n, m, theta, phi = map(self._print, expr.args)
tex = r"Z_{%s}^{%s}\left(%s,%s\right)" % (n, m, theta, phi)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (self._print(exp))
return tex
def _print_Rational(self, expr):
if expr.q != 1:
sign = ""
p = expr.p
if expr.p < 0:
sign = "- "
p = -p
return r"%s\frac{%d}{%d}" % (sign, p, expr.q)
else:
return self._print(expr.p)
    def _print_Order(self, expr):
        """Print an Order term as \\mathcal{O}(expr; vars \\to point),
        omitting the limit part for the default single-variable, point-0
        case."""
        s = self._print(expr.expr)
        # Note: `and` binds tighter than `or`, so this reads
        # (point present and nonzero) or (more than one variable).
        if expr.point and any(p != S.Zero for p in expr.point) or \
           len(expr.variables) > 1:
            s += '; '
            if len(expr.variables) > 1:
                s += self._print(expr.variables)
            elif len(expr.variables):
                s += self._print(expr.variables[0])
            s += r'\rightarrow'
            if len(expr.point) > 1:
                s += self._print(expr.point)
            else:
                s += self._print(expr.point[0])
        return r"\mathcal{O}\left(%s\right)" % s
    def _print_Symbol(self, expr):
        """Print a Symbol, honoring user-supplied 'symbol_names' and
        translating super/subscripts unless the name is already LaTeX."""
        if expr in self._settings['symbol_names']:
            return self._settings['symbol_names'][expr]
        # A name containing a backslash is assumed to be raw LaTeX.
        return self._deal_with_super_sub(expr.name) if \
            '\\' not in expr.name else expr.name
    _print_RandomSymbol = _print_Symbol
    _print_MatrixSymbol = _print_Symbol
def _deal_with_super_sub(self, string):
name, supers, subs = split_super_sub(string)
name = translate(name)
supers = [translate(sup) for sup in supers]
subs = [translate(sub) for sub in subs]
# glue all items together:
if len(supers) > 0:
name += "^{%s}" % " ".join(supers)
if len(subs) > 0:
name += "_{%s}" % " ".join(subs)
return name
def _print_Relational(self, expr):
if self._settings['itex']:
gt = r"\gt"
lt = r"\lt"
else:
gt = ">"
lt = "<"
charmap = {
"==": "=",
">": gt,
"<": lt,
">=": r"\geq",
"<=": r"\leq",
"!=": r"\neq",
}
return "%s %s %s" % (self._print(expr.lhs),
charmap[expr.rel_op], self._print(expr.rhs))
    def _print_Piecewise(self, expr):
        """Print a Piecewise as a LaTeX cases environment; a final
        always-true condition prints as 'otherwise'."""
        ecpairs = [r"%s & \text{for}\: %s" % (self._print(e), self._print(c))
                   for e, c in expr.args[:-1]]
        if expr.args[-1].cond == true:
            ecpairs.append(r"%s & \text{otherwise}" %
                           self._print(expr.args[-1].expr))
        else:
            ecpairs.append(r"%s & \text{for}\: %s" %
                           (self._print(expr.args[-1].expr),
                            self._print(expr.args[-1].cond)))
        tex = r"\begin{cases} %s \end{cases}"
        return tex % r" \\".join(ecpairs)
    def _print_MatrixBase(self, expr):
        """Print a Matrix row by row in a matrix/smallmatrix/array
        environment, wrapped in the configured delimiters."""
        lines = []
        for line in range(expr.rows):  # 'line' iterates row indices
            lines.append(" & ".join([ self._print(i) for i in expr[line, :] ]))
        mat_str = self._settings['mat_str']
        if mat_str is None:
            if self._settings['mode'] == 'inline':
                mat_str = 'smallmatrix'
            else:
                # NOTE(review): the `is True` guard appears intended for
                # non-bool comparison results (e.g. symbolic column
                # counts), which then fall back to 'array' — confirm.
                if (expr.cols <= 10) is True:
                    mat_str = 'matrix'
                else:
                    mat_str = 'array'
        out_str = r'\begin{%MATSTR%}%s\end{%MATSTR%}'
        out_str = out_str.replace('%MATSTR%', mat_str)
        if mat_str == 'array':
            # 'array' needs an explicit column-alignment spec.
            out_str = out_str.replace('%s', '{' + 'c'*expr.cols + '}%s')
        if self._settings['mat_delim']:
            left_delim = self._settings['mat_delim']
            right_delim = self._delim_dict[left_delim]
            out_str = r'\left' + left_delim + out_str + \
                      r'\right' + right_delim
        return out_str % r"\\".join(lines)
    _print_ImmutableMatrix = _print_MatrixBase
    _print_Matrix = _print_MatrixBase
def _print_MatrixElement(self, expr):
return self._print(expr.parent) + '_{%s, %s}'%(expr.i, expr.j)
    def _print_MatrixSlice(self, expr):
        """Print a matrix slice as parent[rows, cols] with Python-style
        colon slices, dropping redundant parts (step 1, single rows,
        leading 0)."""
        def latexslice(x):
            x = list(x)
            if x[2] == 1:
                del x[2]
            if x[1] == x[0] + 1:
                del x[1]
            if x[0] == 0:
                x[0] = ''
            return ':'.join(map(self._print, x))
        return (self._print(expr.parent) + r'\left[' +
                latexslice(expr.rowslice) + ', ' +
                latexslice(expr.colslice) + r'\right]')
def _print_BlockMatrix(self, expr):
return self._print(expr.blocks)
def _print_Transpose(self, expr):
    """Render a matrix transpose; bare symbols skip the parentheses."""
    operand = expr.arg
    from sympy.matrices import MatrixSymbol
    if isinstance(operand, MatrixSymbol):
        return "%s^T" % self._print(operand)
    return r"\left(%s\right)^T" % self._print(operand)
def _print_Adjoint(self, expr):
    r"""Render the conjugate transpose (adjoint) of a matrix expression.

    Bare MatrixSymbols print as ``X^\dag``; anything else is wrapped in
    parentheses first.
    """
    mat = expr.arg
    from sympy.matrices import MatrixSymbol
    if not isinstance(mat, MatrixSymbol):
        return r"\left(%s\right)^\dag" % self._print(mat)
    else:
        # Raw string: "\d" is an invalid escape sequence in a plain
        # string literal (DeprecationWarning, later an error); the
        # produced text is unchanged.
        return r"%s^\dag" % self._print(mat)
def _print_MatAdd(self, expr):
    """Join the terms of a matrix sum with ' + '."""
    return " + ".join(self._print(term) for term in expr.args)

def _print_MatMul(self, expr):
    """Join matrix-product factors with spaces, parenthesizing sums."""
    from sympy import Add, MatAdd, HadamardProduct

    def wrap(factor):
        # Lower-precedence operands need explicit parentheses.
        if isinstance(factor, (Add, MatAdd, HadamardProduct)):
            return r"\left(%s\right)" % self._print(factor)
        return self._print(factor)

    return ' '.join(wrap(factor) for factor in expr.args)
def _print_HadamardProduct(self, expr):
    r"""Render an elementwise (Hadamard) product with \circ separators."""
    from sympy import Add, MatAdd, MatMul

    def parens(x):
        # Lower-precedence operands need explicit parentheses.
        if isinstance(x, (Add, MatAdd, MatMul)):
            return r"\left(%s\right)" % self._print(x)
        return self._print(x)
    # Raw string: '\c' is an invalid escape sequence in a plain string
    # literal; the produced text is unchanged.
    return r' \circ '.join(map(parens, expr.args))
def _print_MatPow(self, expr):
    """Render a matrix power; bare symbols need no parentheses."""
    base, exp = expr.base, expr.exp
    from sympy.matrices import MatrixSymbol
    base_tex = self._print(base)
    exp_tex = self._print(exp)
    if isinstance(base, MatrixSymbol):
        return "%s^{%s}" % (base_tex, exp_tex)
    return r"\left(%s\right)^{%s}" % (base_tex, exp_tex)

def _print_ZeroMatrix(self, Z):
    """The zero matrix prints as a bold 0."""
    return r"\bold{0}"

def _print_Identity(self, I):
    """The identity matrix prints as blackboard-bold I."""
    return r"\mathbb{I}"
def _print_tuple(self, expr):
    """Render a Python tuple with round LaTeX delimiters."""
    body = r", \quad ".join(self._print(item) for item in expr)
    return r"\left ( %s\right )" % body

def _print_Tuple(self, expr):
    """A SymPy Tuple prints exactly like a Python tuple."""
    return self._print_tuple(expr)

def _print_list(self, expr):
    """Render a Python list with square LaTeX delimiters."""
    body = r", \quad ".join(self._print(item) for item in expr)
    return r"\left [ %s\right ]" % body

def _print_dict(self, d):
    """Render a dict as a brace-delimited, key-sorted list of pairs."""
    entries = []
    for key in sorted(d.keys(), key=default_sort_key):
        entries.append("%s : %s" % (self._print(key), self._print(d[key])))
    return r"\left \{ %s\right \}" % r", \quad ".join(entries)

def _print_Dict(self, expr):
    """A SymPy Dict prints exactly like a Python dict."""
    return self._print_dict(expr)
def _print_DiracDelta(self, expr, exp=None):
    """Render DiracDelta, with derivative order and optional power."""
    if len(expr.args) == 1 or expr.args[1] == 0:
        tex = r"\delta\left(%s\right)" % self._print(expr.args[0])
    else:
        tex = r"\delta^{\left( %s \right)}\left( %s \right)" % (
            self._print(expr.args[1]), self._print(expr.args[0]))
    return r"\left(%s\right)^{%s}" % (tex, exp) if exp else tex

def _print_Heaviside(self, expr, exp=None):
    """Render the Heaviside step function as theta(...)."""
    tex = r"\theta\left(%s\right)" % self._print(expr.args[0])
    return r"\left(%s\right)^{%s}" % (tex, exp) if exp else tex

def _print_KroneckerDelta(self, expr, exp=None):
    """Render KroneckerDelta; compound indices get a comma separator."""
    i = self._print(expr.args[0])
    j = self._print(expr.args[1])
    sep = " " if expr.args[0].is_Atom and expr.args[1].is_Atom else ", "
    tex = r'\delta_{%s%s%s}' % (i, sep, j)
    return r'\left(%s\right)^{%s}' % (tex, exp) if exp else tex

def _print_LeviCivita(self, expr, exp=None):
    """Render the Levi-Civita symbol; compound indices get commas."""
    indices = map(self._print, expr.args)
    sep = " " if all(a.is_Atom for a in expr.args) else ", "
    tex = r'\varepsilon_{%s}' % sep.join(indices)
    return r'\left(%s\right)^{%s}' % (tex, exp) if exp else tex
def _print_ProductSet(self, p):
    """Render a Cartesian product, collapsing identical factors to a power."""
    if len(p.sets) > 1 and not has_variety(p.sets):
        return self._print(p.sets[0]) + "^%d" % len(p.sets)
    # Loop variable renamed so it no longer shadows the builtin `set`.
    return r" \times ".join(self._print(factor) for factor in p.sets)
def _print_RandomDomain(self, d):
    """Render a random-variable domain, degrading gracefully.

    Tries, in order: the boolean form, the "symbols in set" form, and
    finally just the symbols.
    """
    try:
        return 'Domain: ' + self._print(d.as_boolean())
    except Exception:
        try:
            return ('Domain: ' + self._print(d.symbols) + ' in ' +
                    self._print(d.set))
        except Exception:
            # Was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit; Exception matches the
            # intent of the outer handler.
            return 'Domain on ' + self._print(d.symbols)
def _print_FiniteSet(self, s):
    """Render a FiniteSet with sorted, brace-delimited elements."""
    return self._print_set(sorted(s.args, key=default_sort_key))

def _print_set(self, s):
    """Render any iterable of elements as a LaTeX set literal."""
    ordered = sorted(s, key=default_sort_key)
    return r"\left\{%s\right\}" % ", ".join(map(self._print, ordered))

_print_frozenset = _print_set
def _print_Range(self, s):
    """Render a Range; long ranges are elided with an ellipsis."""
    if len(s) > 4:
        it = iter(s)
        # Raw string: '\l' is an invalid escape sequence in a plain
        # string literal; r'\ldots' keeps the backslash explicitly.
        printset = next(it), next(it), r'\ldots', s._last_element
    else:
        printset = tuple(s)
    return (r"\left\{"
            + r", ".join(self._print(el) for el in printset)
            + r"\right\}")
def _print_Interval(self, i):
    """Render an Interval; degenerate intervals print as singletons."""
    if i.start == i.end:
        return r"\left\{%s\right\}" % self._print(i.start)
    left = '(' if i.left_open else '['
    right = ')' if i.right_open else ']'
    return r"\left%s%s, %s\right%s" % (
        left, self._print(i.start), self._print(i.end), right)
def _print_Union(self, u):
    """Join the operands of a set union with the cup operator."""
    return r" \cup ".join(self._print(arg) for arg in u.args)

def _print_Complement(self, u):
    """Join the operands of a set complement with setminus."""
    return r" \setminus ".join(self._print(arg) for arg in u.args)

def _print_Intersection(self, u):
    """Join the operands of a set intersection with the cap operator."""
    return r" \cap ".join(self._print(arg) for arg in u.args)

def _print_SymmetricDifference(self, u):
    """Join symmetric-difference operands with the triangle operator."""
    return r" \triangle ".join(self._print(arg) for arg in u.args)

def _print_EmptySet(self, e):
    """The empty set prints as the emptyset glyph."""
    return r"\emptyset"

def _print_Naturals(self, n):
    """The natural numbers print as blackboard-bold N."""
    return r"\mathbb{N}"

def _print_Integers(self, i):
    """The integers print as blackboard-bold Z."""
    return r"\mathbb{Z}"

def _print_Reals(self, i):
    """The reals print as blackboard-bold R."""
    return r"\mathbb{R}"
def _print_ImageSet(self, s):
    """Render an ImageSet in set-builder notation."""
    variables = ', '.join(self._print(var) for var in s.lamda.variables)
    return r"\left\{%s\; |\; %s \in %s\right\}" % (
        self._print(s.lamda.expr), variables, self._print(s.base_set))

def _print_Contains(self, e):
    """Render a membership assertion with the element-of symbol."""
    element, container = (self._print(a) for a in e.args)
    return r"%s \in %s" % (element, container)

def _print_FiniteField(self, expr):
    """A finite field prints as F with its modulus as subscript."""
    return r"\mathbb{F}_{%s}" % expr.mod

def _print_IntegerRing(self, expr):
    """The ring of integers prints as blackboard-bold Z."""
    return r"\mathbb{Z}"

def _print_RationalField(self, expr):
    """The rationals print as blackboard-bold Q."""
    return r"\mathbb{Q}"

def _print_RealField(self, expr):
    """The real field prints as blackboard-bold R."""
    return r"\mathbb{R}"

def _print_ComplexField(self, expr):
    """The complex field prints as blackboard-bold C."""
    return r"\mathbb{C}"
def _print_PolynomialRing(self, expr):
    """Render a polynomial ring as domain[generators]."""
    gens = ", ".join(self._print(s) for s in expr.symbols)
    return r"%s\left[%s\right]" % (self._print(expr.domain), gens)

def _print_FractionField(self, expr):
    """Render a fraction field as domain(generators)."""
    gens = ", ".join(self._print(s) for s in expr.symbols)
    return r"%s\left(%s\right)" % (self._print(expr.domain), gens)

def _print_PolynomialRingBase(self, expr):
    """Render a (possibly localized) polynomial ring base."""
    gens = ", ".join(self._print(s) for s in expr.symbols)
    # Non-Poly bases are localizations and carry the S_<^{-1} prefix.
    prefix = "" if expr.is_Poly else r"S_<^{-1}"
    return r"%s%s\left[%s\right]" % (prefix, self._print(expr.domain), gens)
def _print_Poly(self, poly):
    """Render a Poly with its expression, generators and domain."""
    cls = poly.__class__.__name__
    parts = [self._print(poly.as_expr())]
    parts.extend(self._print(g) for g in poly.gens)
    parts.append("domain=%s" % self._print(poly.get_domain()))
    args = ", ".join(parts)
    # Known LaTeX function names render as a backslash command.
    if cls in accepted_latex_functions:
        return r"\%s {\left (%s \right )}" % (cls, args)
    return r"\operatorname{%s}{\left( %s \right)}" % (cls, args)
def _print_RootOf(self, root):
    """Render a RootOf with its defining expression and root index."""
    cls = root.__class__.__name__
    expr = self._print(root.expr)
    if cls in accepted_latex_functions:
        return r"\%s {\left(%s, %d\right)}" % (cls, expr, root.index)
    return r"\operatorname{%s} {\left(%s, %d\right)}" % (cls, expr,
                                                         root.index)

def _print_RootSum(self, expr):
    """Render a RootSum; the mapped function is shown unless trivial."""
    cls = expr.__class__.__name__
    parts = [self._print(expr.expr)]
    if expr.fun is not S.IdentityFunction:
        parts.append(self._print(expr.fun))
    joined = ", ".join(parts)
    if cls in accepted_latex_functions:
        return r"\%s {\left(%s\right)}" % (cls, joined)
    return r"\operatorname{%s} {\left(%s\right)}" % (cls, joined)
def _print_PolyElement(self, poly):
    """Delegate to the element's own str() with LaTeX templates."""
    return poly.str(self, PRECEDENCE, "{%s}^{%d}",
                    self._settings['mul_symbol_latex'])

def _print_FracElement(self, frac):
    """Render a fraction-field element; unit denominators are dropped."""
    if frac.denom == 1:
        return self._print(frac.numer)
    return r"\frac{%s}{%s}" % (self._print(frac.numer),
                               self._print(frac.denom))

def _print_euler(self, expr):
    """Euler numbers print as E with a subscript."""
    return r"E_{%s}" % self._print(expr.args[0])

def _print_catalan(self, expr):
    """Catalan numbers print as C with a subscript."""
    return r"C_{%s}" % self._print(expr.args[0])
def _print_transform(self, expr, name, inverse=False):
    """Shared renderer for the ten integral-transform printers below.

    Every transform prints as NAME_{var}[expr](target) -- with an
    optional ^{-1} for the inverse transforms -- so only the
    calligraphic name differs; the bodies were ten near-identical
    copies of the same format expression.
    """
    sup = "^{-1}" if inverse else ""
    return r"\mathcal{%s}%s_{%s}\left[%s\right]\left(%s\right)" % (
        name, sup, self._print(expr.args[1]),
        self._print(expr.args[0]), self._print(expr.args[2]))

def _print_MellinTransform(self, expr):
    return self._print_transform(expr, "M")

def _print_InverseMellinTransform(self, expr):
    return self._print_transform(expr, "M", inverse=True)

def _print_LaplaceTransform(self, expr):
    return self._print_transform(expr, "L")

def _print_InverseLaplaceTransform(self, expr):
    return self._print_transform(expr, "L", inverse=True)

def _print_FourierTransform(self, expr):
    return self._print_transform(expr, "F")

def _print_InverseFourierTransform(self, expr):
    return self._print_transform(expr, "F", inverse=True)

def _print_SineTransform(self, expr):
    return self._print_transform(expr, "SIN")

def _print_InverseSineTransform(self, expr):
    return self._print_transform(expr, "SIN", inverse=True)

def _print_CosineTransform(self, expr):
    return self._print_transform(expr, "COS")

def _print_InverseCosineTransform(self, expr):
    return self._print_transform(expr, "COS", inverse=True)
def _print_DMP(self, p):
    """Render a dense multivariate polynomial (DMP).

    Prefers the sympified form via the attached ring; falls back to the
    raw repr when conversion fails.
    """
    try:
        if p.ring is not None:
            # TODO incorporate order
            return self._print(p.ring.to_sympy(p))
    except SympifyError:
        pass
    return self._print(repr(p))

def _print_DMF(self, p):
    """Dense multivariate fractions print exactly like DMPs."""
    return self._print_DMP(p)
def _print_Object(self, object):
    """A category-theory Object prints as its name symbol."""
    return self._print(Symbol(object.name))

def _print_Morphism(self, morphism):
    """Render a morphism as 'domain -> codomain'."""
    return "%s\\rightarrow %s" % (self._print(morphism.domain),
                                  self._print(morphism.codomain))

def _print_NamedMorphism(self, morphism):
    """Render a named morphism as 'name:domain -> codomain'."""
    name_tex = self._print(Symbol(morphism.name))
    return "%s:%s" % (name_tex, self._print_Morphism(morphism))

def _print_IdentityMorphism(self, morphism):
    """Identity morphisms print as the named morphism 'id'."""
    from sympy.categories import NamedMorphism
    return self._print_NamedMorphism(NamedMorphism(
        morphism.domain, morphism.codomain, "id"))

def _print_CompositeMorphism(self, morphism):
    """Render a composite as circle-joined component names."""
    # All components of the morphism have names, so the name of the
    # composite can be built.  Components compose right-to-left, hence
    # the reverse before joining.
    names = [self._print(Symbol(component.name))
             for component in morphism.components]
    names.reverse()
    return "\\circ ".join(names) + ":" + self._print_Morphism(morphism)

def _print_Category(self, morphism):
    """A category prints as its bold-face name."""
    return "\\mathbf{%s}" % self._print(Symbol(morphism.name))
def _print_Diagram(self, diagram):
    """Render a diagram as premises (=> conclusions, when present)."""
    if not diagram.premises:
        # An empty diagram renders as the empty set.
        return self._print(S.EmptySet)
    result = self._print(diagram.premises)
    if diagram.conclusions:
        result += "\\Longrightarrow %s" % self._print(diagram.conclusions)
    return result

def _print_DiagramGrid(self, grid):
    """Render a DiagramGrid as a LaTeX array of its cells."""
    out = "\\begin{array}{%s}\n" % ("c" * grid.width)
    for i in range(grid.height):
        cells = []
        for j in range(grid.width):
            # Empty cells contribute nothing but keep their column.
            cells.append(latex(grid[i, j]) + " " if grid[i, j] else "")
        out += "& ".join(cells)
        if i != grid.height - 1:
            out += "\\\\"
        out += "\n"
    return out + "\\end{array}\n"
def _print_FreeModule(self, M):
    """A free module prints as ring^rank."""
    return '{%s}^{%s}' % (self._print(M.ring), self._print(M.rank))

def _print_FreeModuleElement(self, m):
    """Print a free-module element as a row vector, for now."""
    entries = ",".join('{' + self._print(x) + '}' for x in m)
    return r"\left[ %s \right]" % entries

def _print_SubModule(self, m):
    """A submodule prints as the angle-bracketed list of generators."""
    gens = ",".join('{' + self._print(x) + '}' for x in m.gens)
    return r"\left< %s \right>" % gens

def _print_ModuleImplementedIdeal(self, m):
    """An ideal prints via its underlying module's generators."""
    gens = ",".join('{' + self._print(x) + '}' for [x] in m._module.gens)
    return r"\left< %s \right>" % gens

def _print_QuotientRing(self, R):
    """A quotient ring prints as a fraction of ring by base ideal."""
    # TODO nicer fractions for few generators...
    return r"\frac{%s}{%s}" % (self._print(R.ring),
                               self._print(R.base_ideal))

def _print_QuotientRingElement(self, x):
    """A quotient-ring element prints as representative + ideal."""
    return r"{%s} + {%s}" % (self._print(x.data),
                             self._print(x.ring.base_ideal))

def _print_QuotientModuleElement(self, m):
    """A quotient-module element prints as representative + kernel."""
    return r"{%s} + {%s}" % (self._print(m.data),
                             self._print(m.module.killed_module))

def _print_QuotientModule(self, M):
    """A quotient module prints as a fraction of base by kernel."""
    # TODO nicer fractions for few generators...
    return r"\frac{%s}{%s}" % (self._print(M.base),
                               self._print(M.killed_module))

def _print_MatrixHomomorphism(self, h):
    """Render a homomorphism as 'matrix : domain -> codomain'."""
    return r"{%s} : {%s} \to {%s}" % (self._print(h._sympy_matrix()),
                                      self._print(h.domain),
                                      self._print(h.codomain))
def _print_BaseScalarField(self, field):
    """A coordinate scalar field prints as its bold coordinate name."""
    string = field._coord_sys._names[field._index]
    return r'\boldsymbol{\mathrm{%s}}' % self._print(Symbol(string))

def _print_BaseVectorField(self, field):
    """A coordinate vector field prints as a partial derivative."""
    string = field._coord_sys._names[field._index]
    return r'\partial_{%s}' % self._print(Symbol(string))

def _print_Differential(self, diff):
    """Render a differential: d<coord> for coordinate scalar fields,
    d(<expr>) for anything else."""
    field = diff._form_field
    if hasattr(field, '_coord_sys'):
        string = field._coord_sys._names[field._index]
        return r'\mathrm{d}%s' % self._print(Symbol(string))
    else:
        return 'd(%s)' % self._print(field)
    # Two unreachable lines (building r'\mathrm{d}\left(%s\right)')
    # followed this if/else; both branches return, so they were dead
    # code and have been removed.  NOTE(review): the dead code suggests
    # the else-branch may have been meant to emit \mathrm{d}(...) --
    # behavior kept as-is pending confirmation.
def _print_Tr(self, p):
    """Render a trace."""
    # Todo: Handle indices
    return r'\mbox{Tr}\left(%s\right)' % self._print(p.args[0])

def _print_totient(self, expr):
    """Euler's totient prints as phi(...)."""
    return r'\phi\left( %s \right)' % self._print(expr.args[0])

def _print_divisor_sigma(self, expr, exp=None):
    """Render divisor sigma, with optional order subscript and power."""
    if len(expr.args) == 2:
        body = r"_%s\left(%s\right)" % tuple(map(self._print,
                                                 (expr.args[1],
                                                  expr.args[0])))
    else:
        body = r"\left(%s\right)" % self._print(expr.args[0])
    if exp is not None:
        return r"\sigma^{%s}%s" % (self._print(exp), body)
    return r"\sigma%s" % body
def translate(s):
    r'''
    Check for a modifier ending the string.  If present, convert the
    modifier to latex and translate the rest recursively.

    Given a description of a Greek letter or other special character,
    return the appropriate latex.

    Let everything else pass as given.

    >>> from sympy.printing.latex import translate
    >>> translate('alphahatdotprime')
    "{\\dot{\\hat{\\alpha}}}'"
    '''
    # An exact dictionary hit wins outright.
    tex = tex_greek_dictionary.get(s)
    if tex:
        return tex
    if s.lower() in greek_letters_set or s in other_symbols:
        return "\\" + s
    # Otherwise strip the longest matching modifier suffix and recurse
    # on what remains.
    for key in sorted(modifier_dict, key=len, reverse=True):
        if s.lower().endswith(key) and len(s) > len(key):
            return modifier_dict[key](translate(s[:-len(key)]))
    return s
def latex(expr, **settings):
    r"""
    Convert the given expression to LaTeX representation.

    >>> from sympy import latex, pi, sin, asin, Integral, Matrix, Rational
    >>> from sympy.abc import x, y, mu, r, tau

    >>> print(latex((2*tau)**Rational(7,2)))
    8 \sqrt{2} \tau^{\frac{7}{2}}

    order: Any of the supported monomial orderings (currently "lex", "grlex", or
    "grevlex"), "old", and "none". This parameter does nothing for Mul objects.
    Setting order to "old" uses the compatibility ordering for Add defined in
    Printer. For very large expressions, set the 'order' keyword to 'none' if
    speed is a concern.

    mode: Specifies how the generated code will be delimited. 'mode' can be one
    of 'plain', 'inline', 'equation' or 'equation*'. If 'mode' is set to
    'plain', then the resulting code will not be delimited at all (this is the
    default). If 'mode' is set to 'inline' then inline LaTeX $ $ will be used.
    If 'mode' is set to 'equation' or 'equation*', the resulting code will be
    enclosed in the 'equation' or 'equation*' environment (remember to import
    'amsmath' for 'equation*'), unless the 'itex' option is set. In the latter
    case, the ``$$ $$`` syntax is used.

    >>> print(latex((2*mu)**Rational(7,2), mode='plain'))
    8 \sqrt{2} \mu^{\frac{7}{2}}

    >>> print(latex((2*tau)**Rational(7,2), mode='inline'))
    $8 \sqrt{2} \tau^{\frac{7}{2}}$

    >>> print(latex((2*mu)**Rational(7,2), mode='equation*'))
    \begin{equation*}8 \sqrt{2} \mu^{\frac{7}{2}}\end{equation*}

    >>> print(latex((2*mu)**Rational(7,2), mode='equation'))
    \begin{equation}8 \sqrt{2} \mu^{\frac{7}{2}}\end{equation}

    itex: Specifies if itex-specific syntax is used, including emitting ``$$ $$``.

    >>> print(latex((2*mu)**Rational(7,2), mode='equation', itex=True))
    $$8 \sqrt{2} \mu^{\frac{7}{2}}$$

    fold_frac_powers: Emit "^{p/q}" instead of "^{\frac{p}{q}}" for fractional
    powers.

    >>> print(latex((2*tau)**Rational(7,2), fold_frac_powers=True))
    8 \sqrt{2} \tau^{7/2}

    fold_func_brackets: Fold function brackets where applicable.

    >>> print(latex((2*tau)**sin(Rational(7,2))))
    \left(2 \tau\right)^{\sin{\left (\frac{7}{2} \right )}}
    >>> print(latex((2*tau)**sin(Rational(7,2)), fold_func_brackets = True))
    \left(2 \tau\right)^{\sin {\frac{7}{2}}}

    fold_short_frac: Emit "p / q" instead of "\frac{p}{q}" when the
    denominator is simple enough (at most two terms and no powers).
    The default value is `True` for inline mode, False otherwise.

    >>> print(latex(3*x**2/y))
    \frac{3 x^{2}}{y}
    >>> print(latex(3*x**2/y, fold_short_frac=True))
    3 x^{2} / y

    long_frac_ratio: The allowed ratio of the width of the numerator to the
    width of the denominator before we start breaking off long fractions.
    The default value is 2.

    >>> print(latex(Integral(r, r)/2/pi, long_frac_ratio=2))
    \frac{\int r\, dr}{2 \pi}
    >>> print(latex(Integral(r, r)/2/pi, long_frac_ratio=0))
    \frac{1}{2 \pi} \int r\, dr

    mul_symbol: The symbol to use for multiplication. Can be one of None,
    "ldot", "dot", or "times".

    >>> print(latex((2*tau)**sin(Rational(7,2)), mul_symbol="times"))
    \left(2 \times \tau\right)^{\sin{\left (\frac{7}{2} \right )}}

    inv_trig_style: How inverse trig functions should be displayed. Can be one
    of "abbreviated", "full", or "power". Defaults to "abbreviated".

    >>> print(latex(asin(Rational(7,2))))
    \operatorname{asin}{\left (\frac{7}{2} \right )}
    >>> print(latex(asin(Rational(7,2)), inv_trig_style="full"))
    \arcsin{\left (\frac{7}{2} \right )}
    >>> print(latex(asin(Rational(7,2)), inv_trig_style="power"))
    \sin^{-1}{\left (\frac{7}{2} \right )}

    mat_str: Which matrix environment string to emit. "smallmatrix", "matrix",
    "array", etc. Defaults to "smallmatrix" for inline mode, "matrix" for
    matrices of no more than 10 columns, and "array" otherwise.

    >>> print(latex(Matrix(2, 1, [x, y])))
    \left[\begin{matrix}x\\y\end{matrix}\right]

    >>> print(latex(Matrix(2, 1, [x, y]), mat_str = "array"))
    \left[\begin{array}{c}x\\y\end{array}\right]

    mat_delim: The delimiter to wrap around matrices. Can be one of "[", "(",
    or the empty string. Defaults to "[".

    >>> print(latex(Matrix(2, 1, [x, y]), mat_delim="("))
    \left(\begin{matrix}x\\y\end{matrix}\right)

    symbol_names: Dictionary of symbols and the custom strings they should be
    emitted as.

    >>> print(latex(x**2, symbol_names={x:'x_i'}))
    x_i^{2}

    ``latex`` also supports the builtin container types list, tuple, and
    dictionary.

    >>> print(latex([2/x, y], mode='inline'))
    $\left [ 2 / x, \quad y\right ]$
    """
    # All the work happens in LatexPrinter; the settings dict is passed
    # through unchanged.
    return LatexPrinter(settings).doprint(expr)
def print_latex(expr, **settings):
    """Prints LaTeX representation of the given expression."""
    rendered = latex(expr, **settings)
    print(rendered)
| bsd-3-clause |
cogeorg/black_rhino | examples/firesales_simple/networkx/algorithms/tests/test_boundary.py | 45 | 4439 | #!/usr/bin/env python
from nose.tools import *
import networkx as nx
from networkx import convert_node_labels_to_integers as cnlti
class TestBoundary:
    """Tests for networkx node/edge boundary computations on the null
    graph, the 10-node path P10, the complete graph K10, and the
    Petersen graph."""

    def setUp(self):
        # Fixtures: nodes are relabeled to start at 1 for readability.
        self.null=nx.null_graph()
        self.P10=cnlti(nx.path_graph(10),first_label=1)
        self.K10=cnlti(nx.complete_graph(10),first_label=1)

    def test_null_node_boundary(self):
        """null graph has empty node boundaries"""
        null=self.null
        assert_equal(nx.node_boundary(null,[]),[])
        assert_equal(nx.node_boundary(null,[],[]),[])
        # Node arguments absent from the graph still yield empty boundaries.
        assert_equal(nx.node_boundary(null,[1,2,3]),[])
        assert_equal(nx.node_boundary(null,[1,2,3],[4,5,6]),[])
        assert_equal(nx.node_boundary(null,[1,2,3],[3,4,5]),[])

    def test_null_edge_boundary(self):
        """null graph has empty edge boundaries"""
        null=self.null
        assert_equal(nx.edge_boundary(null,[]),[])
        assert_equal(nx.edge_boundary(null,[],[]),[])
        assert_equal(nx.edge_boundary(null,[1,2,3]),[])
        assert_equal(nx.edge_boundary(null,[1,2,3],[4,5,6]),[])
        assert_equal(nx.edge_boundary(null,[1,2,3],[3,4,5]),[])

    def test_path_node_boundary(self):
        """Check node boundaries in path graph."""
        P10=self.P10
        assert_equal(nx.node_boundary(P10,[]),[])
        assert_equal(nx.node_boundary(P10,[],[]),[])
        assert_equal(nx.node_boundary(P10,[1,2,3]),[4])
        assert_equal(sorted(nx.node_boundary(P10,[4,5,6])),[3, 7])
        assert_equal(sorted(nx.node_boundary(P10,[3,4,5,6,7])),[2, 8])
        assert_equal(nx.node_boundary(P10,[8,9,10]),[7])
        # Restricting the boundary to a disjoint node set yields nothing.
        assert_equal(sorted(nx.node_boundary(P10,[4,5,6],[9,10])),[])

    def test_path_edge_boundary(self):
        """Check edge boundaries in path graph."""
        P10=self.P10
        assert_equal(nx.edge_boundary(P10,[]),[])
        assert_equal(nx.edge_boundary(P10,[],[]),[])
        assert_equal(nx.edge_boundary(P10,[1,2,3]),[(3, 4)])
        assert_equal(sorted(nx.edge_boundary(P10,[4,5,6])),[(4, 3), (6, 7)])
        assert_equal(sorted(nx.edge_boundary(P10,[3,4,5,6,7])),[(3, 2), (7, 8)])
        assert_equal(nx.edge_boundary(P10,[8,9,10]),[(8, 7)])
        assert_equal(sorted(nx.edge_boundary(P10,[4,5,6],[9,10])),[])
        # Overlapping node sets include edges internal to the overlap.
        assert_equal(nx.edge_boundary(P10,[1,2,3],[3,4,5]) ,[(2, 3), (3, 4)])

    def test_k10_node_boundary(self):
        """Check node boundaries in K10"""
        K10=self.K10
        assert_equal(nx.node_boundary(K10,[]),[])
        assert_equal(nx.node_boundary(K10,[],[]),[])
        assert_equal(sorted(nx.node_boundary(K10,[1,2,3])),
                     [4, 5, 6, 7, 8, 9, 10])
        assert_equal(sorted(nx.node_boundary(K10,[4,5,6])),
                     [1, 2, 3, 7, 8, 9, 10])
        assert_equal(sorted(nx.node_boundary(K10,[3,4,5,6,7])),
                     [1, 2, 8, 9, 10])
        assert_equal(nx.node_boundary(K10,[4,5,6],[]),[])
        # The boundary of the whole node set is empty.
        assert_equal(nx.node_boundary(K10,K10),[])
        assert_equal(nx.node_boundary(K10,[1,2,3],[3,4,5]),[4, 5])

    def test_k10_edge_boundary(self):
        """Check edge boundaries in K10"""
        K10=self.K10
        assert_equal(nx.edge_boundary(K10,[]),[])
        assert_equal(nx.edge_boundary(K10,[],[]),[])
        # In K10, a k-node set has k*(10-k) boundary edges.
        assert_equal(len(nx.edge_boundary(K10,[1,2,3])),21)
        assert_equal(len(nx.edge_boundary(K10,[4,5,6,7])),24)
        assert_equal(len(nx.edge_boundary(K10,[3,4,5,6,7])),25)
        assert_equal(len(nx.edge_boundary(K10,[8,9,10])),21)
        assert_equal(sorted(nx.edge_boundary(K10,[4,5,6],[9,10])),
                     [(4, 9), (4, 10), (5, 9), (5, 10), (6, 9), (6, 10)])
        assert_equal(nx.edge_boundary(K10,[1,2,3],[3,4,5]),
                     [(1, 3), (1, 4), (1, 5), (2, 3), (2, 4),
                      (2, 5), (3, 4), (3, 5)])

    def test_petersen(self):
        """Check boundaries in the petersen graph

        cheeger(G,k)=min(|bdy(S)|/|S| for |S|=k, 0<k<=|V(G)|/2)
        """
        from random import sample
        P=nx.petersen_graph()

        def cheeger(G,k):
            # Random-sampling estimate over 100 draws of a k-node set.
            return min([float(len(nx.node_boundary(G,sample(G.nodes(),k))))/k
                        for n in range(100)])

        assert_almost_equals(cheeger(P,1),3.00,places=2)
        assert_almost_equals(cheeger(P,2),2.00,places=2)
        assert_almost_equals(cheeger(P,3),1.67,places=2)
        assert_almost_equals(cheeger(P,4),1.00,places=2)
        assert_almost_equals(cheeger(P,5),0.80,places=2)
| gpl-3.0 |
kytvi2p/Sigil | 3rdparty/python/Lib/warnings.py | 24 | 14303 | """Python part of the warnings subsystem."""
import sys

# Names exported by `from warnings import *`.
__all__ = ["warn", "warn_explicit", "showwarning",
           "formatwarning", "filterwarnings", "simplefilter",
           "resetwarnings", "catch_warnings"]
def showwarning(message, category, filename, lineno, file=None, line=None):
    """Hook to write a warning to a file; replace if you like."""
    if file is None:
        # Default destination is stderr.
        file = sys.stderr
        if file is None:
            # sys.stderr is None when run with pythonw.exe - warnings get lost
            return
    try:
        file.write(formatwarning(message, category, filename, lineno, line))
    except OSError:
        pass # the file (probably stderr) is invalid - this warning gets lost.
def formatwarning(message, category, filename, lineno, line=None):
    """Return a warning formatted the standard way, as a single string."""
    import linecache
    formatted = "%s:%s: %s: %s\n" % (filename, lineno, category.__name__,
                                     message)
    if line is None:
        # No source line supplied: look it up (empty string if unknown).
        line = linecache.getline(filename, lineno)
    if line:
        formatted += " %s\n" % line.strip()
    return formatted
def filterwarnings(action, message="", category=Warning, module="", lineno=0,
                   append=False):
    """Insert an entry into the list of warnings filters (at the front).

    'action' -- one of "error", "ignore", "always", "default", "module",
                or "once"
    'message' -- a regex that the warning message must match
    'category' -- a class that the warning must be a subclass of
    'module' -- a regex that the module name must match
    'lineno' -- an integer line number, 0 matches all warnings
    'append' -- if true, append to the list of filters
    """
    import re
    assert action in ("error", "ignore", "always", "default", "module",
                      "once"), "invalid action: %r" % (action,)
    assert isinstance(message, str), "message must be a string"
    assert isinstance(category, type), "category must be a class"
    assert issubclass(category, Warning), "category must be a Warning subclass"
    assert isinstance(module, str), "module must be a string"
    assert isinstance(lineno, int) and lineno >= 0, \
           "lineno must be an int >= 0"
    # The message pattern matches case-insensitively; module does not.
    item = (action, re.compile(message, re.I), category,
            re.compile(module), lineno)
    if append:
        filters.append(item)
    else:
        filters.insert(0, item)
    # Invalidate per-module __warningregistry__ caches
    # (see the version check in warn_explicit).
    _filters_mutated()
def simplefilter(action, category=Warning, lineno=0, append=False):
    """Insert a simple entry into the list of warnings filters (at the front).

    A simple filter matches all modules and messages.
    'action' -- one of "error", "ignore", "always", "default", "module",
                or "once"
    'category' -- a class that the warning must be a subclass of
    'lineno' -- an integer line number, 0 matches all warnings
    'append' -- if true, append to the list of filters
    """
    assert action in ("error", "ignore", "always", "default", "module",
                      "once"), "invalid action: %r" % (action,)
    assert isinstance(lineno, int) and lineno >= 0, \
           "lineno must be an int >= 0"
    # None message/module patterns match every warning.
    item = (action, None, category, None, lineno)
    if append:
        filters.append(item)
    else:
        filters.insert(0, item)
    # Invalidate per-module __warningregistry__ caches
    # (see the version check in warn_explicit).
    _filters_mutated()
def resetwarnings():
    """Clear the list of warning filters, so that no filters are active."""
    # Clear in place so every alias of the shared filters list sees it.
    filters[:] = []
    _filters_mutated()

class _OptionError(Exception):
    """Exception used by option processing helpers."""
    pass
# Helper to process -W options passed via sys.warnoptions
def _processoptions(args):
    """Apply each -W command-line option; bad options are reported to
    stderr rather than raising."""
    for arg in args:
        try:
            _setoption(arg)
        except _OptionError as msg:
            print("Invalid -W option ignored:", msg, file=sys.stderr)
# Helper for _processoptions()
def _setoption(arg):
    """Parse one -W option of the form 'action:message:category:module:lineno'
    and install the corresponding filter.  Raises _OptionError on bad input."""
    import re
    parts = arg.split(':')
    if len(parts) > 5:
        raise _OptionError("too many fields (max 5): %r" % (arg,))
    while len(parts) < 5:
        parts.append('')  # missing trailing fields default to empty
    action, message, category, module, lineno = [s.strip()
                                                 for s in parts]
    action = _getaction(action)
    # message and module are literal substrings on the command line,
    # so escape them before they become regexes.
    message = re.escape(message)
    category = _getcategory(category)
    module = re.escape(module)
    if module:
        module = module + '$'  # anchor: must match the whole module name
    if lineno:
        try:
            lineno = int(lineno)
            if lineno < 0:
                raise ValueError
        except (ValueError, OverflowError):
            raise _OptionError("invalid lineno %r" % (lineno,))
    else:
        lineno = 0  # 0 matches any line
    filterwarnings(action, message, category, module, lineno)
# Helper for _setoption()
def _getaction(action):
    """Expand a -W action field to its full canonical name.

    An empty string means "default"; "all" is an alias for "always";
    any prefix of a known action is accepted.  Raises _OptionError
    for unrecognized input.
    """
    if not action:
        return "default"
    if action == "all":
        return "always"  # Alias
    for candidate in ('default', 'always', 'ignore', 'module', 'once',
                      'error'):
        if candidate.startswith(action):
            return candidate
    raise _OptionError("invalid action: %r" % (action,))
# Helper for _setoption()
def _getcategory(category):
    """Resolve a -W category field to a Warning subclass.

    An empty field means Warning itself; a bare identifier is evaluated
    as a (builtin) name; a dotted path is imported.
    """
    import re
    if not category:
        return Warning
    if re.match("^[a-zA-Z0-9_]+$", category):
        try:
            # NOTE(review): eval of a command-line-supplied string; the
            # regex above restricts it to a bare identifier, so only a
            # name lookup (no call/attribute access) can occur.
            cat = eval(category)
        except NameError:
            raise _OptionError("unknown warning category: %r" % (category,))
    else:
        # Dotted name: import the module part, then fetch the class.
        i = category.rfind(".")
        module = category[:i]
        klass = category[i+1:]
        try:
            m = __import__(module, None, None, [klass])
        except ImportError:
            raise _OptionError("invalid module name: %r" % (module,))
        try:
            cat = getattr(m, klass)
        except AttributeError:
            raise _OptionError("unknown warning category: %r" % (category,))
    if not issubclass(cat, Warning):
        raise _OptionError("invalid warning category: %r" % (category,))
    return cat
# Code typically replaced by _warnings
def warn(message, category=None, stacklevel=1):
    """Issue a warning, or maybe ignore it or raise an exception."""
    # Check if message is already a Warning object
    if isinstance(message, Warning):
        category = message.__class__
    # Check category argument
    if category is None:
        category = UserWarning
    assert issubclass(category, Warning)
    # Get context information
    try:
        caller = sys._getframe(stacklevel)
    except ValueError:
        # stacklevel walked off the top of the stack: attribute the
        # warning to the interpreter itself.
        globals = sys.__dict__
        lineno = 1
    else:
        globals = caller.f_globals
        lineno = caller.f_lineno
    if '__name__' in globals:
        module = globals['__name__']
    else:
        module = "<string>"
    filename = globals.get('__file__')
    if filename:
        fnl = filename.lower()
        if fnl.endswith((".pyc", ".pyo")):
            # Point at the .py source, not the compiled file.
            filename = filename[:-1]
    else:
        if module == "__main__":
            try:
                filename = sys.argv[0]
            except AttributeError:
                # embedded interpreters don't have sys.argv, see bug #839151
                filename = '__main__'
        if not filename:
            filename = module
    # Per-module registry used to suppress duplicate warnings.
    registry = globals.setdefault("__warningregistry__", {})
    warn_explicit(message, category, filename, lineno, module, registry,
                  globals)
def warn_explicit(message, category, filename, lineno,
                  module=None, registry=None, module_globals=None):
    """Low-level warning machinery: match against the filter list and act.

    `registry` is the per-module __warningregistry__ used to suppress
    duplicates; `module_globals` lets linecache fetch source from e.g.
    zipimported modules.
    """
    lineno = int(lineno)
    if module is None:
        module = filename or "<unknown>"
        if module[-3:].lower() == ".py":
            module = module[:-3] # XXX What about leading pathname?
    if registry is None:
        registry = {}
    if registry.get('version', 0) != _filters_version:
        # The filter list changed since this registry was populated, so
        # previously recorded suppressions may no longer apply.
        registry.clear()
        registry['version'] = _filters_version
    if isinstance(message, Warning):
        text = str(message)
        category = message.__class__
    else:
        text = message
        message = category(message)
    key = (text, category, lineno)
    # Quick test for common case
    if registry.get(key):
        return
    # Search the filters
    for item in filters:
        action, msg, cat, mod, ln = item
        # None patterns match anything; lineno 0 matches any line.
        if ((msg is None or msg.match(text)) and
            issubclass(category, cat) and
            (mod is None or mod.match(module)) and
            (ln == 0 or lineno == ln)):
            break
    else:
        action = defaultaction
    # Early exit actions
    if action == "ignore":
        registry[key] = 1
        return

    # Prime the linecache for formatting, in case the
    # "file" is actually in a zipfile or something.
    import linecache
    linecache.getlines(filename, module_globals)

    if action == "error":
        raise message
    # Other actions
    if action == "once":
        registry[key] = 1
        oncekey = (text, category)
        if onceregistry.get(oncekey):
            return
        onceregistry[oncekey] = 1
    elif action == "always":
        pass
    elif action == "module":
        # "module": show once per module, regardless of line number.
        registry[key] = 1
        altkey = (text, category, 0)
        if registry.get(altkey):
            return
        registry[altkey] = 1
    elif action == "default":
        registry[key] = 1
    else:
        # Unrecognized actions are errors
        raise RuntimeError(
              "Unrecognized action (%r) in warnings.filters:\n %s" %
              (action, item))
    if not callable(showwarning):
        raise TypeError("warnings.showwarning() must be set to a "
                        "function or method")
    # Print message and context
    showwarning(message, category, filename, lineno)
class WarningMessage(object):
    """Holds the result of a single showwarning() call."""

    _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
                        "line")

    def __init__(self, message, category, filename, lineno, file=None,
                 line=None):
        # Store each argument under its own name (mirrors
        # _WARNING_DETAILS, without the locals() indirection).
        self.message = message
        self.category = category
        self.filename = filename
        self.lineno = lineno
        self.file = file
        self.line = line
        self._category_name = category.__name__ if category else None

    def __str__(self):
        return ("{message : %r, category : %r, filename : %r, lineno : %s, "
                "line : %r}" % (self.message, self._category_name,
                                self.filename, self.lineno, self.line))
class catch_warnings(object):
    """A context manager that copies and restores the warnings filter upon
    exiting the context.

    The 'record' argument specifies whether warnings should be captured by a
    custom implementation of warnings.showwarning() and be appended to a list
    returned by the context manager. Otherwise None is returned by the context
    manager. The objects appended to the list are arguments whose attributes
    mirror the arguments to showwarning().

    The 'module' argument is to specify an alternative module to the module
    named 'warnings' and imported under that name. This argument is only useful
    when testing the warnings module itself.
    """

    def __init__(self, *, record=False, module=None):
        """Specify whether to record warnings and if an alternative module
        should be used other than sys.modules['warnings'].

        For compatibility with Python 3.0, please consider all arguments to be
        keyword-only.
        """
        self._record = record
        self._module = sys.modules['warnings'] if module is None else module
        self._entered = False

    def __repr__(self):
        args = []
        if self._record:
            args.append("record=True")
        if self._module is not sys.modules['warnings']:
            args.append("module=%r" % self._module)
        name = type(self).__name__
        return "%s(%s)" % (name, ", ".join(args))

    def __enter__(self):
        # Not reentrant: a second __enter__ would clobber the saved state.
        if self._entered:
            raise RuntimeError("Cannot enter %r twice" % self)
        self._entered = True
        # Snapshot the filter list and work on a copy.
        self._filters = self._module.filters
        self._module.filters = self._filters[:]
        self._module._filters_mutated()
        self._showwarning = self._module.showwarning
        if self._record:
            log = []
            # Capture warnings into `log` instead of displaying them.
            def showwarning(*args, **kwargs):
                log.append(WarningMessage(*args, **kwargs))
            self._module.showwarning = showwarning
            return log
        else:
            return None

    def __exit__(self, *exc_info):
        if not self._entered:
            raise RuntimeError("Cannot exit %r without entering first" % self)
        # Restore the saved filter list and showwarning hook.
        self._module.filters = self._filters
        self._module._filters_mutated()
        self._module.showwarning = self._showwarning
# filters contains a sequence of filter 5-tuples
# The components of the 5-tuple are:
# - an action: error, ignore, always, default, module, or once
# - a compiled regex that must match the warning message
# - a class representing the warning category
# - a compiled regex that must match the module that is being warned
# - a line number for the line being warning, or 0 to mean any line
# If either of the compiled regexes is None, match anything.
# Prefer the C accelerator in _warnings for the hot-path state; fall back
# to pure-Python equivalents when it is unavailable.
_warnings_defaults = False
try:
    from _warnings import (filters, _defaultaction, _onceregistry,
                           warn, warn_explicit, _filters_mutated)
    defaultaction = _defaultaction
    onceregistry = _onceregistry
    _warnings_defaults = True
except ImportError:
    filters = []
    defaultaction = "default"
    onceregistry = {}

    _filters_version = 1

    def _filters_mutated():
        # Bump the version counter so cached filter decisions are discarded.
        global _filters_version
        _filters_version += 1


# Module initialization
_processoptions(sys.warnoptions)
if not _warnings_defaults:
    # No C defaults available: install the standard default filters here.
    silence = [ImportWarning, PendingDeprecationWarning]
    silence.append(DeprecationWarning)
    for cls in silence:
        simplefilter("ignore", category=cls)
    bytes_warning = sys.flags.bytes_warning
    if bytes_warning > 1:
        # -bb on the command line: bytes/str comparison warnings are fatal.
        bytes_action = "error"
    elif bytes_warning:
        bytes_action = "default"
    else:
        bytes_action = "ignore"
    simplefilter(bytes_action, category=BytesWarning, append=1)
    # resource usage warnings are enabled by default in pydebug mode
    if hasattr(sys, 'gettotalrefcount'):
        resource_action = "always"
    else:
        resource_action = "ignore"
    simplefilter(resource_action, category=ResourceWarning, append=1)

del _warnings_defaults
| gpl-3.0 |
patrickstocklin/chattR | lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/euctwprober.py | 2994 | 1676 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCTWDistributionAnalysis
from .mbcssm import EUCTWSMModel
class EUCTWProber(MultiByteCharSetProber):
    """Charset prober for EUC-TW encoded byte streams."""

    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        # State machine validating EUC-TW byte sequences.
        self._mCodingSM = CodingStateMachine(EUCTWSMModel)
        # Character-frequency analyser used to score candidate text.
        self._mDistributionAnalyzer = EUCTWDistributionAnalysis()
        self.reset()

    def get_charset_name(self):
        # Canonical charset name reported when this prober wins.
        return "EUC-TW"
| gpl-2.0 |
ZachRiegel/scriptbin | pypyjs/modules/test/test_strop.py | 34 | 6615 | import warnings
warnings.filterwarnings("ignore", "strop functions are obsolete;",
DeprecationWarning,
r'test.test_strop|unittest')
import strop
import unittest
import sys
from test import test_support
class StropFunctionTestCase(unittest.TestCase):
def test_atoi(self):
self.assertTrue(strop.atoi(" 1 ") == 1)
self.assertRaises(ValueError, strop.atoi, " 1x")
self.assertRaises(ValueError, strop.atoi, " x1 ")
def test_atol(self):
self.assertTrue(strop.atol(" 1 ") == 1L)
self.assertRaises(ValueError, strop.atol, " 1x")
self.assertRaises(ValueError, strop.atol, " x1 ")
def test_atof(self):
self.assertTrue(strop.atof(" 1 ") == 1.0)
self.assertRaises(ValueError, strop.atof, " 1x")
self.assertRaises(ValueError, strop.atof, " x1 ")
def test_capitalize(self):
self.assertTrue(strop.capitalize(" hello ") == " hello ")
self.assertTrue(strop.capitalize("hello ") == "Hello ")
def test_find(self):
self.assertTrue(strop.find("abcdefghiabc", "abc") == 0)
self.assertTrue(strop.find("abcdefghiabc", "abc", 1) == 9)
self.assertTrue(strop.find("abcdefghiabc", "def", 4) == -1)
def test_rfind(self):
self.assertTrue(strop.rfind("abcdefghiabc", "abc") == 9)
def test_lower(self):
self.assertTrue(strop.lower("HeLLo") == "hello")
def test_upper(self):
self.assertTrue(strop.upper("HeLLo") == "HELLO")
def test_swapcase(self):
self.assertTrue(strop.swapcase("HeLLo cOmpUteRs") == "hEllO CoMPuTErS")
def test_strip(self):
self.assertTrue(strop.strip(" \t\n hello \t\n ") == "hello")
def test_lstrip(self):
self.assertTrue(strop.lstrip(" \t\n hello \t\n ") == "hello \t\n ")
def test_rstrip(self):
self.assertTrue(strop.rstrip(" \t\n hello \t\n ") == " \t\n hello")
def test_replace(self):
replace = strop.replace
self.assertTrue(replace("one!two!three!", '!', '@', 1)
== "one@two!three!")
self.assertTrue(replace("one!two!three!", '!', '@', 2)
== "one@two@three!")
self.assertTrue(replace("one!two!three!", '!', '@', 3)
== "one@two@three@")
self.assertTrue(replace("one!two!three!", '!', '@', 4)
== "one@two@three@")
# CAUTION: a replace count of 0 means infinity only to strop,
# not to the string .replace() method or to the
# string.replace() function.
self.assertTrue(replace("one!two!three!", '!', '@', 0)
== "one@two@three@")
self.assertTrue(replace("one!two!three!", '!', '@')
== "one@two@three@")
self.assertTrue(replace("one!two!three!", 'x', '@')
== "one!two!three!")
self.assertTrue(replace("one!two!three!", 'x', '@', 2)
== "one!two!three!")
def test_split(self):
split = strop.split
self.assertTrue(split("this is the split function")
== ['this', 'is', 'the', 'split', 'function'])
self.assertTrue(split("a|b|c|d", '|') == ['a', 'b', 'c', 'd'])
self.assertTrue(split("a|b|c|d", '|', 2) == ['a', 'b', 'c|d'])
self.assertTrue(split("a b c d", None, 1) == ['a', 'b c d'])
self.assertTrue(split("a b c d", None, 2) == ['a', 'b', 'c d'])
self.assertTrue(split("a b c d", None, 3) == ['a', 'b', 'c', 'd'])
self.assertTrue(split("a b c d", None, 4) == ['a', 'b', 'c', 'd'])
self.assertTrue(split("a b c d", None, 0) == ['a', 'b', 'c', 'd'])
self.assertTrue(split("a b c d", None, 2) == ['a', 'b', 'c d'])
def test_join(self):
self.assertTrue(strop.join(['a', 'b', 'c', 'd']) == 'a b c d')
self.assertTrue(strop.join(('a', 'b', 'c', 'd'), '') == 'abcd')
self.assertTrue(strop.join(Sequence()) == 'w x y z')
# try a few long ones
self.assertTrue(strop.join(['x' * 100] * 100, ':')
== (('x' * 100) + ":") * 99 + "x" * 100)
self.assertTrue(strop.join(('x' * 100,) * 100, ':')
== (('x' * 100) + ":") * 99 + "x" * 100)
def test_maketrans(self):
self.assertTrue(strop.maketrans("abc", "xyz") == transtable)
self.assertRaises(ValueError, strop.maketrans, "abc", "xyzq")
def test_translate(self):
self.assertTrue(strop.translate("xyzabcdef", transtable, "def")
== "xyzxyz")
def test_data_attributes(self):
strop.lowercase
strop.uppercase
strop.whitespace
@unittest.skipUnless(sys.maxsize == 2147483647, "only for 32-bit")
def test_expandtabs_overflow(self):
s = '\t\n' * 0x10000 + 'A' * 0x1000000
self.assertRaises(OverflowError, strop.expandtabs, s, 0x10001)
@test_support.precisionbigmemtest(size=test_support._2G - 1, memuse=5)
def test_stropjoin_huge_list(self, size):
a = "A" * size
try:
r = strop.join([a, a], a)
except OverflowError:
pass
else:
self.assertEqual(len(r), len(a) * 3)
@test_support.precisionbigmemtest(size=test_support._2G - 1, memuse=1)
def test_stropjoin_huge_tup(self, size):
a = "A" * size
try:
r = strop.join((a, a), a)
except OverflowError:
pass # acceptable on 32-bit
else:
self.assertEqual(len(r), len(a) * 3)
transtable = '\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`xyzdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377'
# join() now works with any sequence type.
class Sequence:
def __init__(self): self.seq = 'wxyz'
def __len__(self): return len(self.seq)
def __getitem__(self, i): return self.seq[i]
def test_main():
test_support.run_unittest(StropFunctionTestCase)
if __name__ == "__main__":
test_main()
| gpl-3.0 |
joelddiaz/openshift-tools | openshift/installer/vendored/openshift-ansible-3.6.173.0.59/roles/openshift_health_checker/openshift_checks/logging/fluentd.py | 55 | 6281 | """Check for an aggregated logging Fluentd deployment"""
import json
from openshift_checks import OpenShiftCheckException, OpenShiftCheckExceptionList
from openshift_checks.logging.logging import LoggingCheck
class Fluentd(LoggingCheck):
    """Check for an aggregated logging Fluentd deployment"""

    name = "fluentd"
    tags = ["health", "logging"]

    def run(self):
        """Check the Fluentd deployment and raise an error if any problems are found."""
        fluentd_pods = self.get_pods_for_component("fluentd")
        self.check_fluentd(fluentd_pods)
        # No structured result to report; problems surface as exceptions.
        return {}

    def check_fluentd(self, pods):
        """Verify fluentd is running everywhere. Raises OpenShiftCheckExceptionList if error(s) found."""
        node_selector = self.get_var(
            'openshift_logging_fluentd_nodeselector',
            default='logging-infra-fluentd=true'
        )
        nodes_by_name = self.get_nodes_by_name()
        fluentd_nodes = self.filter_fluentd_labeled_nodes(nodes_by_name, node_selector)
        # Collect all sub-check errors so the user sees them all at once.
        errors = []
        errors += self.check_node_labeling(nodes_by_name, fluentd_nodes, node_selector)
        errors += self.check_nodes_have_fluentd(pods, fluentd_nodes)
        errors += self.check_fluentd_pods_running(pods)

        # Make sure there are no extra fluentd pods
        if len(pods) > len(fluentd_nodes):
            errors.append(OpenShiftCheckException(
                'TooManyFluentdPods',
                'There are more Fluentd pods running than nodes labeled.\n'
                'This may not cause problems with logging but it likely indicates something wrong.'
            ))

        if errors:
            raise OpenShiftCheckExceptionList(errors)

    def get_nodes_by_name(self):
        """Retrieve all the node definitions. Returns: dict(name: node)"""
        nodes_json = self.exec_oc("get nodes -o json", [])
        try:
            nodes = json.loads(nodes_json)
        except ValueError:  # no valid json - should not happen
            raise OpenShiftCheckException(
                "BadOcNodeList",
                "Could not obtain a list of nodes to validate fluentd.\n"
                "Output from oc get:\n" + nodes_json
            )
        if not nodes or not nodes.get('items'):  # also should not happen
            raise OpenShiftCheckException(
                "NoNodesDefined",
                "No nodes appear to be defined according to the API."
            )
        return {
            node['metadata']['name']: node
            for node in nodes['items']
        }

    @staticmethod
    def filter_fluentd_labeled_nodes(nodes_by_name, node_selector):
        """Filter to all nodes with fluentd label. Returns dict(name: node)"""
        # node_selector is of the form "<label>=<value>".
        label, value = node_selector.split('=', 1)
        fluentd_nodes = {
            name: node for name, node in nodes_by_name.items()
            if node['metadata']['labels'].get(label) == value
        }
        if not fluentd_nodes:
            raise OpenShiftCheckException(
                'NoNodesLabeled',
                'There are no nodes with the fluentd label {label}.\n'
                'This means no logs will be aggregated from the nodes.'.format(label=node_selector)
            )
        return fluentd_nodes

    def check_node_labeling(self, nodes_by_name, fluentd_nodes, node_selector):
        """Note if nodes are not labeled as expected. Returns: error list"""
        intended_nodes = self.get_var('openshift_logging_fluentd_hosts', default=['--all'])
        # '--all' (the default) means every known node should carry the label.
        if not intended_nodes or '--all' in intended_nodes:
            intended_nodes = nodes_by_name.keys()
        nodes_missing_labels = set(intended_nodes) - set(fluentd_nodes.keys())
        if nodes_missing_labels:
            return [OpenShiftCheckException(
                'NodesUnlabeled',
                'The following nodes are supposed to be labeled with {label} but are not:\n'
                '  {nodes}\n'
                'Fluentd will not aggregate logs from these nodes.'.format(
                    label=node_selector, nodes=', '.join(nodes_missing_labels)
                ))]
        return []

    @staticmethod
    def check_nodes_have_fluentd(pods, fluentd_nodes):
        """Make sure fluentd is on all the labeled nodes. Returns: error list"""
        unmatched_nodes = fluentd_nodes.copy()
        # A pod may reference its node by name, by internal IP, or by the
        # kubernetes.io/hostname label, so build a lookup table for each.
        node_names_by_label = {
            node['metadata']['labels']['kubernetes.io/hostname']: name
            for name, node in fluentd_nodes.items()
        }
        node_names_by_internal_ip = {
            address['address']: name
            for name, node in fluentd_nodes.items()
            for address in node['status']['addresses']
            if address['type'] == "InternalIP"
        }
        for pod in pods:
            # Remove every node this pod could be running on; whatever is
            # left at the end has no fluentd pod.
            for name in [
                    pod['spec']['nodeName'],
                    node_names_by_internal_ip.get(pod['spec']['nodeName']),
                    node_names_by_label.get(pod.get('spec', {}).get('host')),
            ]:
                unmatched_nodes.pop(name, None)
        if unmatched_nodes:
            return [OpenShiftCheckException(
                'MissingFluentdPod',
                'The following nodes are supposed to have a Fluentd pod but do not:\n'
                '  {nodes}\n'
                'These nodes will not have their logs aggregated.'.format(
                    nodes='\n  '.join(unmatched_nodes.keys())
                ))]
        return []

    def check_fluentd_pods_running(self, pods):
        """Make sure all fluentd pods are running. Returns: error string"""
        not_running = super(Fluentd, self).not_running_pods(pods)
        if not_running:
            return [OpenShiftCheckException(
                'FluentdNotRunning',
                'The following Fluentd pods are supposed to be running but are not:\n'
                '  {pods}\n'
                'These pods will not aggregate logs from their nodes.'.format(
                    pods='\n'.join(
                        "  {name} ({host})".format(
                            name=pod['metadata']['name'],
                            host=pod['spec'].get('host', 'None')
                        )
                        for pod in not_running
                    )
                ))]
        return []
| apache-2.0 |
sunzuolei/youtube-dl | youtube_dl/extractor/daum.py | 118 | 2755 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
)
class DaumIE(InfoExtractor):
    """Extractor for videos hosted on Daum tvpot (tvpot.daum.net)."""

    _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/(?:v/|.*?clipid=)(?P<id>[^?#&]+)'
    IE_NAME = 'daum.net'

    _TESTS = [{
        'url': 'http://tvpot.daum.net/clip/ClipView.do?clipid=52554690',
        'info_dict': {
            'id': '52554690',
            'ext': 'mp4',
            'title': 'DOTA 2GETHER 시즌2 6회 - 2부',
            'description': 'DOTA 2GETHER 시즌2 6회 - 2부',
            'upload_date': '20130831',
            'duration': 3868,
        },
    }, {
        'url': 'http://tvpot.daum.net/v/vab4dyeDBysyBssyukBUjBz',
        'only_matching': True,
    }, {
        'url': 'http://tvpot.daum.net/v/07dXWRka62Y%24',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        canonical_url = 'http://tvpot.daum.net/v/%s' % video_id
        webpage = self._download_webpage(canonical_url, video_id)
        # The watch page embeds a viewer frame whose 'vid' query parameter
        # is the full video id expected by the videofarm API.
        full_id = self._search_regex(
            r'src=["\']http://videofarm\.daum\.net/controller/video/viewer/Video\.html\?.*?vid=(.+?)[&"\']',
            webpage, 'full id')
        query = compat_urllib_parse.urlencode({'vid': full_id})
        # Clip metadata: title, description, duration, registration time.
        info = self._download_xml(
            'http://tvpot.daum.net/clip/ClipInfoXml.do?' + query, video_id,
            'Downloading video info')
        # List of the output profiles (quality variants) available.
        urls = self._download_xml(
            'http://videofarm.daum.net/controller/api/open/v1_2/MovieData.apixml?' + query,
            video_id, 'Downloading video formats info')
        formats = []
        for format_el in urls.findall('result/output_list/output_list'):
            profile = format_el.attrib['profile']
            format_query = compat_urllib_parse.urlencode({
                'vid': full_id,
                'profile': profile,
            })
            # Each profile needs a separate request to resolve its media URL.
            url_doc = self._download_xml(
                'http://videofarm.daum.net/controller/api/open/v1_2/MovieLocation.apixml?' + format_query,
                video_id, note='Downloading video data for %s format' % profile)
            format_url = url_doc.find('result/url').text
            formats.append({
                'url': format_url,
                'format_id': profile,
            })
        return {
            'id': video_id,
            'title': info.find('TITLE').text,
            'formats': formats,
            'thumbnail': self._og_search_thumbnail(webpage),
            'description': info.find('CONTENTS').text,
            'duration': int(info.find('DURATION').text),
            # REGDTTM starts with the date; first 8 chars are YYYYMMDD
            # (matches the '20130831' upload_date in the test above).
            'upload_date': info.find('REGDTTM').text[:8],
        }
| unlicense |
rootguy1/code-base | python/sizedist.py | 1 | 2294 | #!/usr/bin/env python
# sizedist.py
# Description : Categorizes files based on their size and prints the number of files in different
# categories.
# Author : Imran Ahmed <researcher6@live.com>
import os
def get_size(directory):
    """Walk *directory* recursively and return the sizes of all files found.

    :param directory: root of the directory tree to scan
    :return: ascending list of file sizes in whole kilobytes
    """
    sizes = []
    for dirpath, dirnames, filenames in os.walk(directory):
        for filename in filenames:                     # each file in this subfolder
            fname = os.path.join(dirpath, filename)    # full path to the file
            size = os.path.getsize(fname)              # size in bytes
            # Floor-divide so the result is an int on both Python 2 and 3
            # (the old 'size / 1024' became float division under Python 3).
            sizes.append(size // 1024)
    sizes.sort()
    return sizes
def main():
d ="."
d = raw_input("Enter a path or '.' for current directory : ")
if os.path.isdir(d):
sizes =get_size(d)
length = len(sizes) # Total length of sizes
minsize = sizes[0] # Minimum value in the list
maxsize = sizes[length-1] # maximum size value
#print sizes # FOR DEBUGING ONLY
#print "Minimum file size : " , minsize , "k" # FOR DEBUGING ONLY
#print "Maximum file size : " , maxsize , "k" # FOR DEBUGING ONLY
logv = 4
cat = 1
print "|||----------------FILE SIZE SUMMARY----------------|||"
# First we wil print number of files less than 1
count = 0 # initialize count to 0
for x in sizes: # compare all elemnets in the list
if (x<=1): # If size is les than 1
count=count +1 #
print " < 1kB :" , count , " files"
while(cat <= maxsize):
count =0
oldcat = cat
cat = cat * logv
for x in sizes:
if (oldcat < x <= cat):
#print " Cat: " , cat, " size :" , x # FOR DEBUGING ONLY
count = count +1
print oldcat ,"kB to ", cat, "kB : " , count , "files"
else:
print "Directory could not be found"
exit
if __name__ == "__main__":
main()
| gpl-2.0 |
grap/OCB | addons/purchase/__openerp__.py | 55 | 3690 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Purchase Management',
'version': '1.1',
'category': 'Purchase Management',
'sequence': 19,
'summary': 'Purchase Orders, Receptions, Supplier Invoices',
'description': """
Manage goods requirement by Purchase Orders easily
==================================================
Purchase management enables you to track your suppliers' price quotations and convert them into purchase orders if necessary.
OpenERP has several methods of monitoring invoices and tracking the receipt of ordered goods. You can handle partial deliveries in OpenERP, so you can keep track of items that are still to be delivered in your orders, and you can issue reminders automatically.
OpenERP’s replenishment management rules enable the system to generate draft purchase orders automatically, or you can configure it to run a lean process driven entirely by current production needs.
Dashboard / Reports for Purchase Management will include:
---------------------------------------------------------
* Request for Quotations
* Purchase Orders Waiting Approval
* Monthly Purchases by Category
* Receptions Analysis
* Purchase Analysis
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'images' : ['images/purchase_order.jpeg', 'images/purchase_analysis.jpeg', 'images/request_for_quotation.jpeg'],
'depends': ['stock', 'process', 'procurement'],
'data': [
'security/purchase_security.xml',
'security/ir.model.access.csv',
'purchase_workflow.xml',
'purchase_sequence.xml',
'company_view.xml',
'purchase_data.xml',
'wizard/purchase_order_group_view.xml',
'wizard/purchase_line_invoice_view.xml',
'purchase_report.xml',
'purchase_view.xml',
'stock_view.xml',
'partner_view.xml',
'process/purchase_process.xml',
'report/purchase_report_view.xml',
'board_purchase_view.xml',
'edi/purchase_order_action_data.xml',
'res_config_view.xml',
],
'test': [
'test/process/cancel_order.yml',
'test/process/rfq2order2done.yml',
'test/process/generate_invoice_from_reception.yml',
'test/process/run_scheduler.yml',
'test/process/merge_order.yml',
'test/process/edi_purchase_order.yml',
'test/process/invoice_on_poline.yml',
'test/ui/print_report.yml',
'test/ui/duplicate_order.yml',
'test/ui/delete_order.yml',
],
'demo': [
'purchase_order_demo.yml',
'purchase_demo.xml',
],
'installable': True,
'auto_install': False,
'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
NaeiKinDus/xbmc | addons/service.xbmc.versioncheck/lib/shellhandlerapt.py | 152 | 4258 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Team-XBMC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import xbmc
from common import *
try:
from subprocess import check_output
from subprocess import call
except:
log('subprocess import error')
class ShellHandlerApt:
    """Thin wrapper around the apt command line tools (apt-cache/apt-get)
    used to query installed/candidate package versions and to install
    upgrades, optionally elevating privileges via sudo."""

    # Cached sudo password; fetched from the user at most once.
    _pwd = ""

    def __init__(self, usesudo=False):
        self.sudo = usesudo
        installed, candidate = self._check_versions("xbmc", False)
        if not installed:
            # there is no package installed via repo, so we exit here
            log("No installed package found, exiting")
            import sys
            sys.exit(0)

    def _check_versions(self, package, update=True):
        """Return (installed, candidate) version strings for *package*.

        Either element is False when that version is unknown; (False, False)
        is returned on any error. When *update* is true the apt cache is
        refreshed first.
        """
        _cmd = "apt-cache policy " + package

        if update and not self._update_cache():
            return False, False
        try:
            result = check_output([_cmd], shell=True).split("\n")
        except Exception as error:
            log("ShellHandlerApt: exception while executing shell command %s: %s" % (_cmd, error))
            return False, False

        if result[0].replace(":", "") == package:
            # Parsed positionally from 'apt-cache policy' output: the
            # package name line is followed by Installed: and Candidate:.
            installed = result[1].split()[1]
            candidate = result[2].split()[1]
            if installed == "(none)":
                installed = False
            if candidate == "(none)":
                # BUG FIX: this previously assigned to a misspelled name
                # ('candiate'), so callers received the literal string
                # "(none)" instead of False.
                candidate = False
            return installed, candidate
        else:
            log("ShellHandlerApt: error during version check")
            return False, False

    def _update_cache(self):
        """Run 'apt-get update'; return True on success."""
        _cmd = 'apt-get update'
        try:
            if self.sudo:
                # NOTE(review): echoing the password through the shell can
                # expose it to other local users via the process list; kept
                # for compatibility with the existing sudo -S workflow.
                check_output('echo \'%s\' | sudo -S %s' % (self._getpassword(), _cmd), shell=True)
            else:
                check_output(_cmd.split())
        except Exception as error:
            log("Exception while executing shell command %s: %s" % (_cmd, error))
            return False
        return True

    def check_upgrade_available(self, package):
        '''returns True if newer package is available in the repositories'''
        installed, candidate = self._check_versions(package)
        if installed and candidate:
            if installed != candidate:
                log("Version installed  %s" % installed)
                log("Version available  %s" % candidate)
                return True
            else:
                # Same version installed and available: fall through and
                # implicitly return None (falsy), as before.
                log("Already on newest version")
        elif not installed:
            log("No installed package found")
            return False
        else:
            return False

    def upgrade_package(self, package):
        """Install or upgrade *package*; return True on success."""
        _cmd = "apt-get install -y " + package
        try:
            if self.sudo:
                check_output('echo \'%s\' | sudo -S %s' % (self._getpassword(), _cmd), shell=True)
            else:
                check_output(_cmd.split())
            log("Upgrade successful")
        except Exception as error:
            log("Exception while executing shell command %s: %s" % (_cmd, error))
            return False
        return True

    def upgrade_system(self):
        """Upgrade all installed packages; return True on success."""
        _cmd = "apt-get upgrade -y"
        try:
            log("Upgrading system")
            if self.sudo:
                check_output('echo \'%s\' | sudo -S %s' % (self._getpassword(), _cmd), shell=True)
            else:
                check_output(_cmd.split())
        except Exception as error:
            log("Exception while executing shell command %s: %s" % (_cmd, error))
            return False
        return True

    def _getpassword(self):
        """Prompt the user for the sudo password once and cache it."""
        if len(self._pwd) == 0:
            self._pwd = get_password_from_user()
        return self._pwd
| gpl-2.0 |
sebrandon1/nova | nova/api/openstack/compute/extended_volumes.py | 6 | 3488 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Extended Volumes API extension."""
from nova.api.openstack import api_version_request
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import objects
from nova.policies import extended_volumes as ev_policies
ALIAS = "os-extended-volumes"
class ExtendedVolumesController(wsgi.Controller):
    """Adds an '<alias>:volumes_attached' attribute to server show/detail
    responses when the extended-volumes policy allows it."""

    def _extend_server(self, context, server, req, bdms):
        attached = []
        for bdm in bdms:
            volume_id = bdm.get('volume_id')
            if not volume_id:
                continue
            entry = {'id': volume_id}
            if api_version_request.is_supported(req, min_version='2.3'):
                entry['delete_on_termination'] = bdm['delete_on_termination']
            attached.append(entry)
        # NOTE(mriedem): The os-extended-volumes prefix should not be used for
        # new attributes after v2.1. They are only in v2.1 for backward compat
        # with v2.0.
        server["%s:volumes_attached" % ExtendedVolumes.alias] = attached

    @wsgi.extends
    def show(self, req, resp_obj, id):
        context = req.environ['nova.context']
        if not context.can(ev_policies.BASE_POLICY_NAME, fatal=False):
            return
        server = resp_obj.obj['server']
        bdms = objects.BlockDeviceMappingList.bdms_by_instance_uuid(
            context, [server['id']])
        self._extend_server(context, server, req,
                            self._get_instance_bdms(bdms, server))

    @wsgi.extends
    def detail(self, req, resp_obj):
        context = req.environ['nova.context']
        if not context.can(ev_policies.BASE_POLICY_NAME, fatal=False):
            return
        servers = list(resp_obj.obj['servers'])
        instance_uuids = [srv['id'] for srv in servers]
        bdms = objects.BlockDeviceMappingList.bdms_by_instance_uuid(
            context, instance_uuids)
        for srv in servers:
            self._extend_server(context, srv, req,
                                self._get_instance_bdms(bdms, srv))

    def _get_instance_bdms(self, bdms, server):
        # server['id'] is guaranteed to be in the cache due to
        # the core API adding it in the 'detail' or 'show' method.
        # If that instance has since been deleted, it won't be in the
        # 'bdms' dictionary though, so use 'get' to avoid KeyErrors.
        return bdms.get(server['id'], [])
return bdms.get(server['id'], [])
class ExtendedVolumes(extensions.V21APIExtensionBase):
    """Extended Volumes support."""

    name = "ExtendedVolumes"
    alias = ALIAS
    version = 1

    def get_controller_extensions(self):
        # Attach ExtendedVolumesController to the core 'servers' resource.
        controller = ExtendedVolumesController()
        extension = extensions.ControllerExtension(self, 'servers', controller)
        return [extension]

    def get_resources(self):
        # This extension adds no standalone API resources.
        return []
| apache-2.0 |
stevenbrichards/boto | boto/dynamodb/types.py | 97 | 12477 | # Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
Some utility functions to deal with mapping Amazon DynamoDB types to
Python types and vice-versa.
"""
import base64
from decimal import (Decimal, DecimalException, Context,
Clamped, Overflow, Inexact, Underflow, Rounded)
from collections import Mapping
from boto.dynamodb.exceptions import DynamoDBNumberError
from boto.compat import filter, map, six, long_type
# Decimal context mirroring DynamoDB's number limits: 38 digits of
# precision, exponent range [-128, 126]; the listed conditions raise
# (are trapped) instead of silently rounding.
DYNAMODB_CONTEXT = Context(
    Emin=-128, Emax=126, rounding=None, prec=38,
    traps=[Clamped, Overflow, Inexact, Rounded, Underflow])
# python2.6 cannot convert floats directly to
# Decimals. This is taken from:
# http://docs.python.org/release/2.6.7/library/decimal.html#decimal-faq
def float_to_decimal(f):
    """Convert float *f* to a Decimal via its exact integer ratio,
    retrying at doubled precision while the context reports an inexact
    division."""
    num, den = f.as_integer_ratio()
    numerator = Decimal(num)
    denominator = Decimal(den)
    ctx = DYNAMODB_CONTEXT
    quotient = ctx.divide(numerator, denominator)
    while ctx.flags[Inexact]:
        ctx.flags[Inexact] = False
        ctx.prec *= 2
        quotient = ctx.divide(numerator, denominator)
    return quotient
def is_num(n, boolean_as_int=True):
    """Return True if *n* is a numeric value, or one of the numeric type
    objects themselves. Booleans count as numbers unless
    *boolean_as_int* is False."""
    accepted = (int, long_type, float, Decimal)
    if boolean_as_int:
        accepted = accepted + (bool,)
    return isinstance(n, accepted) or n in accepted
# Define string/binary predicates per Python major version: the accepted
# text type differs (basestring vs str), as does the binary wrapper.
if six.PY2:
    def is_str(n):
        # True for string instances, or for the string *types* themselves.
        return (isinstance(n, basestring) or
                isinstance(n, type) and issubclass(n, basestring))

    def is_binary(n):
        return isinstance(n, Binary)

else:  # PY3
    def is_str(n):
        # True for str instances, or for the str type itself.
        return (isinstance(n, str) or
                isinstance(n, type) and issubclass(n, str))

    def is_binary(n):
        return isinstance(n, bytes)  # Binary is subclass of bytes.
def serialize_num(val):
    """Return the string form of a number for the DynamoDB wire format.

    Booleans are sent as their integer form ('1'/'0'), not 'True'/'False'.
    """
    if isinstance(val, bool):
        val = int(val)
    return str(val)
def convert_num(s):
    """Parse a DynamoDB numeric string: float when it contains a
    decimal point, int otherwise."""
    return float(s) if '.' in s else int(s)
def convert_binary(n):
    # DynamoDB ships binary attributes base64-encoded; decode and wrap.
    return Binary(base64.b64decode(n))
def get_dynamodb_type(val, use_boolean=True):
    """
    Take a scalar Python value and return a string representing
    the corresponding Amazon DynamoDB type. If the value passed in is
    not a supported type, raise a TypeError.

    Type codes: NULL, BOOL, N (number), S (string), NS/SS/BS (sets of
    numbers/strings/binary), B (binary), M (map), L (list). With
    use_boolean=False, bools are classified as numbers ('N') for
    backward compatibility.
    """
    dynamodb_type = None
    if val is None:
        dynamodb_type = 'NULL'
    elif is_num(val):
        if isinstance(val, bool) and use_boolean:
            dynamodb_type = 'BOOL'
        else:
            dynamodb_type = 'N'
    elif is_str(val):
        dynamodb_type = 'S'
    elif isinstance(val, (set, frozenset)):
        # NOTE(review): an empty set vacuously satisfies the first test
        # and is therefore reported as 'NS'.
        if False not in map(is_num, val):
            dynamodb_type = 'NS'
        elif False not in map(is_str, val):
            dynamodb_type = 'SS'
        elif False not in map(is_binary, val):
            dynamodb_type = 'BS'
    elif is_binary(val):
        dynamodb_type = 'B'
    elif isinstance(val, Mapping):
        dynamodb_type = 'M'
    elif isinstance(val, list):
        dynamodb_type = 'L'
    if dynamodb_type is None:
        msg = 'Unsupported type "%s" for value "%s"' % (type(val), val)
        raise TypeError(msg)
    return dynamodb_type
def dynamize_value(val):
    """
    Take a scalar Python value and return a dict consisting
    of the Amazon DynamoDB type specification and the value that
    needs to be sent to Amazon DynamoDB. If the type of the value
    is not supported, raise a TypeError

    NOTE(review): only the legacy types (N, S, NS, SS, B, BS) are
    wrapped here; values classified as BOOL/NULL/M/L fall through all
    branches and are returned unchanged.
    """
    dynamodb_type = get_dynamodb_type(val)
    if dynamodb_type == 'N':
        val = {dynamodb_type: serialize_num(val)}
    elif dynamodb_type == 'S':
        val = {dynamodb_type: val}
    elif dynamodb_type == 'NS':
        val = {dynamodb_type: list(map(serialize_num, val))}
    elif dynamodb_type == 'SS':
        val = {dynamodb_type: [n for n in val]}
    elif dynamodb_type == 'B':
        if isinstance(val, bytes):
            val = Binary(val)
        # Binary.encode() produces the base64 wire representation.
        val = {dynamodb_type: val.encode()}
    elif dynamodb_type == 'BS':
        val = {dynamodb_type: [n.encode() for n in val]}
    return val
if six.PY2:
    class Binary(object):
        """Wrapper marking a byte string as DynamoDB binary data (PY2)."""
        def __init__(self, value):
            if not isinstance(value, (bytes, six.text_type)):
                raise TypeError('Value must be a string of binary data!')
            if not isinstance(value, bytes):
                # Accept unicode for convenience, but store raw bytes.
                value = value.encode("utf-8")
            self.value = value
        def encode(self):
            # Wire format: base64-encoded text.
            return base64.b64encode(self.value).decode('utf-8')
        def __eq__(self, other):
            # Compare transparently against other Binary wrappers or
            # raw byte strings.
            if isinstance(other, Binary):
                return self.value == other.value
            else:
                return self.value == other
        def __ne__(self, other):
            return not self.__eq__(other)
        def __repr__(self):
            return 'Binary(%r)' % self.value
        def __str__(self):
            return self.value
        def __hash__(self):
            return hash(self.value)
else:
    class Binary(bytes):
        """bytes subclass marking DynamoDB binary data (PY3)."""
        def encode(self):
            # Base64 wire encoder, mirroring the PY2 class's encode().
            return base64.b64encode(self).decode('utf-8')
        @property
        def value(self):
            # This matches the public API of the Python 2 version,
            # but just returns itself since it is already a bytes
            # instance.
            return bytes(self)
        def __repr__(self):
            return 'Binary(%r)' % self.value
def item_object_hook(dct):
    """
    A custom object hook for use when decoding JSON item bodys.
    This hook will transform Amazon DynamoDB JSON responses to something
    that maps directly to native Python types.

    Single-key dicts whose key is a DynamoDB type code (S, N, SS, NS,
    B, BS) are unwrapped; anything else is returned untouched.
    """
    if len(dct) > 1:
        return dct
    # Lambdas keep the converter lookups lazy, exactly like the original
    # chain of ifs: helpers are only resolved when their branch is taken.
    converters = (
        ('S', lambda v: v),
        ('N', lambda v: convert_num(v)),
        ('SS', lambda v: set(v)),
        ('NS', lambda v: set(map(convert_num, v))),
        ('B', lambda v: convert_binary(v)),
        ('BS', lambda v: set(map(convert_binary, v))),
    )
    for type_code, converter in converters:
        if type_code in dct:
            return converter(dct[type_code])
    return dct
class Dynamizer(object):
    """Control serialization/deserialization of types.

    This class controls the encoding of python types to the
    format that is expected by the DynamoDB API, as well as
    taking DynamoDB types and constructing the appropriate
    python types.

    If you want to customize this process, you can subclass
    this class and override the encoding/decoding of
    specific types. For example::

        'foo' (Python type)
            |
            v
        encode('foo')
            |
            v
        _encode_s('foo')
            |
            v
        {'S': 'foo'} (Encoding sent to/received from DynamoDB)
            |
            v
        decode({'S': 'foo'})
            |
            v
        _decode_s({'S': 'foo'})
            |
            v
        'foo' (Python type)
    """
    def _get_dynamodb_type(self, attr):
        # Hook point: subclasses override this to change classification.
        return get_dynamodb_type(attr)
    def encode(self, attr):
        """
        Encodes a python type to the format expected
        by DynamoDB.
        """
        dynamodb_type = self._get_dynamodb_type(attr)
        try:
            # Dispatch to _encode_<type code> (e.g. _encode_s, _encode_ns).
            encoder = getattr(self, '_encode_%s' % dynamodb_type.lower())
        except AttributeError:
            raise ValueError("Unable to encode dynamodb type: %s" %
                             dynamodb_type)
        return {dynamodb_type: encoder(attr)}
    def _encode_n(self, attr):
        """Encode a number as a decimal string; reject Infinity/NaN."""
        try:
            if isinstance(attr, float) and not hasattr(Decimal, 'from_float'):
                # python2.6 does not support creating Decimals directly
                # from floats so we have to do this ourself.
                n = str(float_to_decimal(attr))
            else:
                n = str(DYNAMODB_CONTEXT.create_decimal(attr))
            if list(filter(lambda x: x in n, ('Infinity', 'NaN'))):
                raise TypeError('Infinity and NaN not supported')
            return n
        except (TypeError, DecimalException) as e:
            msg = '{0} numeric for `{1}`\n{2}'.format(
                e.__class__.__name__, attr, str(e) or '')
            raise DynamoDBNumberError(msg)
    def _encode_s(self, attr):
        """Encode a string: bytes are decoded as UTF-8, non-text str()d."""
        if isinstance(attr, bytes):
            attr = attr.decode('utf-8')
        elif not isinstance(attr, six.text_type):
            attr = str(attr)
        return attr
    def _encode_ns(self, attr):
        return list(map(self._encode_n, attr))
    def _encode_ss(self, attr):
        return [self._encode_s(n) for n in attr]
    def _encode_b(self, attr):
        # Wrap raw bytes so .encode() yields the base64 wire form.
        if isinstance(attr, bytes):
            attr = Binary(attr)
        return attr.encode()
    def _encode_bs(self, attr):
        return [self._encode_b(n) for n in attr]
    def _encode_null(self, attr):
        # DynamoDB represents NULL as {'NULL': True}.
        return True
    def _encode_bool(self, attr):
        return attr
    def _encode_m(self, attr):
        # Maps and lists are encoded recursively via encode().
        return dict([(k, self.encode(v)) for k, v in attr.items()])
    def _encode_l(self, attr):
        return [self.encode(i) for i in attr]
    def decode(self, attr):
        """
        Takes the format returned by DynamoDB and constructs
        the appropriate python type.
        """
        if len(attr) > 1 or not attr:
            return attr
        dynamodb_type = list(attr.keys())[0]
        if dynamodb_type.lower() == dynamodb_type:
            # It's not an actual type, just a single character attr that
            # overlaps with the DDB types. Return it.
            return attr
        try:
            decoder = getattr(self, '_decode_%s' % dynamodb_type.lower())
        except AttributeError:
            return attr
        return decoder(attr[dynamodb_type])
    def _decode_n(self, attr):
        # Numbers decode to Decimal so no precision is lost.
        return DYNAMODB_CONTEXT.create_decimal(attr)
    def _decode_s(self, attr):
        return attr
    def _decode_ns(self, attr):
        return set(map(self._decode_n, attr))
    def _decode_ss(self, attr):
        return set(map(self._decode_s, attr))
    def _decode_b(self, attr):
        return convert_binary(attr)
    def _decode_bs(self, attr):
        return set(map(self._decode_b, attr))
    def _decode_null(self, attr):
        return None
    def _decode_bool(self, attr):
        return attr
    def _decode_m(self, attr):
        return dict([(k, self.decode(v)) for k, v in attr.items()])
    def _decode_l(self, attr):
        return [self.decode(i) for i in attr]
class NonBooleanDynamizer(Dynamizer):
    """Casting boolean type to numeric types.
    This class is provided for backward compatibility.
    """
    def _get_dynamodb_type(self, attr):
        # use_boolean=False classifies bools as 'N' (serialized 0/1)
        # instead of the native 'BOOL' type.
        return get_dynamodb_type(attr, use_boolean=False)
class LossyFloatDynamizer(NonBooleanDynamizer):
    """Use float/int instead of Decimal for numeric types.

    This class is provided for backwards compatibility. Instead of
    using Decimals for the 'N', 'NS' types it uses ints/floats.

    This class is deprecated and its usage is not encouraged,
    as doing so may result in loss of precision. Use the
    `Dynamizer` class instead.
    """
    def _encode_n(self, attr):
        # serialize_num() renders bools as '0'/'1' and everything else
        # via str().
        return serialize_num(attr)
    def _encode_ns(self, attr):
        # Reuse _encode_n for each member so set elements serialize
        # exactly like scalar numbers (the previous bare str() call
        # would emit 'True'/'False' for booleans, which is not a valid
        # DynamoDB number string).
        return [self._encode_n(i) for i in attr]
    def _decode_n(self, attr):
        return convert_num(attr)
    def _decode_ns(self, attr):
        return set(map(self._decode_n, attr))
| mit |
synopat/pyload | module/plugins/hoster/MegaRapidoNet.py | 8 | 2155 | # -*- coding: utf-8 -*-
import random
from ..internal.MultiHoster import MultiHoster
def random_with_n_digits(n):
    """Return the string "0." followed by n uniformly random digits,
    redrawing until at least one digit is non-zero."""
    while True:
        digits = [random.randint(0, 9) for _ in range(n)]
        if any(digits):
            return "0." + "".join(str(d) for d in digits)
class MegaRapidoNet(MultiHoster):
    __name__ = "MegaRapidoNet"
    __type__ = "hoster"
    __version__ = "0.12"
    __status__ = "testing"
    __pattern__ = r'http://(?:www\.)?\w+\.megarapido\.net/\?file=\w+'
    __config__ = [("activated", "bool", "Activated", True),
                  ("use_premium", "bool", "Use premium account if available", True),
                  ("fallback", "bool", "Fallback to free download if premium fails", False),
                  ("chk_filesize", "bool", "Check file size", True),
                  ("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
                  ("revert_failed", "bool", "Revert to standard download if fails", True)]
    __description__ = """MegaRapido.net multi-hoster plugin"""
    __license__ = "GPLv3"
    __authors__ = [("Kagenoshin", "kagenoshin@gmx.ch")]
    # Regex extracting the premium download href, and the site's
    # error-message <div> content.
    LINK_PREMIUM_PATTERN = r'<\s*?a[^>]*?title\s*?=\s*?["\'].*?download["\'][^>]*?href=["\']([^"\']+)'
    ERROR_PATTERN = r'<\s*?div[^>]*?class\s*?=\s*?["\']?alert-message error.*?>([^<]*)'
    def handle_premium(self, pyfile):
        """Request a premium link for *pyfile* from megarapido.net's link
        generator, then delegate to MultiHoster.handle_premium which
        applies LINK_PREMIUM_PATTERN to the response."""
        self.data = self.load("http://megarapido.net/gerar.php",
                              post={'rand': random_with_n_digits(16),
                                    'urllist': pyfile.url,
                                    'links': pyfile.url,
                                    'exibir': "normal",
                                    'usar': "premium",
                                    'user': self.account.get_data('sid'),
                                    'autoreset': ""})
        # Portuguese message meaning the session was opened elsewhere
        # ("log out and log in again to generate your links").
        if "desloga e loga novamente para gerar seus links" in self.data.lower():
            self.error(_("You have logged in at another place"))
        return MultiHoster.handle_premium(self, pyfile)
| gpl-3.0 |
ianscrivener/microservices-infrastructure | roles/docker/files/dockerplugin.py | 22 | 11810 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Collectd plugin for collecting docker container stats
#
# Copyright © 2015 eNovance
#
# Authors:
# Sylvain Baubeau <sylvain.baubeau@enovance.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Requirements: docker-py
import dateutil.parser
from distutils.version import StrictVersion
import docker
import json
import os
import threading
import time
import sys
def _c(c):
    """A helper method for representing a container in messages. If the given
    argument is a string, it is assumed to be the container's ID and only the
    first 7 digits will be returned. If it's a dictionary, the string returned
    is <7-digit ID>/<name>."""
    # Python 2 module: str and unicode are distinct string types here.
    if type(c) == str or type(c) == unicode:
        return c[:7]
    return '{}/{}'.format(c['Id'][:7], c['Name'])
class Stats:
    """Base class for stats readers; provides the emit() helper that
    dispatches one CollectD value list for a container."""
    @classmethod
    def emit(cls, container, type, value, t=None, type_instance=None):
        val = collectd.Values()
        val.plugin = 'docker'
        val.plugin_instance = container['Name']
        if type:
            val.type = type
        if type_instance:
            val.type_instance = type_instance
        if t:
            # Docker supplies the sample time as an ISO-8601 string.
            val.time = time.mktime(dateutil.parser.parse(t).timetuple())
        else:
            val.time = time.time()
        # With some versions of CollectD, a dummy metadata map must be added
        # to each value for it to be correctly serialized to JSON by the
        # write_http plugin. See
        # https://github.com/collectd/collectd/issues/716
        val.meta = {'true': 'true'}
        val.values = value
        val.dispatch()
    @classmethod
    def read(cls, container, stats):
        """Subclasses transform the given stats dict into emit() calls."""
        raise NotImplementedError
class BlkioStats(Stats):
    """Reads block-I/O counters, grouped per block device."""
    @classmethod
    def read(cls, container, stats, t):
        for key, values in stats.items():
            # Block IO stats are reported by block device (with major/minor
            # numbers). We need to group and report the stats of each block
            # device independently.
            blkio_stats = {}
            for value in values:
                k = '{}-{}-{}'.format(key, value['major'], value['minor'])
                if k not in blkio_stats:
                    blkio_stats[k] = []
                blkio_stats[k].append(value['value'])
            for type_instance, values in blkio_stats.items():
                if len(values) == 5:
                    cls.emit(container, 'blkio', values,
                             type_instance=type_instance, t=t)
                elif len(values) == 1:
                    # For some reason, some fields contains only one value and
                    # the 'op' field is empty. Need to investigate this
                    cls.emit(container, 'blkio.single', values,
                             type_instance=key, t=t)
                else:
                    # NOTE(review): the rest of this file calls
                    # collectd.warning(); verify that .warn exists.
                    collectd.warn(('Unexpected number of blkio stats for '
                                   'container {}!'.format(_c(container))))
class CpuStats(Stats):
    """Reads per-CPU usage, throttling data, and aggregate CPU usage."""
    @classmethod
    def read(cls, container, stats, t):
        cpu_usage = stats['cpu_usage']
        percpu = cpu_usage['percpu_usage']
        for cpu, value in enumerate(percpu):
            cls.emit(container, 'cpu.percpu.usage', [value],
                     type_instance='cpu%d' % (cpu,), t=t)
        # Sort by key for a stable value ordering across reads.
        items = sorted(stats['throttling_data'].items())
        cls.emit(container, 'cpu.throttling_data', [x[1] for x in items], t=t)
        values = [cpu_usage['total_usage'], cpu_usage['usage_in_kernelmode'],
                  cpu_usage['usage_in_usermode'], stats['system_cpu_usage']]
        cls.emit(container, 'cpu.usage', values, t=t)
class NetworkStats(Stats):
    """Reads a container's network interface counters and emits them as a
    single 'network.usage' value list, ordered by stat name."""
    @classmethod
    def read(cls, container, stats, t):
        # sorted() works on both Python 2 and 3 (on Python 3,
        # dict.items() returns a view which has no .sort() method) and
        # matches the style already used by CpuStats in this file.
        items = sorted(stats.items())
        cls.emit(container, 'network.usage', [x[1] for x in items], t=t)
class MemoryStats(Stats):
    """Reads memory limit/max/current usage plus detailed counters."""
    @classmethod
    def read(cls, container, stats, t):
        values = [stats['limit'], stats['max_usage'], stats['usage']]
        cls.emit(container, 'memory.usage', values, t=t)
        # One 'memory.stats' value per detailed counter (cache, rss, ...).
        for key, value in stats['stats'].items():
            cls.emit(container, 'memory.stats', [value],
                     type_instance=key, t=t)
class ContainerStats(threading.Thread):
    """
    A thread that continuously consumes the stats stream from a container,
    keeping the most recently read stats available for processing by CollectD.

    Such a mechanism is required because the first read from Docker's stats API
    endpoint can take up to one second. Hitting this endpoint for every
    container running on the system would only be feasible if the number of
    running containers was less than the polling interval of CollectD. Above
    that, and the whole thing breaks down. It is thus required to maintain open
    the stats stream and read from it, but because it is a continuous stream we
    need to be continuously consuming from it to make sure that when CollectD
    requests a plugin read, it gets the latest stats data from each container.

    The role of this thread is to keep consuming from the stats endpoint (it's
    a blocking stream read, getting stats data from the Docker daemon every
    second), and make the most recently read data available in a variable.
    """
    def __init__(self, container, client):
        threading.Thread.__init__(self)
        self.daemon = True
        self.stop = False
        self._container = container
        self._client = client
        self._feed = None   # generator yielding raw JSON stats documents
        self._stats = None  # most recently read raw JSON document
        # Automatically start stats reading thread
        self.start()
    def run(self):
        collectd.info('Starting stats gathering for {}.'
                      .format(_c(self._container)))
        while not self.stop:
            try:
                if not self._feed:
                    self._feed = self._client.stats(self._container)
                # Blocking read: yields one stats document per second.
                self._stats = self._feed.next()
            except Exception, e:
                collectd.warning('Error reading stats from {}: {}'
                                 .format(_c(self._container), e))
                # Marking the feed as dead so we'll attempt to recreate it and
                # survive transient Docker daemon errors/unavailabilities.
                self._feed = None
        collectd.info('Stopped stats gathering for {}.'
                      .format(_c(self._container)))
    @property
    def stats(self):
        """Wait, if needed, for stats to be available and return the most
        recently read stats data, parsed as JSON, for the container."""
        # NOTE(review): busy-waits until the first document arrives.
        while not self._stats:
            pass
        return json.loads(self._stats)
class DockerPlugin:
    """
    CollectD plugin for collecting statistics about running containers via
    Docker's remote API /<container>/stats endpoint.
    """
    DEFAULT_BASE_URL = 'unix://var/run/docker.sock'
    DEFAULT_DOCKER_TIMEOUT = 5
    # The stats endpoint is only supported by API >= 1.17
    MIN_DOCKER_API_VERSION = '1.17'
    # Maps keys of the Docker stats document to their reader class.
    CLASSES = {'network': NetworkStats,
               'blkio_stats': BlkioStats,
               'cpu_stats': CpuStats,
               'memory_stats': MemoryStats}
    def __init__(self, docker_url=None):
        self.docker_url = docker_url or DockerPlugin.DEFAULT_BASE_URL
        self.timeout = DockerPlugin.DEFAULT_DOCKER_TIMEOUT
        self.capture = False
        # Maps container id -> ContainerStats reader thread.
        self.stats = {}
    def configure_callback(self, conf):
        """Read BaseURL/Timeout settings from the CollectD config block."""
        for node in conf.children:
            if node.key == 'BaseURL':
                self.docker_url = node.values[0]
            elif node.key == 'Timeout':
                self.timeout = int(node.values[0])
    def init_callback(self):
        """Connect to the Docker daemon and register the read callback.
        Returns False when the daemon does not support the stats API."""
        self.client = docker.Client(
            base_url=self.docker_url,
            version=DockerPlugin.MIN_DOCKER_API_VERSION)
        self.client.timeout = self.timeout
        # Check API version for stats endpoint support.
        try:
            version = self.client.version()['ApiVersion']
            if StrictVersion(version) < \
                    StrictVersion(DockerPlugin.MIN_DOCKER_API_VERSION):
                raise Exception
        except:  # NOTE(review): bare except also hides unrelated errors
            collectd.warning(('Docker daemon at {} does not '
                              'support container statistics!')
                             .format(self.docker_url))
            return False
        collectd.register_read(self.read_callback)
        collectd.info(('Collecting stats about Docker containers from {} '
                       '(API version {}; timeout: {}s).')
                      .format(self.docker_url, version, self.timeout))
        return True
    def read_callback(self):
        """Emit the latest stats for every running container."""
        containers = [c for c in self.client.containers()
                      if c['Status'].startswith('Up')]
        # Terminate stats gathering threads for containers that are not running
        # anymore.
        for cid in set(self.stats) - set(map(lambda c: c['Id'], containers)):
            self.stats[cid].stop = True
            del self.stats[cid]
        for container in containers:
            try:
                # Strip the leading '/' from the container name.
                container['Name'] = container['Names'][0][1:]
                # Start a stats gathering thread if the container is new.
                if container['Id'] not in self.stats:
                    self.stats[container['Id']] = ContainerStats(container,
                                                                 self.client)
                # Get and process stats from the container.
                stats = self.stats[container['Id']].stats
                for key, value in stats.items():
                    klass = self.CLASSES.get(key)
                    if klass:
                        klass.read(container, value, stats['read'])
            except Exception, e:
                collectd.warning('Error getting stats for container {}: {}'
                                 .format(_c(container), e))
# Command-line execution
if __name__ == '__main__':
    # Stand-ins for the collectd module so the plugin can be exercised
    # from the shell: values print in collectd's exec-plugin PUTVAL format.
    class ExecCollectdValues:
        def dispatch(self):
            if not getattr(self, 'host', None):
                self.host = os.environ.get('COLLECTD_HOSTNAME', 'localhost')
            identifier = '%s/%s' % (self.host, self.plugin)
            if getattr(self, 'plugin_instance', None):
                identifier += '-' + self.plugin_instance
            identifier += '/' + self.type
            if getattr(self, 'type_instance', None):
                identifier += '-' + self.type_instance
            print 'PUTVAL', identifier, \
                ':'.join(map(str, [int(self.time)] + self.values))
    class ExecCollectd:
        def Values(self):
            return ExecCollectdValues()
        def warning(self, msg):
            print 'WARNING:', msg
        def info(self, msg):
            print 'INFO:', msg
    collectd = ExecCollectd()
    plugin = DockerPlugin()
    if len(sys.argv) > 1:
        # Optional CLI argument: the Docker daemon URL.
        plugin.docker_url = sys.argv[1]
    if plugin.init_callback():
        plugin.read_callback()
# Normal plugin execution via CollectD
else:
    import collectd
    plugin = DockerPlugin()
    collectd.register_config(plugin.configure_callback)
    collectd.register_init(plugin.init_callback)
| apache-2.0 |
sheepeatingtaz/django-graphos | demo_project/demo_project/settings/common.py | 5 | 5026 | # Django settings for demo_project project.
import os
import sys
# PROJECT_DIR is taken from the process working directory, so management
# commands must be launched from the project root.
# NOTE(review): os.path.dirname(os.path.abspath(__file__)) would be more robust.
PROJECT_DIR = os.getcwd()
PYTHON_BIN = os.path.dirname(sys.executable)
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    ('Shabda Raaj', 'shabda@agiliq.com'),
    ('Bala Subrahmanyam Varanasi', 'balu@agiliq.com')
)
MANAGERS = ADMINS
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'demo_project_database.db',
    }
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Asia/Kolkata'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_DIR, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.join(PROJECT_DIR, 'static/')
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
# NOTE(review): Django examples use a leading slash ('/static/'); this
# relative form resolves against the current page URL -- confirm intended.
STATIC_URL = 'static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): a SECRET_KEY committed to source control should be rotated
# and supplied via environment variable for production deployments.
SECRET_KEY = 'j-!4__9gp4=s_&d=tyj6cnt2ueay%7u-6nv0@o9_@lg!xv)%5b'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'demo_project.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'demo_project.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'demo',
    'graphos'
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
| bsd-2-clause |
brodyfuchs/EnvAnalysis | thermodynamics.py | 1 | 19626 | from numpy import *
"""
A series of functions to calculate thermodynamic variables
compiled by Brett Basarab
Colorado State University
bbasarab [at] atmos.colostate.edu
last updated April 2015
Unless otherwise indicated, most of these functions were written
by Thomas Chubb and downloaded as part of a free python skew-T plotting
package
Most functions take arguments in standard SI units:
temperature in K
pressure in Pa
mixing ratio in kg/kg
Some take temperature in Celsius
"""
#-----------------------------------------------------------------------
# Here we go. A set of functions that I use from time to time to calculate
# the basic stuff that I'm sick of doing over and over! I'm going to
# endeavour to include references and global constants to make it all nice
# and legible.
#-----------------------------------------------------------------------
Rs_da=287.05 # Specific gas const for dry air, J/kg/K
Rs_v=461.51 # Specific gas const for water vapour, J/kg/K
Cp_da=1004.6 # Specific heat at constant pressure for dry air, J/kg/K
Cv_da=719. # Specific heat at constant volume for dry air, J/kg/K
Cp_v=1870. # Specific heat at constant pressure for water vapour, J/kg/K
Cv_v=1410. # Specific heat at constant volume for water vapour, J/kg/K
Cp_lw=4218 # Specific heat at constant pressure for liquid water, J/kg/K
Epsilon=0.622 # Epsilon=Rs_da/Rs_v; The ratio of the gas constants
degCtoK=273.15 # Temperature offset between K and C (deg C)
rho_w=1000. # Liquid Water density kg m^{-3}
grav=9.81 # Gravity, m s^{-2}
Lv=2.5e6 # Latent Heat of vaporisation, J/kg
boltzmann=5.67e-8 # Stefan-Boltzmann constant, W m^{-2} K^{-4}
mv=18.0153 # Mean molar mass of water vapor (g/mol)
_devel="working" # Module development status flag
def Cel2K(tempc):
    """Celsius -> Kelvin."""
    return 273.15 + tempc
def Cel2F(tempc):
    """Celsius -> Fahrenheit."""
    return 32. + tempc * (9 / 5.)
def F2K(tempf):
    """Fahrenheit -> Kelvin."""
    return (tempf - 32.) * (5 / 9.) + 273.15
def F2Cel(tempf):
    """Fahrenheit -> Celsius."""
    return (tempf - 32.) * (5 / 9.)
def Theta(tempk,pres,pref=100000.):
    """Potential Temperature

    INPUTS:
    tempk (K)
    pres (Pa)
    pref: Reference pressure (default 100000 Pa)

    OUTPUTS: Theta (K)

    Source: Wikipedia
    Prints a warning if a pressure value below 2000 Pa input, to ensure
    that the units were input correctly.
    """
    try:
        # min() handles array-like input; scalars raise TypeError.
        minpres=min(pres)
    except TypeError:
        minpres=pres
    if minpres<2000:
        print "WARNING: P<2000 Pa; did you input a value in hPa?"
    # Poisson's equation with kappa = R_d / c_p.
    return tempk*(pref/pres)**(Rs_da/Cp_da)
def TempK(theta,pres,pref=100000.):
    """Inverts Theta function. i.e, gets temperature (K) at pres (Pa)
    based on theta (K), using the same reference pressure pref (Pa).
    """
    try:
        minpres=min(pres)
    except TypeError:
        minpres=pres
    if minpres<2000:
        print "WARNING: P<2000 Pa; did you input a value in hPa?"
    return theta*(pres/pref)**(Rs_da/Cp_da)
def ThetaE_R():
    """Equivalent potential temperature:
    Exact equation for reversible processes (not yet implemented)."""
    raise NotImplementedError
def ThetaE_Pseudo():
    """Equivalent potential temperature:
    Empirical formula valid for pseudo adiabatic processes
    from Bolton (1980) (not yet implemented)."""
    raise NotImplementedError
def ThetaE_App(tempk,pres,w,pref=100000.):
    """Equivalent potential temperature: approximate formula.

    INPUTS:
    tempk (K), pres (Pa), w: mixing ratio (kg/kg),
    pref: reference pressure (Pa)

    OUTPUTS: theta_e (K)
    """
    theta = Theta(tempk,pres,pref=pref)
    theta_e = theta*exp(Lv*w/(Cp_da*tempk))
    return theta_e
def ThetaES(tempk,pres,pref=100000.):
    """Saturated equivalent potential temperature: approximate formula.
    Uses the saturation mixing ratio at (tempk, pres)."""
    theta = Theta(tempk,pres,pref=pref)
    ws = SatMixRatio(tempk,pres)
    theta_es = theta*exp(Lv*ws/(Cp_da*tempk))
    return theta_es
def ThetaV(tempk,pres,e):
    """Virtual Potential Temperature

    INPUTS
    tempk (K)
    pres (Pa)
    e: Water vapour pressure (Pa)
    """
    mixr=MixRatio(e,pres)
    theta=Theta(tempk,pres)
    return theta*(1+mixr/Epsilon)/(1+mixr)
# DSE, MSE, and SMSE functions written by Brett Basarab
def DSE(tempk,height):
    """Dry static energy (J/kg): Cp*T + g*z."""
    return Cp_da*tempk+grav*height
def MSE(tempk,height,w):
    """Moist static energy (J/kg): Cp*T + g*z + Lv*w, where w is the
    water vapour mixing ratio (kg/kg)."""
    return Cp_da*tempk+grav*height+Lv*w
def SMSE(tempk,height,pres):
    """Saturation moist static energy (J/kg); uses the saturation
    mixing ratio at (tempk, pres)."""
    ws = SatMixRatio(tempk,pres) # approximate formula
    return Cp_da*tempk+grav*height+Lv*ws
def GammaW(tempk,pres,e=None):
    """Function to calculate the moist adiabatic lapse rate (deg C/Pa) based
    on the temperature, pressure, and vapour pressure of the environment.

    INPUTS:
    tempk (K)
    pres (Pa)
    e: water vapour pressure (Pa); if None, saturation is assumed

    RETURNS:
    GammaW: The moist adiabatic lapse rate (deg C/Pa)
    """
    tempc=tempk-degCtoK
    es=SatVap(tempc)
    ws=MixRatio(es,pres)
    if e is None:
        # assume saturated
        e=es
    w=MixRatio(e,pres)
    #tempk = tempc+degCtoK
    # Density uses virtual temperature to account for moisture.
    tempv=VirtualTempFromMixR(tempk,w)
    latent=Latentc(tempc)
    A=1.0+latent*ws/(Rs_da*tempk)
    B=1.0+Epsilon*latent*latent*ws/(Cp_da*Rs_da*tempk*tempk)
    Rho=pres/(Rs_da*tempv)
    # (A/B) is the saturated lapse-rate factor; dividing by Cp*Rho
    # converts it to a per-pascal rate via hydrostatic balance.
    Gamma=(A/B)/(Cp_da*Rho)
    return Gamma
def DensMoist(tempk,pres,mixr):
    """Density of moist air (kg/m^3) via the virtual temperature."""
    virtualT=VirtualTempFromMixR(tempk,mixr)
    return pres/(Rs_da*virtualT)
def VirtualTemp(tempk,pres,e):
    """Virtual Temperature

    INPUTS:
    tempk: Temperature (K)
    pres: static pressure (Pa)
    e: vapour pressure (Pa)

    OUTPUTS:
    tempv: Virtual temperature (K)

    SOURCE: hmmmm (Wikipedia)."""
    tempvk=tempk/(1-(e/pres)*(1-Epsilon))
    return tempvk
def VirtualTempFromMixR(tempk, mixr):
    """Approximate virtual temperature from mixing ratio.

    INPUTS:
    tempk: Temperature (K)
    mixr: Water vapour mixing ratio (kg/kg)

    OUTPUTS:
    tempv: Virtual temperature (K)

    Uses the common first-order approximation Tv = T * (1 + 0.6*w).
    """
    moisture_correction = 1.0 + 0.6 * mixr
    return tempk * moisture_correction
def Latentc(tempc):
    """Latent heat of condensation (vapourisation)

    INPUTS:
    tempc (C)

    OUTPUTS:
    L_w (J/kg)

    Cubic polynomial fit in temperature; see
    http://en.wikipedia.org/wiki/Latent_heat#Latent_heat_for_condensation_of_water
    """
    # Fit evaluated in kJ/kg, then scaled to J/kg.
    kj_per_kg = 2500.8 - 2.36*tempc + 0.0016*tempc**2 - 0.00006*tempc**3
    return 1000*kj_per_kg
def SatVap(tempc, phase="liquid"):
    """Calculate saturation vapour pressure over liquid water and/or ice.

    INPUTS:
    tempc: (C)
    phase: ['liquid'],'ice'. If 'liquid', always use the liquid-water
    formula. If 'ice', return saturation vapour pressure as follows:
    Tc>=0: es = es_liquid
    Tc <0: es = es_ice

    RETURNS: e_sat (Pa)

    SOURCE: http://cires.colorado.edu/~voemel/vp.html (#2:
    CIMO guide (WMO 2008), modified to return values in Pa)
    """
    es_liquid = 6.112*exp(17.67*tempc/(tempc+243.12))*100.
    es_ice = 6.112*exp(22.46*tempc/(tempc+272.62))*100.
    if phase == "liquid":
        return es_liquid
    if phase == "ice":
        # Ice formula below freezing, liquid formula at/above 0 C.
        return where(tempc < 0, es_ice, es_liquid)
    raise NotImplementedError
def MixRatio(e,pres):
    """Mixing ratio of water vapour

    INPUTS
    e (Pa) Water vapor pressure
    pres (Pa) Ambient pressure

    RETURNS
    w (kg/kg) Water vapor mixing ratio
    """
    return Epsilon*e/(pres-e)
def SatMixRatio(tempk,pres):
    """Calculate saturation mixing ratio (kg/kg) of water
    vapor with respect to liquid water, given
    temperature (K) and pressure (Pa)."""
    tempc = tempk-degCtoK
    es = SatVap(tempc)
    return Epsilon*es/(pres-es) # approximate formula
def MixR2VaporPress(w,pres):
    """Return Vapor Pressure given Mixing Ratio and Pressure

    INPUTS
    w (kg kg^-1) Water vapor mixing ratio
    pres (Pa) Ambient pressure

    RETURNS
    e (Pa) Water vapor pressure
    """
    return w*pres/(Epsilon+w)
def MixR2Q(w):
    """Convert mixing ratio w (kg vapour / kg dry air) to specific
    humidity q (kg vapour / kg moist air): q = w / (1 + w)."""
    return w / (w + 1.)
def Dwpt2VapPres(dwpt):
    """Water vapor pressure from dew point temperature.

    INPUTS
    dwpt (C) Dew Point Temperature (for SATURATION vapor
    pressure use tempk)

    RETURNS
    e (Pa) Water Vapor Pressure

    SOURCE:
    Bolton, Monthly Weather Review, 1980, p 1047, eq. (10)
    """
    exponent = 17.67*dwpt/(243.5+dwpt)
    return 611.2*exp(exponent)
def VapPres2Dwpt(e):
    """ Use Bolton's (1980, MWR, p1047) formulae to find tdew.

    INPUTS:
    e (Pa) Water Vapor Pressure

    OUTPUTS:
    Td (C)
    """
    ln_ratio=log(e/611.2)
    # Algebraic inversion of Bolton eq. (10); intermediate Td is in
    # Kelvin, converted back to Celsius on return.
    Td=((17.67-ln_ratio)*degCtoK+243.5*ln_ratio)/(17.67-ln_ratio)
    return Td-degCtoK
def interp_parcel_path(pres_s,pres,temp,height,p,tdry,pwet,twet):
    """
    Interpolates the output from lift_parcel in the SkewT module in order to calculate
    a parcel temperature at each vertical level of your domain. <pres>, <temp>, and <height>
    may go upward from surface (so <pres>,<temp> decrease, <height> increases) or downward from
    top of domain. (But make sure all three of these arrays go in the same direction). If pressure
    array was input in Pa units, will convert to hPa.
    Arguments (all are required):
    pres_s: surface pressure (hPa)
    pres: your pressure array (hPa)
    temp: your temperature array (Celsius)
    height: your height array (meters)
    p: pressures along dry adiabat output by SkewT.lift_parcel (hPa)
    tdry: temperatures along dry adiabat output by SkewT.lift_parcel (Celsius)
    pwet: pressures along moist adiabat output by SkewT.lift_parcel (hPa)
    twet: temperatures along moist adiabat output by SkewT.lift_parcel (Celsius)
    Returns:
    parcel_temp: array of parcel temperatures interpolated to your vertical grid;
    will go from top of domain to surface
    h_el,p_el: height (meters) and pressure (hPa) of the equilibrium level (EL)
    h_lfc,p_lfc: height (meters) and pressure (hPa) of the level of free convection (LFC)
    h_frz,p_frz: height (meters) and pressure (hPa) of the environmental freezing level
    h_frz_parc,p_frz_parc: height (meters) and pressure (hPa) of the parcel path freezing level
    e,l,frz: indices of your arrays corresponding to the EL, LFC, and environmental freezing level
    """
    # NOTE(review): relies on numpy names (all, diff, where, interp, zeros)
    # presumably brought in by a star import earlier in the file — confirm.
    # Python 2 print statements throughout.
    # the calculations below operate on arrays going from top of domain to surface
    # so check the direction of input arrays (<pres>,<temp>,<height>) here
    if all(diff(pres)<=0): # arrays start at surface so flip them
        pres = pres[::-1]
        temp = temp[::-1]
        height = height[::-1]
    if pres.max()>1500.:
        print 'MAX PRESSURE IS > 1500. ASSUMING PRESSURE WAS INPUT IN PA, CONVERTING TO HPA'
        pres = pres/100.
    # because of how pwet and twet are set up, exlude all indices above 100-hPa
    cutoff_pres = pres>=100.
    pres = pres[cutoff_pres]
    temp = temp[cutoff_pres]
    height = height[cutoff_pres]
    p_lcl = pwet[0] # pwet[0] and twet[0] are pressure and temperature of the LCL
    h_lcl = interp(p_lcl,pres,height)
    print 'p_lcl, h_lcl:', p_lcl, h_lcl
    atm = pres <= pres_s # finding pres elements that are above the surface
    p_atm = pres[atm]
    pres = pres[atm]
    temp = temp[atm]
    height = height[atm]
    # indices below/above the LCL partition the domain into the dry-adiabat
    # and moist-adiabat portions of the parcel path
    dry = where(p_atm >= p_lcl) # indices of pressure levels below LCL (higher pressure than LCL)
    moist = where(p_atm < p_lcl) # indices of pressure levels above LCL
    parcel_temp = zeros(p_atm.shape[0]) # (degrees C); levels below surface will be left at zero
    # below LCL (parcel follows dry adiabat)
    # interpolate output from lift_parcel to your model or observational levels
    for d in dry[0]: # assign dry temperatures from LCL downward
        p_diff = p-p_atm[d] # 100-element pressure array minus element of interest in your pres array
        below = where(p_diff >= 0)[0][-1] # index of closest pressure in p level just below the LCL
        above = below + 1 # index of closest pressure level in p just above LCL
        # parcel temp follows dry adiabat below LCL
        # get dry parcel temperatures by interpolating on the function tdry(p)
        # p and tdry are decreasing, np.interp only works on increasing arrays (hence the above then below ordering)
        parcel_temp[d] = interp(p_atm[d], [p[above], p[below]], [tdry[above], tdry[below]])
    # can't interpolate on the end; parcel_temp is an increasing array (goes from top of domain),
    # so just make the first element be the highest-up wet parcel path temperature
    # OK that parcel_temp is increasing because so is temp
    parcel_temp[0] = twet[-1] # parcel_temp increases, and can't interpolate on the end;
    # above LCL (parcel follows moist adiabat)
    for m in moist[0][1:]: # assign wet temperatures from top of domain downward
        p_diff = pwet-p_atm[m]
        below = where(p_diff >= 0)[0][-1] # closest pressure level just below the LCL
        above = below + 1
        # parcel temp follows moist adiabat above LCL
        parcel_temp[m] = interp(p_atm[m], [pwet[above], pwet[below]], [twet[above], twet[below]])
    p_el,h_el,p_lfc,h_lfc = 0,0,0,0 # initialize these variables
    # find the equilibrium level (EL)
    # NOTE: <below> and <e> fall through to the loop's last index when no EL
    # is found; the post-loop check below relies on that.
    for i in range(temp.shape[0]):
        t_diff = temp[i] - parcel_temp[i]
        #print temp[i],parcel_temp[i],'t_diff: ',t_diff
        # this condition seems backwards for finding the EL, but remember that temp and parcel_temp start at
        # TOP of domain. So you're finding FIRST instance of parcel being warmer or equal temperature to environment,
        if t_diff <= 0:
            print 'FOUND EL'
            below = i
            e = i # index of EL
            break # break out of the loop
        else: # haven't found EL, means EL, LFC, and therefore CAPE quantities are undefined
            below = i
            e = i
    if e==temp.shape[0]-1: # didn't find an EL
        l = e # set LFC index to EL index so that CAPE calculation will be zero
        p_el,h_el,p_lfc,h_lfc = -1,-1,-1,-1 # set these values to undefined
        print 'EL NOT FOUND'
    #print 'for EL: below, e: ',below,e
    if (p_el!=-1) and (h_el!=-1): # MEANS THAT EL HAS BEEN FOUND
        # interpolate on function P(T-T_parcel); find pressure where temp-parcel_temp = 0
        # if t_diff = 0, then the interpolation is trivial, below is the index of your EL
        # if t_diff < 0, then you just a little bit below the EL; interpolate between (temp[below]-parcel_temp[below]),
        # and (temp[below-1]-parcel_temp[below-1])
        p_el = interp(0, [temp[below]-parcel_temp[below], temp[below-1]-parcel_temp[below-1]],
                    [p_atm[below],p_atm[below-1]]) # pressure of equilibrium level
        #print 'heights near EL: ', height[below],height[below-1]
        h_el = interp(0, [temp[below]-parcel_temp[below], temp[below-1]-parcel_temp[below-1]],
                    [height[below],height[below-1]]) # height of equilibrium level
        if p_el<p_lcl: #### MEANS THAT EL IS PHYSICAL
            ### CALCULATE PRESSURE AND HEIGHT OF THE LEVEL OF FREE CONVECTION
            for i in range(below, temp.shape[0]):
                t_diff = temp[i] - parcel_temp[i]
                #print 't_diff: ',t_diff
                # now, starting from the EL, move downward in height space to find the first instance of parcel temperature
                # being cooler or equal temperature to environment, i.e., the LFC
                found_lfc = 0
                if t_diff >= 0: # parcel is cooler than environment
                    #print 'found LFC'
                    found_lfc=1
                    below = i
                    l = i # index of LFC
                    break
                else: # haven't found the LFC, means parcel is warmer than env. all the way to surface
                    below = i
                    l = i
            #print 'for LFC: below, l, p_atm.shape: ',below,l,p_atm.shape[0]
            #print 'temp.shape, parcel_temp.shape: ',temp.shape[0],parcel_temp.shape[0]
            #print 'parcel_temp: ',parcel_temp
            if l == p_atm.shape[0]-1: # got to end of array, no LFC found
                # fall back to the LCL as the LFC when the parcel is buoyant
                # all the way down to the surface
                p_lfc = p_lcl
                h_lfc = h_lcl
            #elif found_lfc == 0:
                #p_lfc = p_lcl
                #h_lfc = h_lcl
            else: #
                p_lfc = interp(0, [temp[below-1]-parcel_temp[below-1], temp[below]-parcel_temp[below]],
                            [p_atm[below-1],p_atm[below]])
                # h_lfc = interp(0, [temp[below-1]-parcel_temp[below-1], temp[below]-parcel_temp[below]],
                #             [ht[below-1],ht[below]])
                h_lfc = interp(p_lfc, pres, height)
        else: #### EL IS NOT PHYSICAL
            p_el,h_el,p_lfc,h_lfc = -1,-1,-1,-1 # set these values to undefined
            l = e # set LFC index to EL index so that CAPE calculation will be zero
    print 'p_el, h_el: ',p_el,h_el
    print 'p_lfc, h_lfc: ',p_lfc,h_lfc
    # find height of the (environmental) freezing level
    # (index of the temperature closest to 0 C; assumes a freezing level
    # exists within the domain — TODO confirm for very cold/warm profiles)
    frz = where(abs(temp) == abs(temp).min())[0][0]
    p_frz = pres[frz]
    h_frz = height[frz]
    # find the height of the (parcel path) freezing level
    i_frz_parc = where(abs(parcel_temp[pres<=pres_s]) == abs(parcel_temp[pres<=pres_s]).min())[0][0]
    h_frz_parc = height[i_frz_parc]
    p_frz_parc = pres[i_frz_parc]
    #print 'DONE INTERPOLATING PARCEL PATH!!\n\n'
    # returns array of parcel temperatures, pressure and height of LFC,
    #pressure and height of EL, index of freezing height, LFC, and EL
    return parcel_temp,p_lcl,h_lcl,p_lfc,h_lfc,p_el,h_el,p_frz,h_frz,p_frz_parc,h_frz_parc,frz,l,e
def calc_cape(temp,height,parcel_temp,h_lfc,l,h_el,e,frz):
    """
    This function calculates the convective available potential energy (CAPE) between the level
    of free convection and the equilibrium level. It also calculates a "warm" CAPE value (CAPE
    below the freezing level). <temp>, <height>, and <parcel_temp> may go upward from surface
    (so <temp>, <parcel_temp> decrease, <height> increases) or downward from top of domain. (But
    make sure all three of these arrays go in the same direction).
    Arguments:
    temp: array of temperatures (Celsius)
    height: array of heights (meters)
    parcel_temp: array of parcel temperatures interpolated to <temp>, <height> grid (Celsius)
    h_lfc: height of the LFC (meters)
    l: index of <parcel_temp>-<temp> closest the LFC
    h_el: height of the EL (meters)
    e: index <parcel_temp>-<temp> closest to the EL
    frz: index closest to the freezing level
    Returns:
    cape (J/kg), ncape (normalized CAPE, J/kg per meter of LFC-EL depth),
    warm_cape (J/kg, CAPE accumulated below the freezing level)
    """
    # NOTE(review): l, e, frz are expected to come from interp_parcel_path,
    # i.e. indices into top-down arrays; grav and degCtoK are module-level
    # constants defined elsewhere in the file — confirm.
    # the calculations below operate on arrays going from top of domain to surface
    # so check the direction of input arrays (<pres>,<temp>,<height>) here
    if all(diff(height)>0): # arrays start at surface so flip them
        temp = temp[::-1]
        height = height[::-1]
        parcel_temp = parcel_temp[::-1]
    ### CAPE CALCULATIONS ###
    cape = 0.
    warm_cape = 0.
    if l > e: # NEED TO MAKE SURE THERE IS A REASONABLE EL
        print 'l > e; calculating CAPE'
        for i in range(e,l-1): # descretized CAPE calculation: looping from EL to LFC to get CAPE
            # trapezoid-style layer contribution: buoyancy * layer depth / T_env (Kelvin)
            cape += grav*(parcel_temp[i]-temp[i])*(height[i]-height[i+1])/(temp[i]+degCtoK)
            if i >= frz: # CAPE below the freezing level woot!!
                warm_cape += grav*(parcel_temp[i]-temp[i])*(height[i]-height[i+1])/(temp[i]+degCtoK)
        # ADD ON A LITTLE NEAR THE EL
        el_cape = grav*(parcel_temp[e]-temp[e])*(h_el-height[e])/(temp[e]+degCtoK)
        # SUBTRACT OFF ANY BELOW THE LFC
        lfc_cape = grav*(parcel_temp[l]-temp[l])*(h_lfc-height[l])/(temp[l]+degCtoK)
        print 'EL CAPE, LFC CAPE:',el_cape,lfc_cape
        # NOTE: bitwise | on booleans — works here, but `or` is the usual idiom
        if (abs(el_cape) > 1000) | (abs(lfc_cape) > 1000):
            print 'BAD near-EL or near-LFC CAPE values; not including'
        else:
            cape += el_cape
            cape += lfc_cape
            warm_cape += lfc_cape
        del lfc_cape # delete this variable
        del el_cape
    if cape < 0: cape = 0
    # normalized CAPE: CAPE per meter of the LFC-EL layer depth
    if cape!=0.:
        ncape = cape/(h_el-h_lfc)
    else:
        ncape = 0.
    return cape,ncape,warm_cape
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.