repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
gsobczyk/hamster | waflib/extras/bjam.py | 60 | 3893 | #! /usr/bin/env python
# per rosengren 2011
from os import sep, readlink
from waflib import Logs
from waflib.TaskGen import feature, after_method
from waflib.Task import Task, always_run
def options(opt):
    """Register the bjam-related command line options with waf.

    Adds one option group holding the bjam source/uname/config/toolset
    switches read later by configure().
    """
    grp = opt.add_option_group('Bjam Options')
    # (flag, default, help) triples; help may be None for undocumented flags.
    for flag, default, help_text in (
            ('--bjam_src', None, 'You can find it in <boost root>/tools/jam/src'),
            ('--bjam_uname', 'linuxx86_64', 'bjam is built in <src>/bin.<uname>/bjam'),
            ('--bjam_config', None, None),
            ('--bjam_toolset', None, None)):
        grp.add_option(flag, default=default, help=help_text)
def configure(cnf):
    """Locate bjam and record its settings in the configuration environment.

    Command line options only provide defaults: values already present in
    ``cnf.env`` (e.g. set by the wscript) take precedence.
    """
    if not cnf.env.BJAM_SRC:
        cnf.env.BJAM_SRC = cnf.options.bjam_src
    if not cnf.env.BJAM_UNAME:
        cnf.env.BJAM_UNAME = cnf.options.bjam_uname
    try:
        # A pre-built bjam lives in <src>/bin.<uname>/ after a source build.
        cnf.find_program('bjam', path_list=[
            cnf.env.BJAM_SRC + sep + 'bin.' + cnf.env.BJAM_UNAME
        ])
    except Exception:
        # Not fatal: the bjam_creator task can build bjam at build time.
        cnf.env.BJAM = None
    if not cnf.env.BJAM_CONFIG:
        cnf.env.BJAM_CONFIG = cnf.options.bjam_config
    if not cnf.env.BJAM_TOOLSET:
        cnf.env.BJAM_TOOLSET = cnf.options.bjam_toolset
@feature('bjam')
@after_method('process_rule')
def process_bjam(self):
    """Task-generator method: schedule the bjam task chain for this target.

    A bjam_creator task is only queued when configure() failed to find a
    pre-built bjam binary.
    """
    if not self.bld.env.BJAM:
        self.create_task('bjam_creator')
    self.create_task('bjam_build')
    self.create_task('bjam_installer')
    if getattr(self, 'always', False):
        # Force every task class in the chain to re-run on each build.
        always_run(bjam_creator)
        always_run(bjam_build)
        always_run(bjam_installer)
class bjam_creator(Task):
    """Task that builds the bjam executable from source when missing."""
    ext_out = 'bjam_exe'
    vars = ['BJAM_SRC', 'BJAM_UNAME']

    def run(self):
        env = self.env
        gen = self.generator
        bjam = gen.bld.root.find_dir(env.BJAM_SRC)
        if not bjam:
            Logs.error('Can not find bjam source')
            return -1
        bjam_exe_relpath = 'bin.' + env.BJAM_UNAME + '/bjam'
        bjam_exe = bjam.find_resource(bjam_exe_relpath)
        if bjam_exe:
            # Already built -- just record the path and succeed.
            env.BJAM = bjam_exe.srcpath()
            return 0
        # Bootstrap bjam using the script shipped with its sources.
        bjam_cmd = ['./build.sh']
        Logs.debug('runner: ' + bjam.srcpath() + '> ' + str(bjam_cmd))
        result = self.exec_command(bjam_cmd, cwd=bjam.srcpath())
        if not result == 0:
            Logs.error('bjam failed')
            return -1
        # The build script should have produced the executable now.
        bjam_exe = bjam.find_resource(bjam_exe_relpath)
        if bjam_exe:
            env.BJAM = bjam_exe.srcpath()
            return 0
        Logs.error('bjam failed')
        return -1
class bjam_build(Task):
    """Task that invokes bjam to build the requested Boost libraries."""
    ext_in = 'bjam_exe'
    ext_out = 'install'
    vars = ['BJAM_TOOLSET']

    def run(self):
        env = self.env
        gen = self.generator
        path = gen.path
        bld = gen.bld
        # An optional 'root' attribute on the generator overrides the
        # directory bjam is run from.
        if hasattr(gen, 'root'):
            build_root = path.find_node(gen.root)
        else:
            build_root = path
        jam = bld.srcnode.find_resource(env.BJAM_CONFIG)
        if jam:
            Logs.debug('bjam: Using jam configuration from ' + jam.srcpath())
            jam_rel = jam.relpath_gen(build_root)
        else:
            Logs.warn('No build configuration in build_config/user-config.jam. Using default')
            jam_rel = None
        bjam_exe = bld.srcnode.find_node(env.BJAM)
        if not bjam_exe:
            Logs.error('env.BJAM is not set')
            return -1
        bjam_exe_rel = bjam_exe.relpath_gen(build_root)
        # Assemble the bjam command line; builds shared/release variants of
        # the libraries listed in the generator's 'target' attribute.
        cmd = ([bjam_exe_rel] +
            (['--user-config=' + jam_rel] if jam_rel else []) +
            ['--stagedir=' + path.get_bld().path_from(build_root)] +
            ['--debug-configuration'] +
            ['--with-' + lib for lib in self.generator.target] +
            (['toolset=' + env.BJAM_TOOLSET] if env.BJAM_TOOLSET else []) +
            ['link=' + 'shared'] +
            ['variant=' + 'release']
        )
        Logs.debug('runner: ' + build_root.srcpath() + '> ' + str(cmd))
        ret = self.exec_command(cmd, cwd=build_root.srcpath())
        if ret != 0:
            return ret
        # Everything bjam staged under lib/ and bin/ becomes a task output.
        self.set_outputs(path.get_bld().ant_glob('lib/*') + path.get_bld().ant_glob('bin/*'))
        return 0
class bjam_installer(Task):
    """Task that installs staged libraries/binaries, preserving symlinks."""
    ext_in = 'install'

    def run(self):
        gen = self.generator
        path = gen.path
        for idir, pat in (('${LIBDIR}', 'lib/*'), ('${BINDIR}', 'bin/*')):
            files = []
            for n in path.get_bld().ant_glob(pat):
                try:
                    # Recreate symbolic links as links at the destination;
                    # readlink() raises OSError for regular files, which are
                    # collected and installed as plain files below.
                    t = readlink(n.srcpath())
                    gen.bld.symlink_as(sep.join([idir, n.name]), t, postpone=False)
                except OSError:
                    files.append(n)
            gen.bld.install_files(idir, files, postpone=False)
        return 0
| gpl-3.0 |
bofm/emmet-sublime | emmet/semver.py | 19 | 2471 | # -*- coding: utf-8 -*-
import re
# Precompiled SemVer pattern: major.minor[.patch][-prerelease][+build].
# Raw strings keep the regex escapes intact and avoid the invalid-escape
# warnings modern Python emits for '\.' inside plain string literals.
_REGEX = re.compile(r'^(?P<major>[0-9]+)'
                    r'\.(?P<minor>[0-9]+)'
                    r'(\.(?P<patch>[0-9]+))?'
                    r'(\-(?P<prerelease>[0-9A-Za-z]+(\.[0-9A-Za-z]+)*))?'
                    r'(\+(?P<build>[0-9A-Za-z]+(\.[0-9A-Za-z]+)*))?$')

# Python 3 removed the builtin cmp(). Probing `'cmp' not in __builtins__`
# is fragile because __builtins__ may be a module (not a dict) depending on
# how the file is loaded; probing the name directly works in both cases.
try:
    cmp
except NameError:
    cmp = lambda a, b: (a > b) - (a < b)
def parse(version):
    """Split *version* into its SemVer components.

    Returns a dict with keys ``major``, ``minor``, ``patch`` (ints; a
    missing patch defaults to 0) plus ``prerelease`` and ``build``
    (strings or None). Raises ValueError for invalid input.
    """
    matched = _REGEX.match(version)
    if matched is None:
        raise ValueError('%s is not valid SemVer string' % version)
    parts = matched.groupdict()
    parts['major'] = int(parts['major'])
    parts['minor'] = int(parts['minor'])
    parts['patch'] = int(parts['patch'] or '0')
    return parts
def compare(ver1, ver2):
def nat_cmp(a, b):
a, b = a or '', b or ''
convert = lambda text: text.isdigit() and int(text) or text.lower()
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
return cmp(alphanum_key(a), alphanum_key(b))
def compare_by_keys(d1, d2):
for key in ['major', 'minor', 'patch']:
v = cmp(d1.get(key), d2.get(key))
if v:
return v
rc1, rc2 = d1.get('prerelease'), d2.get('prerelease')
build1, build2 = d1.get('build'), d2.get('build')
rccmp = nat_cmp(rc1, rc2)
buildcmp = nat_cmp(build1, build2)
if not (rc1 or rc2):
return buildcmp
elif not rc1:
return 1
elif not rc2:
return -1
return rccmp or buildcmp or 0
v1, v2 = parse(ver1), parse(ver2)
return compare_by_keys(v1, v2)
def match(version, match_expr):
    """Check *version* against an expression such as ``'>=1.0.0'``."""
    # Acceptable compare() outcomes per operator.
    outcomes = {
        '>': (1,),
        '<': (-1,),
        '==': (0,),
        '>=': (0, 1),
        '<=': (-1, 0)
    }
    op = match_expr[:2]
    if op in ('>=', '<=', '=='):
        target = match_expr[2:]
    elif op and op[0] in ('>', '<', '='):
        op = op[0]
        target = match_expr[1:]
    else:
        raise ValueError("match_expr parameter should be in format <op><ver>, "
                         "where <op> is one of ['<', '>', '==', '<=', '>=']. "
                         "You provided: %r" % match_expr)
    return compare(version, target) in outcomes[op]
| mit |
cemc/cscircles-wp-content | lesson_files/graders/pendulum-grader.py | 1 | 1075 | # based on real-per-line.py
from _UTILITIES import _code
def f():
    """Grade the learner's numeric output against the expected values.

    Relies on globals injected by the grading harness: ExpectedLines,
    OutputLines, sfloat(), realClose() and the _() translation helper.
    Returns a string whose first character encodes the verdict
    ("Y" = correct, "N" = wrong) followed by the message shown to the
    student; a correct run also gets an ASCII bar chart of the values.
    """
    for i in range(0, len(ExpectedLines)):
        if len(OutputLines) <= i:
            return "N"+_("Not enough lines of output: {0} expected, {1} found").format(str(len(ExpectedLines)),
                                                                                      str(len(OutputLines)))
        o = sfloat(OutputLines[i])
        # `is None` (identity) rather than `== None`: sfloat signals an
        # unparsable line with None, and equality against None is the
        # unidiomatic spelling of this check.
        if o is None:
            return "N"+_("Error: Output line {0} '{1}' was not a number").format(str(i+1), _code(OutputLines[i]))
        if not realClose(float(ExpectedLines[i]), o):
            return "N"+_("Output line {0}, value {1}, did not match expected value {2}").format(
                str(i+1), OutputLines[i], ExpectedLines[i])
    if len(OutputLines) > len(ExpectedLines):
        return "N"+_("Too many lines of output: {0} expected, {1} found").format(
            str(len(ExpectedLines)), str(len(OutputLines)))
    # All lines matched: render each value as a row of asterisks
    # (rounded to the nearest integer).
    res = "Y"+_("Correct!")+" "+_("Here is the plot of the output:")+"<pre>"
    for i in range(0, len(ExpectedLines)):
        res += "*"*int(0.5+sfloat(OutputLines[i]))+"\n"
    return res+"</pre>"

print(f())
| gpl-3.0 |
xpansa/server-tools | qweb_usertime/__openerp__.py | 23 | 1508 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2015 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP 8.0 module manifest for the "QWeb User Time" addon.
{
    'name': 'QWeb User Time',
    # OCA convention: <odoo major>.<module major>.<minor>.<patch>.
    'version': '8.0.1.0.0',
    'author': 'Savoir-faire Linux,Odoo Community Association (OCA)',
    'maintainer': 'Odoo Community Association (OCA)',
    'website': 'http://www.savoirfairelinux.com',
    'license': 'AGPL-3',
    'category': 'Tools',
    'summary': 'Add user time rendering support in QWeb',
    # Only depends on the core; no extra Python packages or data files.
    'depends': [
        'base',
    ],
    'external_dependencies': {
        'python': [],
    },
    'data': [
    ],
    'installable': True,
}
| agpl-3.0 |
justinpotts/mozillians | vendor-local/lib/python/kombu/log.py | 13 | 4164 | from __future__ import absolute_import
import os
import logging
import sys
from .utils import cached_property
from .utils.compat import WatchedFileHandler
from .utils.encoding import safe_repr, safe_str
from .utils.functional import maybe_promise
__all__ = ["LogMixin", "LOG_LEVELS", "get_loglevel", "setup_logging"]
# Bidirectional mapping between level names and numeric values, extended
# with the FATAL alias.
# NOTE(review): logging._levelNames is Python 2 only; it was removed in
# Python 3 (which exposes _nameToLevel/_levelToName instead) -- confirm
# the supported interpreter range before touching this.
LOG_LEVELS = dict(logging._levelNames)
LOG_LEVELS["FATAL"] = logging.FATAL
LOG_LEVELS[logging.FATAL] = "FATAL"
# When set in the environment, exc_info is stripped from error logging.
DISABLE_TRACEBACKS = os.environ.get("DISABLE_TRACEBACKS")
class NullHandler(logging.Handler):
    """Logging handler that silently discards every record."""

    def emit(self, record):
        # Intentionally a no-op.
        pass
def get_logger(logger):
    """Resolve *logger* (a name or a Logger) and ensure it has a handler.

    A NullHandler is attached to handler-less loggers so that logging
    never emits "no handlers could be found" warnings.
    """
    resolved = logging.getLogger(logger) if isinstance(logger, basestring) else logger
    if not resolved.handlers:
        resolved.addHandler(NullHandler())
    return resolved
def anon_logger(name):
    """Return the logger called *name* with a null handler attached."""
    new_logger = logging.getLogger(name)
    # Unconditionally add the handler (unlike get_logger above).
    new_logger.addHandler(NullHandler())
    return new_logger
def get_loglevel(level):
    """Map a symbolic level name to its numeric value.

    Numeric levels are passed through unchanged.
    """
    return LOG_LEVELS[level] if isinstance(level, basestring) else level
def naive_format_parts(fmt):
    """Crude scan of a %%-format string.

    Yields ``'r'`` or ``'s'`` for each ``%r``/``%s`` directive it
    recognises, ``None`` for segments it cannot classify (e.g. around an
    escaped ``%%``), and skips every other directive.
    """
    segments = fmt.split('%')
    for index, segment in enumerate(segments[1:]):
        preceding = segments[index - 1]
        if not segment or not preceding:
            # Unclassifiable segment -- emit a placeholder.
            yield None
        elif segment[0] in ('r', 's'):
            yield segment[0]
def safeify_format(fmt, *args):
    """Yield *args* coerced safely according to the directives in *fmt*.

    ``%r`` arguments go through safe_repr(), ``%s`` through safe_str();
    unclassified positions are passed through untouched.
    """
    for position, directive in enumerate(naive_format_parts(fmt)):
        arg = args[position]
        if not directive:
            yield arg
        elif directive == 'r':
            yield safe_repr(arg)
        elif directive == 's':
            yield safe_str(arg)
class LogMixin(object):
    """Adds leveled logging helpers (debug/info/warn/error/critical).

    The logger is named after the concrete class; every message is
    prefixed with that name, and format arguments are converted with the
    safe repr/str helpers so logging never raises on a bad __repr__.
    """

    def debug(self, *args, **kwargs):
        return self.log(logging.DEBUG, *args, **kwargs)

    def info(self, *args, **kwargs):
        return self.log(logging.INFO, *args, **kwargs)

    def warn(self, *args, **kwargs):
        return self.log(logging.WARN, *args, **kwargs)

    def error(self, *args, **kwargs):
        return self._error(logging.ERROR, *args, **kwargs)

    def critical(self, *args, **kwargs):
        return self._error(logging.CRITICAL, *args, **kwargs)

    def _error(self, severity, *args, **kwargs):
        # Attach current exception info unless tracebacks are disabled
        # via the DISABLE_TRACEBACKS environment variable.
        kwargs.setdefault("exc_info", sys.exc_info())
        if DISABLE_TRACEBACKS:
            kwargs.pop("exc_info", None)
        return self.log(severity, *args, **kwargs)

    def annotate(self, text):
        # Prefix every message with the logger name.
        return "%s - %s" % (self.logger_name, text)

    def log(self, severity, *args, **kwargs):
        if self.logger.isEnabledFor(severity):
            log = self.logger.log
            if len(args) > 1 and isinstance(args[0], basestring):
                # args[0] is a format string: resolve promised values and
                # safely convert each argument; '%r' is rewritten to '%s'
                # because safe_repr already produced the repr text.
                expand = [maybe_promise(arg) for arg in args[1:]]
                return log(severity,
                           self.annotate(args[0].replace('%r', '%s')),
                           *list(safeify_format(args[0], *expand)), **kwargs)
            else:
                # No format arguments: join everything into one message.
                return self.logger.log(severity,
                        self.annotate(" ".join(map(safe_str, args))),
                        **kwargs)

    def get_logger(self):
        return get_logger(self.logger_name)

    def is_enabled_for(self, level):
        return self.logger.isEnabledFor(self.get_loglevel(level))

    def get_loglevel(self, level):
        # Accept either a symbolic name or an already-numeric level.
        if not isinstance(level, int):
            return LOG_LEVELS[level]
        return level

    @cached_property
    def logger(self):
        # Resolved once and memoized per instance.
        return self.get_logger()

    @property
    def logger_name(self):
        return self.__class__.__name__
class Log(LogMixin):
    """LogMixin bound to an explicit logger name (and optional Logger)."""

    def __init__(self, name, logger=None):
        self._logger_name = name
        self._logger = logger

    def get_logger(self):
        # Prefer a truthy injected logger; otherwise resolve by name.
        return self._logger if self._logger else LogMixin.get_logger(self)

    @property
    def logger_name(self):
        return self._logger_name
def setup_logging(loglevel=None, logfile=None):
    """Configure the root logger and return it.

    *logfile* may be a file-like object (anything with ``write``) or a
    filename; it defaults to the original stderr. A handler is added
    only when the root logger has none, so repeated calls are safe.
    """
    logger = logging.getLogger()
    loglevel = get_loglevel(loglevel or "ERROR")
    logfile = logfile if logfile else sys.__stderr__
    if not logger.handlers:
        if hasattr(logfile, "write"):
            handler = logging.StreamHandler(logfile)
        else:
            # A path was given: use a handler that survives log rotation.
            handler = WatchedFileHandler(logfile)
        logger.addHandler(handler)
    logger.setLevel(loglevel)
    return logger
| bsd-3-clause |
aam-at/tensorflow | tensorflow/python/saved_model/model_utils/export_test.py | 14 | 13084 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for export utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import time
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model.model_utils import export_output
from tensorflow.python.saved_model.model_utils import export_utils
from tensorflow.python.saved_model.model_utils.mode_keys import KerasModeKeys
class ExportTest(test_util.TensorFlowTestCase):
  """Tests for SavedModel export utilities.

  Covers signature-def construction (with and without receiver
  alternatives), timestamped/temporary export directory naming, and
  per-mode export output selection.
  """

  def test_build_all_signature_defs_without_receiver_alternatives(self):
    # Force the test to run in graph mode.
    # This tests a deprecated v1 API that depends on graph-only functions such
    # as build_tensor_info.
    with ops.Graph().as_default():
      receiver_tensor = array_ops.placeholder(dtypes.string)
      output_1 = constant_op.constant([1.])
      output_2 = constant_op.constant(["2"])
      output_3 = constant_op.constant(["3"])
      export_outputs = {
          signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
              export_output.RegressionOutput(value=output_1),
          "head-2":
              export_output.ClassificationOutput(classes=output_2),
          "head-3":
              export_output.PredictOutput(outputs={"some_output_3": output_3}),
      }

      signature_defs = export_utils.build_all_signature_defs(
          receiver_tensor, export_outputs)

      # One signature per export output, keyed by head name.
      expected_signature_defs = {
          "serving_default":
              signature_def_utils.regression_signature_def(
                  receiver_tensor, output_1),
          "head-2":
              signature_def_utils.classification_signature_def(
                  receiver_tensor, output_2, None),
          "head-3":
              signature_def_utils.predict_signature_def(
                  {"input": receiver_tensor}, {"some_output_3": output_3})
      }

      self.assertDictEqual(expected_signature_defs, signature_defs)

  def test_build_all_signature_defs_with_dict_alternatives(self):
    # Force the test to run in graph mode.
    # This tests a deprecated v1 API that depends on graph-only functions such
    # as build_tensor_info.
    with ops.Graph().as_default():
      receiver_tensor = array_ops.placeholder(dtypes.string)
      receiver_tensors_alternative_1 = {
          "foo": array_ops.placeholder(dtypes.int64),
          "bar": array_ops.sparse_placeholder(dtypes.float32)
      }
      receiver_tensors_alternatives = {"other": receiver_tensors_alternative_1}
      output_1 = constant_op.constant([1.])
      output_2 = constant_op.constant(["2"])
      output_3 = constant_op.constant(["3"])
      export_outputs = {
          signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
              export_output.RegressionOutput(value=output_1),
          "head-2":
              export_output.ClassificationOutput(classes=output_2),
          "head-3":
              export_output.PredictOutput(outputs={"some_output_3": output_3}),
      }

      signature_defs = export_utils.build_all_signature_defs(
          receiver_tensor, export_outputs, receiver_tensors_alternatives)

      expected_signature_defs = {
          "serving_default":
              signature_def_utils.regression_signature_def(
                  receiver_tensor, output_1),
          "head-2":
              signature_def_utils.classification_signature_def(
                  receiver_tensor, output_2, None),
          "head-3":
              signature_def_utils.predict_signature_def(
                  {"input": receiver_tensor}, {"some_output_3": output_3}),
          "other:head-3":
              signature_def_utils.predict_signature_def(
                  receiver_tensors_alternative_1, {"some_output_3": output_3})

          # Note that the alternatives 'other:serving_default' and
          # 'other:head-2' are invalid, because regression and classification
          # signatures must take a single string input. Here we verify that
          # these invalid signatures are not included in the export_utils.
      }

      self.assertDictEqual(expected_signature_defs, signature_defs)

  def test_build_all_signature_defs_with_single_alternatives(self):
    # Force the test to run in graph mode.
    # This tests a deprecated v1 API that depends on graph-only functions such
    # as build_tensor_info.
    with ops.Graph().as_default():
      receiver_tensor = array_ops.placeholder(dtypes.string)
      receiver_tensors_alternative_1 = array_ops.placeholder(dtypes.int64)
      receiver_tensors_alternative_2 = array_ops.sparse_placeholder(
          dtypes.float32)
      # Note we are passing single Tensors as values of
      # receiver_tensors_alternatives, where normally that is a dict.
      # In this case a dict will be created using the default receiver tensor
      # name "input".
      receiver_tensors_alternatives = {
          "other1": receiver_tensors_alternative_1,
          "other2": receiver_tensors_alternative_2
      }
      output_1 = constant_op.constant([1.])
      output_2 = constant_op.constant(["2"])
      output_3 = constant_op.constant(["3"])
      export_outputs = {
          signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
              export_output.RegressionOutput(value=output_1),
          "head-2":
              export_output.ClassificationOutput(classes=output_2),
          "head-3":
              export_output.PredictOutput(outputs={"some_output_3": output_3}),
      }

      signature_defs = export_utils.build_all_signature_defs(
          receiver_tensor, export_outputs, receiver_tensors_alternatives)

      expected_signature_defs = {
          "serving_default":
              signature_def_utils.regression_signature_def(
                  receiver_tensor, output_1),
          "head-2":
              signature_def_utils.classification_signature_def(
                  receiver_tensor, output_2, None),
          "head-3":
              signature_def_utils.predict_signature_def(
                  {"input": receiver_tensor}, {"some_output_3": output_3}),
          "other1:head-3":
              signature_def_utils.predict_signature_def(
                  {"input": receiver_tensors_alternative_1},
                  {"some_output_3": output_3}),
          "other2:head-3":
              signature_def_utils.predict_signature_def(
                  {"input": receiver_tensors_alternative_2},
                  {"some_output_3": output_3})

          # Note that the alternatives 'other:serving_default' and
          # 'other:head-2' are invalid, because regression and classification
          # signatures must take a single string input. Here we verify that
          # these invalid signatures are not included in the export_utils.
      }

      self.assertDictEqual(expected_signature_defs, signature_defs)

  def test_build_all_signature_defs_export_outputs_required(self):
    # Passing None for export_outputs must raise a descriptive ValueError.
    receiver_tensor = constant_op.constant(["11"])

    with self.assertRaises(ValueError) as e:
      export_utils.build_all_signature_defs(receiver_tensor, None)

    self.assertTrue(str(e.exception).startswith(
        "export_outputs must be a dict"))

  def test_get_timestamped_export_dir(self):
    export_dir_base = tempfile.mkdtemp() + "export/"
    export_dir_1 = export_utils.get_timestamped_export_dir(
        export_dir_base)
    time.sleep(2)
    export_dir_2 = export_utils.get_timestamped_export_dir(
        export_dir_base)
    time.sleep(2)
    export_dir_3 = export_utils.get_timestamped_export_dir(
        export_dir_base)

    # Export directories should be named using a timestamp that is seconds
    # since epoch. Such a timestamp is 10 digits long.
    time_1 = os.path.basename(export_dir_1)
    self.assertEqual(10, len(time_1))
    time_2 = os.path.basename(export_dir_2)
    self.assertEqual(10, len(time_2))
    time_3 = os.path.basename(export_dir_3)
    self.assertEqual(10, len(time_3))

    # Later exports must get strictly larger timestamps.
    self.assertLess(int(time_1), int(time_2))
    self.assertLess(int(time_2), int(time_3))

  def test_get_temp_export_dir(self):
    # The temp dir mirrors the final dir but with a "temp-" prefix on the
    # timestamp component; both str and bytes inputs are accepted.
    export_dir = os.path.join("tmp", "export", "1576013284")
    tmp_export_dir = export_utils.get_temp_export_dir(export_dir)
    self.assertEqual(tmp_export_dir,
                     os.path.join(b"tmp", b"export", b"temp-1576013284"))

    export_dir = os.path.join(b"tmp", b"export", b"1576013284")
    tmp_export_dir = export_utils.get_temp_export_dir(export_dir)
    self.assertEqual(tmp_export_dir,
                     os.path.join(b"tmp", b"export", b"temp-1576013284"))

  def test_build_all_signature_defs_serving_only(self):
    # Force the test to run in graph mode.
    # This tests a deprecated v1 API that depends on graph-only functions such
    # as build_tensor_info.
    with ops.Graph().as_default():
      receiver_tensor = {"input": array_ops.placeholder(dtypes.string)}
      output_1 = constant_op.constant([1.])
      export_outputs = {
          signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
              export_output.PredictOutput(outputs=output_1),
          "train":
              export_output.TrainOutput(loss=output_1),
      }

      # With serving_only (the default) the train head is dropped.
      signature_defs = export_utils.build_all_signature_defs(
          receiver_tensor, export_outputs)

      expected_signature_defs = {
          "serving_default":
              signature_def_utils.predict_signature_def(receiver_tensor,
                                                        {"output": output_1})
      }

      self.assertDictEqual(expected_signature_defs, signature_defs)

      # With serving_only=False the train signature is included as well.
      signature_defs = export_utils.build_all_signature_defs(
          receiver_tensor, export_outputs, serving_only=False)
      expected_signature_defs.update({
          "train":
              signature_def_utils.supervised_train_signature_def(
                  receiver_tensor, loss={"loss": output_1})
      })
      self.assertDictEqual(expected_signature_defs, signature_defs)

  def test_export_outputs_for_mode(self):
    predictions = {"predictions": constant_op.constant([1.])}
    loss = {"loss": constant_op.constant([2.])}
    metrics = {
        "metrics": (constant_op.constant([3.]), constant_op.constant([4.]))}
    # Metric tuples are flattened into value/update_op entries.
    expected_metrics = {
        "metrics/value": metrics["metrics"][0],
        "metrics/update_op": metrics["metrics"][1]
    }

    def _build_export_output(mode):
      return export_utils.export_outputs_for_mode(
          mode, None, predictions, loss, metrics)

    ret = _build_export_output(KerasModeKeys.TRAIN)
    self.assertIn(signature_constants.DEFAULT_TRAIN_SIGNATURE_DEF_KEY, ret)
    export_out = ret[signature_constants.DEFAULT_TRAIN_SIGNATURE_DEF_KEY]
    self.assertIsInstance(export_out, export_output.TrainOutput)
    self.assertEqual(export_out.predictions, predictions)
    self.assertEqual(export_out.loss, loss)
    self.assertEqual(export_out.metrics, expected_metrics)

    ret = _build_export_output(KerasModeKeys.TEST)
    self.assertIn(signature_constants.DEFAULT_EVAL_SIGNATURE_DEF_KEY, ret)
    export_out = ret[signature_constants.DEFAULT_EVAL_SIGNATURE_DEF_KEY]
    self.assertIsInstance(export_out, export_output.EvalOutput)
    self.assertEqual(export_out.predictions, predictions)
    self.assertEqual(export_out.loss, loss)
    self.assertEqual(export_out.metrics, expected_metrics)

    ret = _build_export_output(KerasModeKeys.PREDICT)
    self.assertIn(signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY, ret)
    export_out = ret[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
    self.assertIsInstance(export_out, export_output.PredictOutput)
    self.assertEqual(export_out.outputs, predictions)

    # Explicitly provided export outputs are passed through unchanged.
    classes = constant_op.constant(["class5"])
    ret = export_utils.export_outputs_for_mode(
        KerasModeKeys.PREDICT,
        {"classify": export_output.ClassificationOutput(
            classes=classes)})
    self.assertIn("classify", ret)
    export_out = ret["classify"]
    self.assertIsInstance(export_out, export_output.ClassificationOutput)
    self.assertEqual(export_out.classes, classes)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
BadgerMaps/django-allauth | allauth/socialaccount/providers/weibo/tests.py | 71 | 1182 | from allauth.socialaccount.tests import create_oauth2_tests
from allauth.tests import MockedResponse
from allauth.socialaccount.providers import registry
from .provider import WeiboProvider
class WeiboTests(create_oauth2_tests(registry.by_id(WeiboProvider.id))):
    """OAuth2 provider tests for Weibo, driven by a canned user-info reply."""

    def get_mocked_response(self):
        # Verbatim capture of a Weibo users/show API payload; do not edit --
        # the provider parsing tests depend on these exact fields.
        return MockedResponse(200, """{"bi_followers_count": 0, "domain": "", "avatar_large": "http://tp3.sinaimg.cn/3195025850/180/0/0", "block_word": 0, "star": 0, "id": 3195025850, "city": "1", "verified": false, "follow_me": false, "verified_reason": "", "followers_count": 6, "location": "\u5317\u4eac \u4e1c\u57ce\u533a", "mbtype": 0, "profile_url": "u/3195025850", "province": "11", "statuses_count": 0, "description": "", "friends_count": 0, "online_status": 0, "mbrank": 0, "idstr": "3195025850", "profile_image_url": "http://tp3.sinaimg.cn/3195025850/50/0/0", "allow_all_act_msg": false, "allow_all_comment": true, "geo_enabled": true, "name": "pennersr", "lang": "zh-cn", "weihao": "", "remark": "", "favourites_count": 0, "screen_name": "pennersr", "url": "", "gender": "f", "created_at": "Tue Feb 19 19:43:39 +0800 2013", "verified_type": -1, "following": false}
""")
| mit |
c72578/poedit | deps/boost/tools/build/src/build/property.py | 7 | 23372 | # Status: ported, except for tests.
# Base revision: 64070
#
# Copyright 2001, 2002, 2003 Dave Abrahams
# Copyright 2006 Rene Rivera
# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import re
import sys
from functools import total_ordering
from b2.util.utility import *
from b2.build import feature
from b2.util import sequence, qualify_jam_action, is_iterable_typed
import b2.util.set
from b2.manager import get_manager
# Precompiled patterns used when parsing Jam-style property strings
# ('[condition:]<feature>value', multiple paths joined by '&&').
__re_two_ampersands = re.compile ('&&')
__re_comma = re.compile (',')
__re_split_condition = re.compile ('(.*):(<.*)')
__re_split_conditional = re.compile (r'(.+):<(.+)')
__re_colon = re.compile (':')
__re_has_condition = re.compile (r':<')
__re_separate_condition_and_property = re.compile (r'(.*):(<.*)')

# Sentinel feature assigned to properties whose feature is unknown in the
# current context (see create_from_string / LazyProperty).
_not_applicable_feature='not-applicable-in-this-context'
feature.feature(_not_applicable_feature, [], ['free'])

# Module-level flag toggled by set_abbreviated_paths().
__abbreviated_paths = False
class PropertyMeta(type):
    """
    This class exists to implement the isinstance() and issubclass()
    hooks for the Property class. Since we've introduce the concept of
    a LazyProperty, isinstance(p, Property) will fail when p is a LazyProperty.
    Implementing both __instancecheck__ and __subclasscheck__ will allow
    LazyProperty instances to pass the isinstance() and issubclass check for
    the Property class.

    Additionally, the __call__ method intercepts the call to the Property
    constructor to ensure that calling Property with the same arguments
    will always return the same Property instance.
    """
    # Interned Property instances keyed by (feature, value, *sorted condition).
    _registry = {}
    # Monotonically increasing id handed out to each new Property.
    current_id = 1

    def __call__(mcs, f, value, condition=None):
        """
        This intercepts the call to the Property() constructor.

        This exists so that the same arguments will always return the same Property
        instance. This allows us to give each instance a unique ID.
        """
        from b2.build.feature import Feature
        if not isinstance(f, Feature):
            # Callers may pass the feature by name.
            f = feature.get(f)
        if condition is None:
            condition = []
        key = (f, value) + tuple(sorted(condition))
        if key not in mcs._registry:
            instance = super(PropertyMeta, mcs).__call__(f, value, condition)
            mcs._registry[key] = instance
        return mcs._registry[key]

    @staticmethod
    def check(obj):
        # Duck-typed "looks like a Property" test shared by both hooks.
        return (hasattr(obj, 'feature') and
                hasattr(obj, 'value') and
                hasattr(obj, 'condition'))

    def __instancecheck__(self, instance):
        return self.check(instance)

    def __subclasscheck__(self, subclass):
        return self.check(subclass)
@total_ordering
class Property(object):
    """An interned (feature, value[, condition]) triple.

    Instances are deduplicated by PropertyMeta, so identity comparison of
    equal properties is valid and each instance carries a unique int id.
    """
    __slots__ = ('feature', 'value', 'condition', '_to_raw', '_hash', 'id')
    __metaclass__ = PropertyMeta

    def __init__(self, f, value, condition=None):
        # Only free features may carry ':' inside their value.
        assert(f.free or ':' not in value)
        if condition is None:
            condition = []
        self.feature = f
        self.value = value
        self.condition = condition
        self._hash = hash((self.feature, self.value) + tuple(sorted(self.condition)))
        self.id = PropertyMeta.current_id
        # increment the id counter.
        # this allows us to take a list of Property
        # instances and use their unique integer ID
        # to create a key for PropertySet caching. This is
        # much faster than string comparison.
        PropertyMeta.current_id += 1
        condition_str = ''
        if condition:
            condition_str = ",".join(str(p) for p in self.condition) + ':'
        # Cached Jam-style string form: [cond,...:]<feature>value
        self._to_raw = '{}<{}>{}'.format(condition_str, f.name, value)

    def to_raw(self):
        return self._to_raw

    def __str__(self):
        return self._to_raw

    def __hash__(self):
        return self._hash

    def __eq__(self, other):
        return self._hash == other._hash

    def __lt__(self, other):
        # Ordered by feature name, then value; total_ordering supplies the rest.
        return (self.feature.name, self.value) < (other.feature.name, other.value)
@total_ordering
class LazyProperty(object):
    """Property whose feature may not be declared yet at construction time.

    Until the real feature can be resolved, attribute access proxies to a
    placeholder Property built on the 'not applicable' sentinel feature;
    each access retries the lookup so later feature declarations are
    picked up transparently.
    """

    def __init__(self, feature_name, value, condition=None):
        if condition is None:
            condition = []
        self.__property = Property(
            feature.get(_not_applicable_feature), feature_name + value, condition=condition)
        self.__name = feature_name
        self.__value = value
        self.__condition = condition
        self.__feature = None

    def __getattr__(self, item):
        if self.__feature is None:
            try:
                # Retry resolving the real feature; it may have been
                # declared since this instance was created.
                self.__feature = feature.get(self.__name)
                self.__property = Property(self.__feature, self.__value, self.__condition)
            except KeyError:
                pass
        return getattr(self.__property, item)

    def __hash__(self):
        return hash(self.__property)

    def __str__(self):
        return self.__property._to_raw

    def __eq__(self, other):
        return self.__property == other

    def __lt__(self, other):
        return (self.feature.name, self.value) < (other.feature.name, other.value)
def create_from_string(s, allow_condition=False,allow_missing_value=False):
    """Parse a Jam property string '[condition:]<feature>value' into a Property."""
    assert isinstance(s, basestring)
    assert isinstance(allow_condition, bool)
    assert isinstance(allow_missing_value, bool)
    condition = []
    import types
    if not isinstance(s, types.StringType):
        # NOTE(review): Python 2 debug leftover -- presumably only reached
        # for unicode input; confirm whether it should be removed.
        print type(s)
    if __re_has_condition.search(s):
        if not allow_condition:
            # NOTE(review): raising BaseException directly is unusual; it
            # escapes `except Exception` handlers. Confirm before changing,
            # as callers may rely on the current type.
            raise BaseException("Conditional property is not allowed in this context")
        m = __re_separate_condition_and_property.match(s)
        condition = m.group(1)
        s = m.group(2)
    # FIXME: break dependency cycle
    from b2.manager import get_manager
    if condition:
        condition = [create_from_string(x) for x in condition.split(',')]
    feature_name = get_grist(s)
    if not feature_name:
        if feature.is_implicit_value(s):
            f = feature.implied_feature(s)
            value = s
            p = Property(f, value, condition=condition)
        else:
            raise get_manager().errors()("Invalid property '%s' -- unknown feature" % s)
    else:
        value = get_value(s)
        if not value and not allow_missing_value:
            get_manager().errors()("Invalid property '%s' -- no value specified" % s)
        if feature.valid(feature_name):
            p = Property(feature.get(feature_name), value, condition=condition)
        else:
            # In case feature name is not known, it is wrong to do a hard error.
            # Feature sets change depending on the toolset. So e.g.
            # <toolset-X:version> is an unknown feature when using toolset Y.
            #
            # Ideally we would like to ignore this value, but most of
            # Boost.Build code expects that we return a valid Property. For this
            # reason we use a sentinel <not-applicable-in-this-context> feature.
            #
            # The underlying cause for this problem is that python port Property
            # is more strict than its Jam counterpart and must always reference
            # a valid feature.
            p = LazyProperty(feature_name, value, condition=condition)
    return p
def create_from_strings(string_list, allow_condition=False):
    """Parse every property string in *string_list* into a Property."""
    assert is_iterable_typed(string_list, basestring)
    parsed = []
    for raw in string_list:
        parsed.append(create_from_string(raw, allow_condition))
    return parsed
def reset ():
    """ Clear the module state. This is mainly for testing purposes.
    """
    global __results

    # A cache of results from as_path
    __results = {}

# Initialize the module state at import time.
reset ()
def set_abbreviated_paths(on=True):
    """Globally enable (default) or disable abbreviated target paths.

    The command line passes the literal string 'off' to disable.
    """
    global __abbreviated_paths
    if on == 'off':
        on = False
    __abbreviated_paths = bool(on)
def get_abbreviated_paths():
    """Report whether abbreviated paths are enabled via API or command line."""
    if __abbreviated_paths:
        return __abbreviated_paths
    return '--abbreviated-paths' in sys.argv
def path_order (x, y):
    """ Helper for as_path, below. Orders properties with the implicit ones
        first, and within the two sections in alphabetical order of feature
        name.

        cmp-style protocol: returns -1, 0 or 1.
    """
    if x == y:
        return 0

    xg = get_grist (x)
    yg = get_grist (y)

    # Gristed (explicit) properties sort after ungristed (implicit) ones.
    if yg and not xg:
        return -1
    elif xg and not yg:
        return 1
    else:
        if not xg:
            # Both implicit: compare their subfeature-expanded forms.
            x = feature.expand_subfeatures([x])
            y = feature.expand_subfeatures([y])

        if x < y:
            return -1
        elif x > y:
            return 1
        else:
            return 0
def identify(string):
    # Identity helper kept for parity with the Jam implementation.
    return string
# Uses Property
def refine(properties, requirements):
    """ Refines 'properties' by overriding any non-free properties
    for which a different value is specified in 'requirements'.
    Conditional requirements are just added without modification.
    Returns the resulting list of properties.
    """
    assert is_iterable_typed(properties, Property)
    assert is_iterable_typed(requirements, Property)
    # Map each feature to its unconditional requirement; these override
    # the corresponding properties below.  Conditional requirements are
    # not recorded here.
    required = dict((r.feature, r) for r in requirements if not r.condition)
    # A set keeps the intermediate result free of duplicates.
    refined = set()
    for prop in properties:
        if prop.condition or prop.feature.free:
            # Conditional and free properties pass through untouched.
            refined.add(prop)
        elif prop.feature in required:
            # Overridden by a requirement on the same feature.
            refined.add(required[prop.feature])
        else:
            refined.add(prop)
    # All requirements must be present in the result as well.
    return sequence.unique(list(refined) + requirements)
def translate_paths(properties, path):
    """ Interpret all path properties in 'properties' as relative to 'path'
    The property values are assumed to be in system-specific form, and
    will be translated into normalized form.
    """
    assert is_iterable_typed(properties, Property)
    translated = []
    for prop in properties:
        if not prop.feature.path:
            translated.append(prop)
            continue
        # A single value may hold several paths separated by '&&'.
        parts = __re_two_ampersands.split(prop.value)
        joined = "&&".join(os.path.normpath(os.path.join(path, part))
                           for part in parts)
        if joined != prop.value:
            translated.append(Property(prop.feature, joined, prop.condition))
        else:
            translated.append(prop)
    return translated
def translate_indirect(properties, context_module):
    """Assumes that all feature values that start with '@' are
    names of rules, used in 'context-module'. Such rules can be
    either local to the module or global. Qualified local rules
    with the name of the module."""
    assert is_iterable_typed(properties, Property)
    assert isinstance(context_module, basestring)
    translated = []
    for prop in properties:
        if prop.value[0] != '@':
            translated.append(prop)
            continue
        # Qualify the rule name and make sure the engine knows about it.
        qualified = qualify_jam_action(prop.value[1:], context_module)
        get_manager().engine().register_bjam_action(qualified)
        translated.append(Property(prop.feature, '@' + qualified, prop.condition))
    return translated
def validate(properties):
    """ Exit with error if any of the properties is not valid.
    properties may be a single property or a sequence of properties.
    """
    if isinstance(properties, Property):
        # Normalize a single property to a one-element list.
        properties = [properties]
    assert is_iterable_typed(properties, Property)
    for prop in properties:
        __validate1(prop)
def expand_subfeatures_in_conditions(properties):
    assert is_iterable_typed(properties, Property)
    expanded_props = []
    for prop in properties:
        if not prop.condition:
            expanded_props.append(prop)
            continue
        expanded_condition = []
        for cond in prop.condition:
            # It is common for a condition to mention a toolset or a
            # subfeature that was never defined.  Validating would only
            # produce a spurious error, so validation is suppressed here.
            expanded_condition.extend(feature.expand_subfeatures([cond], True))
        if isinstance(prop, LazyProperty):
            # LazyProperty instances must stay lazy.
            value = prop.value
            feature_name = get_grist(value)
            value = value.replace(feature_name, '')
            expanded_props.append(
                LazyProperty(feature_name, value, condition=expanded_condition))
        else:
            expanded_props.append(
                Property(prop.feature, prop.value, expanded_condition))
    return expanded_props
# FIXME: this should go
def split_conditional(property):
    """ If 'property' is conditional property, returns
    condition and the property, e.g
    <variant>debug,<toolset>gcc:<inlining>full will become
    <variant>debug,<toolset>gcc <inlining>full.
    Otherwise, returns empty string.
    """
    assert isinstance(property, basestring)
    match = __re_split_conditional.match(property)
    if not match:
        return None
    return (match.group(1), '<' + match.group(2))
def select(features, properties):
    """Select the properties that correspond to any of the given features.

    Args:
        features: a feature name or list of feature names; angle brackets
            are added if missing.
        properties: list of property strings to filter.

    Returns:
        The sublist of 'properties' whose grist is one of 'features'.
    """
    assert is_iterable_typed(properties, basestring)
    # add any missing angle brackets
    features = add_grist(features)
    # Fix: dropped the unused local 'result = []' from the original.
    return [p for p in properties if get_grist(p) in features]
def validate_property_sets(sets):
    if __debug__:
        from .property_set import PropertySet
        assert is_iterable_typed(sets, PropertySet)
    # Validate the full property list of every set.
    for property_set in sets:
        validate(property_set.all())
def evaluate_conditionals_in_context(properties, context):
    """ Removes all conditional properties which conditions are not met
    For those with met conditions, removes the condition. Properties
    in conditions are looked up in 'context'
    """
    if __debug__:
        from .property_set import PropertySet
        assert is_iterable_typed(properties, Property)
        assert isinstance(context, PropertySet)
    unconditional = []
    conditional = []
    for prop in properties:
        # Split into conditional and unconditional properties.
        (conditional if prop.condition else unconditional).append(prop)
    result = unconditional[:]
    for prop in conditional:
        # The condition is met when every property it mentions is present
        # in the context.  FIXME: probably inefficient.
        if all(c in context for c in prop.condition):
            result.append(Property(prop.feature, prop.value))
    return result
def change(properties, feature, value=None):
    """ Returns a modified version of properties with all values of the
    given feature replaced by the given value.
    If 'value' is None the feature will be removed.
    """
    assert is_iterable_typed(properties, basestring)
    assert isinstance(feature, basestring)
    assert isinstance(value, (basestring, type(None)))
    feature = add_grist(feature)
    changed = []
    for prop in properties:
        if get_grist(prop) != feature:
            # Properties of other features pass through unchanged.
            changed.append(prop)
        elif value:
            changed.append(replace_grist(value, feature))
        # A matching property with no replacement value is dropped.
    return changed
################################################################
# Private functions
def __validate1(property):
    """ Exit with error if property is not valid.
    """
    assert isinstance(property, Property)
    # Fix: removed the unused local 'msg = None' from the original.
    if not property.feature.free:
        # Only non-free features restrict the set of legal values.
        feature.validate_value_string(property.feature, property.value)
###################################################################
# Still to port.
# Original lines are prefixed with "# "
#
#
# import utility : ungrist ;
# import sequence : unique ;
# import errors : error ;
# import feature ;
# import regex ;
# import sequence ;
# import set ;
# import path ;
# import assert ;
#
#
# rule validate-property-sets ( property-sets * )
# {
# for local s in $(property-sets)
# {
# validate [ feature.split $(s) ] ;
# }
# }
#
def remove(attributes, properties):
    """Returns a property sets which include all the elements
    in 'properties' that do not have attributes listed in 'attributes'.

    Args:
        attributes: a single attribute name or a list of them.
        properties: list of property strings.
    """
    if isinstance(attributes, basestring):
        attributes = [attributes]
    assert is_iterable_typed(attributes, basestring)
    assert is_iterable_typed(properties, basestring)
    result = []
    for e in properties:
        attributes_new = feature.attributes(get_grist(e))
        has_common_features = 0
        for a in attributes_new:
            if a in attributes:
                has_common_features = 1
                break

        if not has_common_features:
            # Bug fix: the original did 'result += e', which extends the
            # list with the individual *characters* of the property string
            # instead of appending the property itself.
            result.append(e)
    return result
def take(attributes, properties):
    """Returns a property set which include all
    properties in 'properties' that have any of 'attributes'."""
    assert is_iterable_typed(attributes, basestring)
    assert is_iterable_typed(properties, basestring)
    selected = []
    for prop in properties:
        # Keep the property if its feature shares any attribute.
        prop_attributes = feature.attributes(get_grist(prop))
        if b2.util.set.intersection(attributes, prop_attributes):
            selected.append(prop)
    return selected
def translate_dependencies(properties, project_id, location):
    assert is_iterable_typed(properties, Property)
    assert isinstance(project_id, basestring)
    assert isinstance(location, basestring)
    result = []
    for p in properties:
        if not p.feature.dependency:
            result.append(p)
            continue
        value = p.value
        match = re.match("(.*)//(.*)", value)
        if match:
            rooted = match.group(1)
            if rooted[0] != '/':
                # Neither a project id nor an absolute Linux path:
                # anchor it at the project location.
                rooted = os.path.join(os.getcwd(), location, rooted)
            result.append(Property(p.feature,
                                   rooted + "//" + match.group(2),
                                   p.condition))
        elif os.path.isabs(value):
            result.append(p)
        else:
            result.append(Property(p.feature,
                                   project_id + "//" + value,
                                   p.condition))
    return result
class PropertyMap:
    """ Class which maintains a property set -> string mapping.
    """
    def __init__(self):
        # Parallel lists: __properties[i] is associated with __values[i].
        self.__properties = []
        self.__values = []

    def insert(self, properties, value):
        """ Associate value with properties.
        """
        assert is_iterable_typed(properties, basestring)
        assert isinstance(value, basestring)
        self.__properties.append(properties)
        self.__values.append(value)

    def find(self, properties):
        """ Return the value associated with properties
        or any subset of it. If more than one
        subset has value assigned to it, return the
        value for the longest subset, if it's unique.
        """
        assert is_iterable_typed(properties, basestring)
        return self.find_replace(properties)

    def find_replace(self, properties, value=None):
        assert is_iterable_typed(properties, basestring)
        assert isinstance(value, (basestring, type(None)))
        # Collect every stored property set contained in 'properties',
        # ranked by its length.
        matches = []
        match_ranks = []
        for index, candidate in enumerate(self.__properties):
            if b2.util.set.contains(candidate, properties):
                matches.append(index)
                match_ranks.append(len(candidate))
        best = sequence.select_highest_ranked(matches, match_ranks)
        if not best:
            return None
        if len(best) > 1:
            raise NoBestMatchingAlternative()
        winner = best[0]
        original = self.__values[winner]
        if value:
            self.__values[winner] = value
        return original
# local rule __test__ ( )
# {
# import errors : try catch ;
# import feature ;
# import feature : feature subfeature compose ;
#
# # local rules must be explicitly re-imported
# import property : path-order ;
#
# feature.prepare-test property-test-temp ;
#
# feature toolset : gcc : implicit symmetric ;
# subfeature toolset gcc : version : 2.95.2 2.95.3 2.95.4
# 3.0 3.0.1 3.0.2 : optional ;
# feature define : : free ;
# feature runtime-link : dynamic static : symmetric link-incompatible ;
# feature optimization : on off ;
# feature variant : debug release : implicit composite symmetric ;
# feature rtti : on off : link-incompatible ;
#
# compose <variant>debug : <define>_DEBUG <optimization>off ;
# compose <variant>release : <define>NDEBUG <optimization>on ;
#
# import assert ;
# import "class" : new ;
#
# validate <toolset>gcc <toolset>gcc-3.0.1 : $(test-space) ;
#
# assert.result <toolset>gcc <rtti>off <define>FOO
# : refine <toolset>gcc <rtti>off
# : <define>FOO
# : $(test-space)
# ;
#
# assert.result <toolset>gcc <optimization>on
# : refine <toolset>gcc <optimization>off
# : <optimization>on
# : $(test-space)
# ;
#
# assert.result <toolset>gcc <rtti>off
# : refine <toolset>gcc : <rtti>off : $(test-space)
# ;
#
# assert.result <toolset>gcc <rtti>off <rtti>off:<define>FOO
# : refine <toolset>gcc : <rtti>off <rtti>off:<define>FOO
# : $(test-space)
# ;
#
# assert.result <toolset>gcc:<define>foo <toolset>gcc:<define>bar
# : refine <toolset>gcc:<define>foo : <toolset>gcc:<define>bar
# : $(test-space)
# ;
#
# assert.result <define>MY_RELEASE
# : evaluate-conditionals-in-context
# <variant>release,<rtti>off:<define>MY_RELEASE
# : <toolset>gcc <variant>release <rtti>off
#
# ;
#
# try ;
# validate <feature>value : $(test-space) ;
# catch "Invalid property '<feature>value': unknown feature 'feature'." ;
#
# try ;
# validate <rtti>default : $(test-space) ;
# catch \"default\" is not a known value of feature <rtti> ;
#
# validate <define>WHATEVER : $(test-space) ;
#
# try ;
# validate <rtti> : $(test-space) ;
# catch "Invalid property '<rtti>': No value specified for feature 'rtti'." ;
#
# try ;
# validate value : $(test-space) ;
# catch "value" is not a value of an implicit feature ;
#
#
# assert.result <rtti>on
# : remove free implicit : <toolset>gcc <define>foo <rtti>on : $(test-space) ;
#
# assert.result <include>a
# : select include : <include>a <toolset>gcc ;
#
# assert.result <include>a
# : select include bar : <include>a <toolset>gcc ;
#
# assert.result <include>a <toolset>gcc
# : select include <bar> <toolset> : <include>a <toolset>gcc ;
#
# assert.result <toolset>kylix <include>a
# : change <toolset>gcc <include>a : <toolset> kylix ;
#
# # Test ordinary properties
# assert.result
# : split-conditional <toolset>gcc
# ;
#
# # Test properties with ":"
# assert.result
# : split-conditional <define>FOO=A::B
# ;
#
# # Test conditional feature
# assert.result <toolset>gcc,<toolset-gcc:version>3.0 <define>FOO
# : split-conditional <toolset>gcc,<toolset-gcc:version>3.0:<define>FOO
# ;
#
# feature.finish-test property-test-temp ;
# }
#
| mit |
caiostringari/swantools | test.py | 1 | 2740 |
import swantools.io
import swantools.utils
import swantools.plot
import datetime
import matplotlib.pyplot as plt
import numpy as np
def readtable():
    reader = swantools.io.SwanIO()
    plotter = swantools.plot.SwanPlot()
    # Read TABLE data with headers and plot the Hsig time series.
    table = reader.read_swantable('data/table.txt')
    hsig = table["Hsig"]
    dates = table.index.values
    plotter.timeseries(dates, hsig, "Significant Wave Heights")
def readspc():
    # Read spectral data and plot a single (scaled) time step.
    reader = swantools.io.SwanIO()
    lat, lon, freqs, dirs, times, factors, spectrum = \
        reader.read_swanspc('data/spectrum.spc')
    plotter = swantools.plot.SwanPlot()
    plotter.spcplot(freqs, dirs, times[15], spectrum[15, :, :] * factors[15])
    # To animate every time step instead:
    # for t, time in enumerate(times):
    #     plotter.spcplot(freqs, dirs, times[t], spectrum[t, :, :])
def readblock(mode):
    reader = swantools.io.SwanIO()
    plotter = swantools.plot.SwanPlot()
    if mode == "non-stat":
        # Reading a block file - non-stationary example
        lon, lat, times, hs = reader.read_swanblock('data/block.mat', 'Hsig')
        plotter.blockplot(lon, lat, hs[0, :, :], "Non-stationary Results")
        # To plot every time step instead:
        # for t, time in enumerate(times):
        #     plotter.blockplot(lon, lat, hs[t, :, :], time.strftime("%Y%m%d %H:%M"))
    elif mode == "stat":
        # Reading a block file - stationary example
        lon, lat, times, hs = reader.read_swanblock('data/stat_block.mat',
                                                    'Hsig', stat=True)
        plotter.blockplot(lon, lat, hs, "Stationary Results")
def writescp():
    # Get some spectral data to play with.
    reader = swantools.io.SwanIO()
    lat, lon, freqs, dirs, times, factors, spectrum = \
        reader.read_swanspc('data/spectrum.spc')
    # Re-write the data to a new file.
    reader.write_spectrum("spcout.spc", lat, lon, times, freqs, dirs,
                          factors, spectrum)
    # Read it back and plot every time step to confirm the round trip.
    lat, lon, freqs, dirs, times, factors, spectrum = \
        reader.read_swanspc('spcout.spc')
    plotter = swantools.plot.SwanPlot()
    for t, time in enumerate(times):
        plotter.spcplot(freqs, dirs, times[t], spectrum[t, :, :])
def netcdf_output():
    # Convert a block file to netCDF4.
    reader = swantools.io.SwanIO()
    converter = swantools.io.Converters()
    lon, lat, times, hs = reader.read_swanblock('data/block.mat', 'Hsig')
    converter.np2nc("Hsig.nc", lat, lon, times, hs, "Significant Wave Height")
def spectral_output():
    # Convert a spectral file to netCDF4.
    reader = swantools.io.SwanIO()
    converter = swantools.io.Converters()
    lon, lat, freqs, dirs, times, factors, spectrum = \
        reader.read_swanspc('data/spectrum.spc')
    converter.spc2nc("spectrum.nc", lat, lon, freqs, dirs, times,
                     factors, spectrum)
if __name__ == "__main__":

    # Table data example (requires seaborn):
    # import seaborn as sns
    # with sns.axes_style("darkgrid"):
    #     readtable()

    # Spectral data
    readspc()
    # Field data
    readblock("non-stat")
    # Converting block to netCDF4
    netcdf_output()
    # Converting spectral file to netCDF4
    spectral_output()
    # Writing spectral data
    writescp()
| gpl-2.0 |
uw-it-cte/uw-restclients | restclients/test/kws/key.py | 2 | 1974 | from django.test import TestCase
from django.conf import settings
from restclients.kws import KWS
from restclients.exceptions import DataFailureException
class KWSTestKeyData(TestCase):
    """Tests KWS key retrieval against the file-based mock DAO."""

    # Mock DAO implementation that serves canned responses from files.
    _DAO_SETTING = 'restclients.dao_implementation.kws.File'
    # Key id of the canned key in the mock data.
    _KEY_ID = 'ee99defd-baee-43b0-9e1e-f8238dd106bb'

    def _assert_expected_key(self, key):
        """Check every attribute of the canned key served by the File DAO.

        Uses assertEqual (assertEquals is a deprecated alias).
        """
        self.assertEqual(key.algorithm, 'AES128CBC', 'Correct algorithm')
        self.assertEqual(key.cipher_mode, 'CBC', 'Correct cipher mode')
        self.assertEqual(key.expiration.isoformat(), '2013-04-11T13:44:33',
                         'Correct expiration')
        self.assertEqual(key.key_id, self._KEY_ID, 'Correct key ID')
        self.assertEqual(key.key, 'Uv2JsxggfxF9OQNzIxAzDQ==', 'Correct key')
        self.assertEqual(key.size, 128, 'Correct key size')
        self.assertEqual(
            key.url,
            'https://it-wseval1.s.uw.edu/key/v1/encryption/'
            'ee99defd-baee-43b0-9e1e-f8238dd106bb.json',
            'Correct key URL')

    def test_key_by_id(self):
        """Fetching a key by its id returns the canned key."""
        with self.settings(RESTCLIENTS_KWS_DAO_CLASS=self._DAO_SETTING):
            key = KWS().get_key(self._KEY_ID)
            self._assert_expected_key(key)

    def test_current_key(self):
        """Fetching the current key for a resource returns the canned key."""
        with self.settings(RESTCLIENTS_KWS_DAO_CLASS=self._DAO_SETTING):
            key = KWS().get_current_key('uw-student-registration')
            self._assert_expected_key(key)
| apache-2.0 |
mou4e/zirconium | third_party/protobuf/python/google/protobuf/descriptor.py | 228 | 26625 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Descriptors essentially contain exactly the information found in a .proto
file, in types that make this information accessible in Python.
"""
__author__ = 'robinson@google.com (Will Robinson)'
from google.protobuf.internal import api_implementation
if api_implementation.Type() == 'cpp':
if api_implementation.Version() == 2:
from google.protobuf.internal.cpp import _message
else:
from google.protobuf.internal import cpp_message
class Error(Exception):
  """Base error for this module.  All descriptor errors derive from it."""
class TypeTransformationError(Error):
  """Error transforming between a Python proto type and the corresponding
  C++ type (see FieldDescriptor.ProtoTypeToCppProtoType)."""
class DescriptorBase(object):

  """Descriptors base class.

  This class is the base of all descriptor classes. It provides common options
  related functionality.

  Attributes:
    has_options: True if the descriptor has non-default options.  Usually it
        is not necessary to read this -- just call GetOptions() which will
        happily return the default instance.  However, it's sometimes useful
        for efficiency, and also useful inside the protobuf implementation to
        avoid some bootstrapping issues.
  """

  def __init__(self, options, options_class_name):
    """Initialize the descriptor given its options message and the name of the
    class of the options message. The name of the class is required in case
    the options message is None and has to be created.
    """
    self._options = options
    self._options_class_name = options_class_name

    # Does this descriptor have non-default options?
    self.has_options = options is not None

  def _SetOptions(self, options, options_class_name):
    """Sets the descriptor's options

    This function is used in generated proto2 files to update descriptor
    options. It must not be used outside proto2.
    """
    self._options = options
    self._options_class_name = options_class_name

    # Does this descriptor have non-default options?
    self.has_options = options is not None

  def GetOptions(self):
    """Retrieves descriptor options.

    This method returns the options set or creates the default options for the
    descriptor.
    """
    if self._options:
      return self._options
    # Imported here (not at module level) to avoid a bootstrapping cycle:
    # descriptor_pb2 itself is generated code that imports this module.
    from google.protobuf import descriptor_pb2
    try:
      options_class = getattr(descriptor_pb2, self._options_class_name)
    except AttributeError:
      raise RuntimeError('Unknown options class name %s!' %
                         (self._options_class_name))
    self._options = options_class()
    return self._options
class _NestedDescriptorBase(DescriptorBase):
  """Common class for descriptors that can be nested."""

  def __init__(self, options, options_class_name, name, full_name,
               file, containing_type, serialized_start=None,
               serialized_end=None):
    """Constructor.

    Args:
      options: Protocol message options or None
        to use default message options.
      options_class_name: (str) The class name of the above options.

      name: (str) Name of this protocol message type.
      full_name: (str) Fully-qualified name of this protocol message type,
        which will include protocol "package" name and the name of any
        enclosing types.

      file: (FileDescriptor) Reference to file info.
      containing_type: if provided, this is a nested descriptor, with this
        descriptor as parent, otherwise None.
      serialized_start: The start index (inclusive) in block in the
        file.serialized_pb that describes this descriptor.
      serialized_end: The end index (exclusive) in block in the
        file.serialized_pb that describes this descriptor.
    """
    super(_NestedDescriptorBase, self).__init__(
        options, options_class_name)

    self.name = name
    # TODO(falk): Add function to calculate full_name instead of having it in
    # memory?
    self.full_name = full_name
    self.file = file
    self.containing_type = containing_type

    self._serialized_start = serialized_start
    self._serialized_end = serialized_end

  def GetTopLevelContainingType(self):
    """Returns the root if this is a nested type, or itself if it's the root."""
    desc = self
    while desc.containing_type is not None:
      desc = desc.containing_type
    return desc

  def CopyToProto(self, proto):
    """Copies this to the matching proto in descriptor_pb2.

    Args:
      proto: An empty proto instance from descriptor_pb2.

    Raises:
      Error: If self couldn't be serialized, due to too few constructor
        arguments.
    """
    if (self.file is not None and
        self._serialized_start is not None and
        self._serialized_end is not None):
      proto.ParseFromString(self.file.serialized_pb[
          self._serialized_start:self._serialized_end])
    else:
      raise Error('Descriptor does not contain serialization.')
class Descriptor(_NestedDescriptorBase):

  """Descriptor for a protocol message type.

  A Descriptor instance has the following attributes:

    name: (str) Name of this protocol message type.
    full_name: (str) Fully-qualified name of this protocol message type,
      which will include protocol "package" name and the name of any
      enclosing types.

    containing_type: (Descriptor) Reference to the descriptor of the
      type containing us, or None if this is top-level.

    fields: (list of FieldDescriptors) Field descriptors for all
      fields in this type.
    fields_by_number: (dict int -> FieldDescriptor) Same FieldDescriptor
      objects as in |fields|, but indexed by "number" attribute in each
      FieldDescriptor.
    fields_by_name: (dict str -> FieldDescriptor) Same FieldDescriptor
      objects as in |fields|, but indexed by "name" attribute in each
      FieldDescriptor.

    nested_types: (list of Descriptors) Descriptor references
      for all protocol message types nested within this one.
    nested_types_by_name: (dict str -> Descriptor) Same Descriptor
      objects as in |nested_types|, but indexed by "name" attribute
      in each Descriptor.

    enum_types: (list of EnumDescriptors) EnumDescriptor references
      for all enums contained within this type.
    enum_types_by_name: (dict str -> EnumDescriptor) Same EnumDescriptor
      objects as in |enum_types|, but indexed by "name" attribute
      in each EnumDescriptor.
    enum_values_by_name: (dict str -> EnumValueDescriptor) Dict mapping
      from enum value name to EnumValueDescriptor for that value.

    extensions: (list of FieldDescriptor) All extensions defined directly
      within this message type (NOT within a nested type).
    extensions_by_name: (dict, string -> FieldDescriptor) Same FieldDescriptor
      objects as |extensions|, but indexed by "name" attribute of each
      FieldDescriptor.

    is_extendable: Does this type define any extension ranges?

    options: (descriptor_pb2.MessageOptions) Protocol message options or None
      to use default message options.

    file: (FileDescriptor) Reference to file descriptor.
  """

  def __init__(self, name, full_name, filename, containing_type, fields,
               nested_types, enum_types, extensions, options=None,
               is_extendable=True, extension_ranges=None, file=None,
               serialized_start=None, serialized_end=None):
    """Arguments to __init__() are as described in the description
    of Descriptor fields above.

    Note that filename is an obsolete argument, that is not used anymore.
    Please use file.name to access this as an attribute.
    """
    # Bug fix: the original passed serialized_start as the serialized_end
    # argument here.  The mistake was masked by the direct reassignment of
    # self._serialized_start/_end at the bottom of this constructor, but
    # it is corrected for consistency with _NestedDescriptorBase.
    super(Descriptor, self).__init__(
        options, 'MessageOptions', name, full_name, file,
        containing_type, serialized_start=serialized_start,
        serialized_end=serialized_end)

    # We have fields in addition to fields_by_name and fields_by_number,
    # so that:
    #   1. Clients can index fields by "order in which they're listed."
    #   2. Clients can easily iterate over all fields with the terse
    #      syntax: for f in descriptor.fields: ...
    self.fields = fields
    for field in self.fields:
      field.containing_type = self
    self.fields_by_number = dict((f.number, f) for f in fields)
    self.fields_by_name = dict((f.name, f) for f in fields)

    self.nested_types = nested_types
    self.nested_types_by_name = dict((t.name, t) for t in nested_types)

    self.enum_types = enum_types
    for enum_type in self.enum_types:
      enum_type.containing_type = self
    self.enum_types_by_name = dict((t.name, t) for t in enum_types)
    # Flattened view: every value of every contained enum, by value name.
    self.enum_values_by_name = dict(
        (v.name, v) for t in enum_types for v in t.values)

    self.extensions = extensions
    for extension in self.extensions:
      extension.extension_scope = self
    self.extensions_by_name = dict((f.name, f) for f in extensions)
    self.is_extendable = is_extendable
    self.extension_ranges = extension_ranges

    self._serialized_start = serialized_start
    self._serialized_end = serialized_end

  def EnumValueName(self, enum, value):
    """Returns the string name of an enum value.

    This is just a small helper method to simplify a common operation.

    Args:
      enum: string name of the Enum.
      value: int, value of the enum.

    Returns:
      string name of the enum value.

    Raises:
      KeyError if either the Enum doesn't exist or the value is not a valid
        value for the enum.
    """
    return self.enum_types_by_name[enum].values_by_number[value].name

  def CopyToProto(self, proto):
    """Copies this to a descriptor_pb2.DescriptorProto.

    Args:
      proto: An empty descriptor_pb2.DescriptorProto.
    """
    # This function is overridden to give a better doc comment.
    super(Descriptor, self).CopyToProto(proto)
# TODO(robinson): We should have aggressive checking here,
# for example:
# * If you specify a repeated field, you should not be allowed
# to specify a default value.
# * [Other examples here as needed].
#
# TODO(robinson): for this and other *Descriptor classes, we
# might also want to lock things down aggressively (e.g.,
# prevent clients from setting the attributes). Having
# stronger invariants here in general will reduce the number
# of runtime checks we must do in reflection.py...
class FieldDescriptor(DescriptorBase):

  """Descriptor for a single field in a .proto file.

  A FieldDescriptor instance has the following attributes:

    name: (str) Name of this field, exactly as it appears in .proto.
    full_name: (str) Name of this field, including containing scope.  This is
      particularly relevant for extensions.
    index: (int) Dense, 0-indexed index giving the order that this
      field textually appears within its message in the .proto file.
    number: (int) Tag number declared for this field in the .proto file.

    type: (One of the TYPE_* constants below) Declared type.
    cpp_type: (One of the CPPTYPE_* constants below) C++ type used to
      represent this field.

    label: (One of the LABEL_* constants below) Tells whether this
      field is optional, required, or repeated.
    has_default_value: (bool) True if this field has a default value defined,
      otherwise false.
    default_value: (Varies) Default value of this field.  Only
      meaningful for non-repeated scalar fields.  Repeated fields
      should always set this to [], and non-repeated composite
      fields should always set this to None.

    containing_type: (Descriptor) Descriptor of the protocol message
      type that contains this field.  Set by the Descriptor constructor
      if we're passed into one.
      Somewhat confusingly, for extension fields, this is the
      descriptor of the EXTENDED message, not the descriptor
      of the message containing this field.  (See is_extension and
      extension_scope below).
    message_type: (Descriptor) If a composite field, a descriptor
      of the message type contained in this field.  Otherwise, this is None.
    enum_type: (EnumDescriptor) If this field contains an enum, a
      descriptor of that enum.  Otherwise, this is None.

    is_extension: True iff this describes an extension field.
    extension_scope: (Descriptor) Only meaningful if is_extension is True.
      Gives the message that immediately contains this extension field.
      Will be None iff we're a top-level (file-level) extension field.

    options: (descriptor_pb2.FieldOptions) Protocol message field options or
      None to use default field options.
  """

  # Must be consistent with C++ FieldDescriptor::Type enum in
  # descriptor.h.
  #
  # TODO(robinson): Find a way to eliminate this repetition.
  TYPE_DOUBLE = 1
  TYPE_FLOAT = 2
  TYPE_INT64 = 3
  TYPE_UINT64 = 4
  TYPE_INT32 = 5
  TYPE_FIXED64 = 6
  TYPE_FIXED32 = 7
  TYPE_BOOL = 8
  TYPE_STRING = 9
  TYPE_GROUP = 10
  TYPE_MESSAGE = 11
  TYPE_BYTES = 12
  TYPE_UINT32 = 13
  TYPE_ENUM = 14
  TYPE_SFIXED32 = 15
  TYPE_SFIXED64 = 16
  TYPE_SINT32 = 17
  TYPE_SINT64 = 18
  MAX_TYPE = 18

  # Must be consistent with C++ FieldDescriptor::CppType enum in
  # descriptor.h.
  #
  # TODO(robinson): Find a way to eliminate this repetition.
  CPPTYPE_INT32 = 1
  CPPTYPE_INT64 = 2
  CPPTYPE_UINT32 = 3
  CPPTYPE_UINT64 = 4
  CPPTYPE_DOUBLE = 5
  CPPTYPE_FLOAT = 6
  CPPTYPE_BOOL = 7
  CPPTYPE_ENUM = 8
  CPPTYPE_STRING = 9
  CPPTYPE_MESSAGE = 10
  MAX_CPPTYPE = 10

  # Mapping from declared proto type to the C++ type used to represent it;
  # several wire types share one C++ representation (e.g. all the 32-bit
  # signed variants map to CPPTYPE_INT32).
  _PYTHON_TO_CPP_PROTO_TYPE_MAP = {
      TYPE_DOUBLE: CPPTYPE_DOUBLE,
      TYPE_FLOAT: CPPTYPE_FLOAT,
      TYPE_ENUM: CPPTYPE_ENUM,
      TYPE_INT64: CPPTYPE_INT64,
      TYPE_SINT64: CPPTYPE_INT64,
      TYPE_SFIXED64: CPPTYPE_INT64,
      TYPE_UINT64: CPPTYPE_UINT64,
      TYPE_FIXED64: CPPTYPE_UINT64,
      TYPE_INT32: CPPTYPE_INT32,
      TYPE_SFIXED32: CPPTYPE_INT32,
      TYPE_SINT32: CPPTYPE_INT32,
      TYPE_UINT32: CPPTYPE_UINT32,
      TYPE_FIXED32: CPPTYPE_UINT32,
      TYPE_BYTES: CPPTYPE_STRING,
      TYPE_STRING: CPPTYPE_STRING,
      TYPE_BOOL: CPPTYPE_BOOL,
      TYPE_MESSAGE: CPPTYPE_MESSAGE,
      TYPE_GROUP: CPPTYPE_MESSAGE
      }

  # Must be consistent with C++ FieldDescriptor::Label enum in
  # descriptor.h.
  #
  # TODO(robinson): Find a way to eliminate this repetition.
  LABEL_OPTIONAL = 1
  LABEL_REQUIRED = 2
  LABEL_REPEATED = 3
  MAX_LABEL = 3

  def __init__(self, name, full_name, index, number, type, cpp_type, label,
               default_value, message_type, enum_type, containing_type,
               is_extension, extension_scope, options=None,
               has_default_value=True):
    """The arguments are as described in the description of FieldDescriptor
    attributes above.

    Note that containing_type may be None, and may be set later if necessary
    (to deal with circular references between message types, for example).
    Likewise for extension_scope.
    """
    super(FieldDescriptor, self).__init__(options, 'FieldOptions')
    self.name = name
    self.full_name = full_name
    self.index = index
    self.number = number
    self.type = type
    self.cpp_type = cpp_type
    self.label = label
    self.has_default_value = has_default_value
    self.default_value = default_value
    self.containing_type = containing_type
    self.message_type = message_type
    self.enum_type = enum_type
    self.is_extension = is_extension
    self.extension_scope = extension_scope
    # When the C++-backed implementation is active, look up the matching
    # C++ descriptor so field access can be delegated to it; the lookup
    # entry point differs between extensions / regular fields and between
    # API versions 1 and 2.
    if api_implementation.Type() == 'cpp':
      if is_extension:
        if api_implementation.Version() == 2:
          self._cdescriptor = _message.GetExtensionDescriptor(full_name)
        else:
          self._cdescriptor = cpp_message.GetExtensionDescriptor(full_name)
      else:
        if api_implementation.Version() == 2:
          self._cdescriptor = _message.GetFieldDescriptor(full_name)
        else:
          self._cdescriptor = cpp_message.GetFieldDescriptor(full_name)
    else:
      # Pure-Python implementation: no C++ descriptor to delegate to.
      self._cdescriptor = None

  @staticmethod
  def ProtoTypeToCppProtoType(proto_type):
    """Converts from a Python proto type to a C++ Proto Type.

    The Python ProtocolBuffer classes specify both the 'Python' datatype and the
    'C++' datatype - and they're not the same. This helper method should
    translate from one to another.

    Args:
      proto_type: the Python proto type (descriptor.FieldDescriptor.TYPE_*)
    Returns:
      descriptor.FieldDescriptor.CPPTYPE_*, the C++ type.
    Raises:
      TypeTransformationError: when the Python proto type isn't known.
    """
    try:
      return FieldDescriptor._PYTHON_TO_CPP_PROTO_TYPE_MAP[proto_type]
    except KeyError:
      raise TypeTransformationError('Unknown proto_type: %s' % proto_type)
class EnumDescriptor(_NestedDescriptorBase):

  """Descriptor for an enum defined in a .proto file.

  An EnumDescriptor instance has the following attributes:

    name: (str) Name of the enum type.
    full_name: (str) Full name of the type, including package name
      and any enclosing type(s).
    values: (list of EnumValueDescriptors) List of the values
      in this enum.
    values_by_name: (dict str -> EnumValueDescriptor) Same as |values|,
      but indexed by the "name" field of each EnumValueDescriptor.
    values_by_number: (dict int -> EnumValueDescriptor) Same as |values|,
      but indexed by the "number" field of each EnumValueDescriptor.
    containing_type: (Descriptor) Descriptor of the immediate containing
      type of this enum, or None if this is an enum defined at the
      top level in a .proto file.  Set by Descriptor's constructor
      if we're passed into one.
    file: (FileDescriptor) Reference to file descriptor.
    options: (descriptor_pb2.EnumOptions) Enum options message or
      None to use default enum options.
  """

  def __init__(self, name, full_name, filename, values,
               containing_type=None, options=None, file=None,
               serialized_start=None, serialized_end=None):
    """Arguments are as described in the attribute description above.

    Note that filename is an obsolete argument, that is not used anymore.
    Please use file.name to access this as an attribute.
    """
    # Bug fix: the base class was previously passed
    # serialized_end=serialized_start, corrupting the recorded serialized
    # byte range of the enum (compare ServiceDescriptor, which forwards
    # serialized_end correctly).
    super(EnumDescriptor, self).__init__(
        options, 'EnumOptions', name, full_name, file,
        containing_type, serialized_start=serialized_start,
        serialized_end=serialized_end)

    self.values = values
    for value in self.values:
      # Give each value a back-reference to its owning enum.
      value.type = self
    self.values_by_name = dict((v.name, v) for v in values)
    self.values_by_number = dict((v.number, v) for v in values)

    self._serialized_start = serialized_start
    self._serialized_end = serialized_end

  def CopyToProto(self, proto):
    """Copies this to a descriptor_pb2.EnumDescriptorProto.

    Args:
      proto: An empty descriptor_pb2.EnumDescriptorProto.
    """
    # This function is overridden to give a better doc comment.
    super(EnumDescriptor, self).CopyToProto(proto)
class EnumValueDescriptor(DescriptorBase):

  """Descriptor for a single value within an enum.

    name: (str) Name of this value.
    index: (int) Dense, 0-indexed index giving the order that this
      value appears textually within its enum in the .proto file.
    number: (int) Actual number assigned to this enum value.
    type: (EnumDescriptor) EnumDescriptor to which this value
      belongs.  Set by EnumDescriptor's constructor if we're
      passed into one.
    options: (descriptor_pb2.EnumValueOptions) Enum value options message or
      None to use default enum value options options.
  """

  def __init__(self, name, index, number, type=None, options=None):
    """Arguments are as described in the attribute description above."""
    super(EnumValueDescriptor, self).__init__(options, 'EnumValueOptions')
    # Pure data holder: record every constructor argument verbatim.
    self.name, self.index, self.number, self.type = name, index, number, type
class ServiceDescriptor(_NestedDescriptorBase):

  """Descriptor for a service.

    name: (str) Name of the service.
    full_name: (str) Full name of the service, including package name.
    index: (int) 0-indexed index giving the order that this services
      definition appears withing the .proto file.
    methods: (list of MethodDescriptor) List of methods provided by this
      service.
    options: (descriptor_pb2.ServiceOptions) Service options message or
      None to use default service options.
    file: (FileDescriptor) Reference to file info.
  """

  def __init__(self, name, full_name, index, methods, options=None, file=None,
               serialized_start=None, serialized_end=None):
    super(ServiceDescriptor, self).__init__(
        options, 'ServiceOptions', name, full_name, file,
        None, serialized_start=serialized_start,
        serialized_end=serialized_end)
    self.index = index
    self.methods = methods
    # Give every method a back-reference to its containing service.
    for meth in self.methods:
      meth.containing_service = self

  def FindMethodByName(self, name):
    """Searches for the specified method, and returns its descriptor."""
    # Linear scan is fine: service method lists are short.
    return next((meth for meth in self.methods if meth.name == name), None)

  def CopyToProto(self, proto):
    """Copies this to a descriptor_pb2.ServiceDescriptorProto.

    Args:
      proto: An empty descriptor_pb2.ServiceDescriptorProto.
    """
    # Overridden only to attach a more specific doc comment.
    super(ServiceDescriptor, self).CopyToProto(proto)
class MethodDescriptor(DescriptorBase):

  """Descriptor for a method in a service.

    name: (str) Name of the method within the service.
    full_name: (str) Full name of method.
    index: (int) 0-indexed index of the method inside the service.
    containing_service: (ServiceDescriptor) The service that contains this
      method.
    input_type: The descriptor of the message that this method accepts.
    output_type: The descriptor of the message that this method returns.
    options: (descriptor_pb2.MethodOptions) Method options message or
      None to use default method options.
  """

  def __init__(self, name, full_name, index, containing_service,
               input_type, output_type, options=None):
    """The arguments are as described in the description of MethodDescriptor
    attributes above.

    Note that containing_service may be None, and may be set later if
    necessary.
    """
    super(MethodDescriptor, self).__init__(options, 'MethodOptions')
    # Pure data holder: record the constructor arguments verbatim.
    (self.name, self.full_name, self.index) = (name, full_name, index)
    self.containing_service = containing_service
    (self.input_type, self.output_type) = (input_type, output_type)
class FileDescriptor(DescriptorBase):

  """Descriptor for a file. Mimics the descriptor_pb2.FileDescriptorProto.

    name: name of file, relative to root of source tree.
    package: name of the package
    serialized_pb: (str) Byte string of serialized
      descriptor_pb2.FileDescriptorProto.
  """

  def __init__(self, name, package, options=None, serialized_pb=None):
    """Constructor."""
    super(FileDescriptor, self).__init__(options, 'FileOptions')

    self.message_types_by_name = {}
    self.name = name
    self.package = package
    self.serialized_pb = serialized_pb
    if api_implementation.Type() == 'cpp':
      if self.serialized_pb is not None:
        # Register the serialized file with the active C++ implementation
        # (version 2 uses _message, earlier versions cpp_message).
        if api_implementation.Version() == 2:
          _message.BuildFile(self.serialized_pb)
        else:
          cpp_message.BuildFile(self.serialized_pb)

  def CopyToProto(self, proto):
    """Copies this to a descriptor_pb2.FileDescriptorProto.

    Args:
      proto: An empty descriptor_pb2.FileDescriptorProto.
    """
    proto.ParseFromString(self.serialized_pb)
def _ParseOptions(message, string):
  """Parses serialized options.

  This helper function is used to parse serialized options in generated
  proto2 files. It must not be used outside proto2.

  Args:
    message: An empty options message to populate in place.
    string: The serialized bytes to parse into |message|.

  Returns:
    The same |message| instance, for convenient inline use.
  """
  message.ParseFromString(string)
  return message
def MakeDescriptor(desc_proto, package=''):
  """Make a protobuf Descriptor given a DescriptorProto protobuf.

  Args:
    desc_proto: The descriptor_pb2.DescriptorProto protobuf message.
    package: Optional package name for the new message Descriptor (string).

  Returns:
    A Descriptor for protobuf messages.
  """
  # Qualify the message name with the package, when one was supplied.
  name_parts = ([package] if package else []) + [desc_proto.name]

  field_descriptors = []
  for fp in desc_proto.field:
    qualified_name = '.'.join(name_parts + [fp.name])
    # NOTE(review): the index argument is derived from the field number
    # (number - 1) rather than the field's position within desc_proto.field;
    # preserved as-is from the original implementation -- confirm intent.
    field_descriptors.append(FieldDescriptor(
        fp.name, qualified_name, fp.number - 1,
        fp.number, fp.type,
        FieldDescriptor.ProtoTypeToCppProtoType(fp.type),
        fp.label, None, None, None, None, False, None,
        has_default_value=False))

  return Descriptor(desc_proto.name, '.'.join(name_parts), None, None,
                    field_descriptors, [], [], [])
| bsd-3-clause |
SuperDARNCanada/placeholderOS | tools/testing_utils/experiments/experiment_unittests.py | 2 | 7629 | """
Test module for the experiment_handler/experiment_prototype code.
It is run simply via 'python3 experiment_unittests.py' and will go through all tests
in the experiment_tests.csv file as well as the hardcoded tests here that don't fit nicely into
a csv file.
The csv file format is:
[#][experiment file module import name]::[regex error message]
The [#] is an optional comment, and that line will be removed
An example of a test line is:
testing_archive.my_test_experiment.py::Regex line that * matches the ExperimentException err msg
References:
https://stackoverflow.com/questions/32899/how-do-you-generate-dynamic-parameterized-unit-tests-in-python
https://docs.python.org/3/library/unittest.html
https://www.bnmetrics.com/blog/dynamic-import-in-python3
:copyright: 2020 SuperDARN Canada
:author: Kevin Krieger
"""
import unittest
import os
import sys
import inspect
import pkgutil
from pathlib import Path
from importlib import import_module
BOREALISPATH = os.environ['BOREALISPATH']
sys.path.append(BOREALISPATH)
# Need to hardcode this, as unittest does weird things when you supply an argument on command line,
# or if you use argparse. There is probably a better way
input_test_file = BOREALISPATH + "/tools/testing_utils/experiments/experiment_tests.csv"
# Call experiment handler main function like so: eh.main(['normalscan', 'discretionary'])
import experiment_handler.experiment_handler as eh
from experiment_prototype.experiment_exception import ExperimentException
from experiment_prototype.experiment_prototype import ExperimentPrototype
import experiments.superdarn_common_fields as scf
def ehmain(experiment='normalscan', scheduling_mode='discretionary'):
    """
    Convenience wrapper: invoke the experiment handler exactly as if it had
    been started from the command line with the given experiment and mode.
    """
    argv = [experiment, scheduling_mode]
    eh.main(argv)
class TestExperimentEnvSetup(unittest.TestCase):
    """
    A unittest class to test the environment setup for the experiment_handler module.
    All test methods must begin with the word 'test' to be run by unittest.
    """

    def setUp(self):
        """
        This function is called before every test_* method (every test case in unittest lingo)
        """
        print("Method: ", self._testMethodName)

    def test_no_args(self):
        """
        Test calling the experiment handler without any command line arguments, which returns 2
        """
        with self.assertRaisesRegex(SystemExit, "2"):
            eh.main([])

    #def test_borealispath(self):
    #    """
    #    Test failure to have BOREALISPATH in env
    #    """
    #    Need to remove the environment variable, reset for other tests
    #    os.environ.pop('BOREALISPATH')
    #    sys.path.remove(BOREALISPATH)
    #    del os.environ['BOREALISPATH']
    #    os.unsetenv('BOREALISPATH')
    #    with self.assertRaisesRegex(KeyError, "BOREALISPATH"):
    #       ehmain()
    #    os.environ['BOREALISPATH'] = BOREALISPATH
    #    sys.path.append(BOREALISPATH)

    def test_config_file(self):
        """
        Test the code that checks for the config file
        """
        # Rename the config file temporarily.  Robustness fix: restore it in a
        # finally block so a failing assertion no longer leaves the
        # environment broken for every subsequent test.
        os.rename(BOREALISPATH + '/config.ini', BOREALISPATH + '/_config.ini')
        try:
            with self.assertRaisesRegex(ExperimentException, "Cannot open config file at "):
                ehmain()
        finally:
            # Now rename the config file and move on
            os.rename(BOREALISPATH + '/_config.ini', BOREALISPATH + '/config.ini')

    def test_hdw_file(self):
        """
        Test the code that checks for the hdw.dat file
        """
        site_name = scf.opts.site_id
        # Rename the hdw.dat file temporarily.  Robustness fix: restore it in
        # a finally block even when the assertion fails.
        os.rename(BOREALISPATH + '/hdw.dat.{}'.format(site_name),
                  BOREALISPATH + '/_hdw.dat.{}'.format(site_name))
        try:
            with self.assertRaisesRegex(ExperimentException, "Cannot open hdw.dat.[a-z]{3} file at"):
                ehmain()
        finally:
            # Now rename the hdw.dat file and move on
            os.rename(BOREALISPATH + '/_hdw.dat.{}'.format(site_name),
                      BOREALISPATH + '/hdw.dat.{}'.format(site_name))

    def test_all_experiments(self):
        """
        Test that all experiments in the experiments folder run without issues
        """
        # This iterates through modules in the experiments directory
        for (_, name, _) in pkgutil.iter_modules([Path(BOREALISPATH + '/experiments/')]):
            # This imports any module found in the experiments directory
            imported_module = import_module('.' + name, package='experiments')
            # This for loop goes through all attributes of the imported module
            for i in dir(imported_module):
                attribute = getattr(imported_module, i)
                # If the attribute is a class, a subclass of ExperimentPrototype,
                # and not ExperimentPrototype itself, then run it.
                if inspect.isclass(attribute) and issubclass(attribute, ExperimentPrototype):
                    print("{}: {}".format(attribute, name))
                    # NOTE(review): 'break' abandons the rest of this module's
                    # attributes as soon as the ExperimentPrototype base class
                    # itself is seen; any experiment class sorted after it in
                    # dir() is silently skipped.  A 'continue' may be intended
                    # here -- confirm before changing.
                    if 'ExperimentPrototype' in str(attribute):
                        break
                    attribute()
class TestExperimentExceptions(unittest.TestCase):
    """
    A unittest class to test various ways for an experiment to fail for the experiment_handler
    module. All test methods must begin with the word 'test' to be run by unittest.

    Note: test methods are attached to this class dynamically at runtime via
    setattr() in the __main__ block below, one per line of the test csv file.
    """

    def setUp(self):
        """
        This function is called before every test_* method (every test case in unittest lingo)
        """
        print("Method: ", self._testMethodName)
def test_generator(module_name, exception_msg_regex):
    """
    Generate a single test for the given module name and exception message

    :param module_name: Experiment module name, string (i.e. 'normalscan')
    :param exception_msg_regex: Error msg the experiment module is expected to return, regex string
    :returns: A test function (suitable for attaching to a unittest.TestCase
        via setattr) asserting that running the experiment raises an
        ExperimentException whose message matches exception_msg_regex.
    """
    # Closure over module_name/exception_msg_regex; the inner name 'test' is
    # what unittest will report for the generated method.
    def test(self):
        with self.assertRaisesRegex(ExperimentException, exception_msg_regex):
            ehmain(experiment=module_name)
    return test
if __name__ == '__main__':
    # Redirect stderr because it's annoying
    # null = open(os.devnull, 'w')
    # sys.stderr = null

    # Open the file given on command line with a set of tests, one per line.
    # File format is: [experiment module]::[string regex message that the experiment will raise]
    # Generate a single test for each of the lines in the file.
    try:
        with open(input_test_file) as test_suite_list:
            for test in test_suite_list.readlines():
                # Remove comment lines and empty lines
                if test.startswith('#') or test.strip() == '':
                    continue
                # Separate on double colon to ensure the regex msg isn't split
                exp_module_name = test.split('::')[0]
                exp_exception_msg_regex = test.split('::')[1]
                test = test_generator(exp_module_name, exp_exception_msg_regex.strip())
                # setattr is used to add properly named test methods to TestExperimentExceptions
                setattr(TestExperimentExceptions, exp_module_name, test)
        print("Done building tests")
    except (TypeError, OSError, IndexError):
        # Bug fix: a missing test file raises OSError (FileNotFoundError) and
        # a malformed line without '::' raises IndexError.  The original only
        # caught TypeError, which open() never raises for a string path, so a
        # missing file crashed instead of falling back to the basic tests.
        print("No extra tests supplied, only performing basic tests")

    unittest.main()
| gpl-3.0 |
sargas/scipy | scipy/sparse/linalg/eigen/lobpcg/lobpcg.py | 4 | 18421 | """
Pure SciPy implementation of Locally Optimal Block Preconditioned Conjugate
Gradient Method (LOBPCG), see
http://www-math.cudenver.edu/~aknyazev/software/BLOPEX/
License: BSD
Authors: Robert Cimrman, Andrew Knyazev
Examples in tests directory contributed by Nils Wagner.
"""
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
import scipy as sp
from scipy.lib.six.moves import xrange
from scipy.sparse.linalg import aslinearoperator, LinearOperator
__all__ = ['lobpcg']
## try:
## from symeig import symeig
## except:
## raise ImportError('lobpcg requires symeig')
def symeig( mtxA, mtxB = None, eigenvectors = True, select = None ):
    """Minimal stand-in for the old ``symeig`` package.

    Solves the (generalized) symmetric/Hermitian eigenproblem
    A x = lambda B x.  With ``select is None`` a dedicated LAPACK driver is
    used; otherwise a general ``eig`` call is made and a ``(lo, hi)`` index
    slice of the results is returned.  The trailing LAPACK info code is
    stripped from the returned tuple.
    """
    import scipy.linalg as sla
    if select is None:
        if np.iscomplexobj( mtxA ):
            # Hermitian drivers for complex matrices.
            if mtxB is None:
                fun = sla.get_lapack_funcs('heev', arrays=(mtxA,))
            else:
                fun = sla.get_lapack_funcs('hegv', arrays=(mtxA,))
        else:
            # Symmetric drivers for real matrices.
            if mtxB is None:
                fun = sla.get_lapack_funcs('syev', arrays=(mtxA,))
            else:
                fun = sla.get_lapack_funcs('sygv', arrays=(mtxA,))
        ##         print fun
        if mtxB is None:
            out = fun( mtxA )
        else:
            out = fun( mtxA, mtxB )

        # NOTE(review): this swaps the first two items of the driver's return
        # triple before dropping the trailing info code, yet callers unpack
        # the result as (eigenvalues, eigenvectors).  Whether this is correct
        # depends on the exact LAPACK wrapper return order -- confirm against
        # the scipy.linalg version in use.
        out = out[1], out[0], out[2]

        ##         print w
        ##         print v
        ##         print info
        ##         from symeig import symeig
        ##         print symeig( mtxA, mtxB )
    else:
        out = sla.eig( mtxA, mtxB, right = eigenvectors )
        w = out[0]
        ii = np.argsort( w )
        # NOTE(review): w is sliced without first being reordered by ii, while
        # v below IS reordered by ii -- eigenvalues and eigenvectors may thus
        # come from different positions.  Looks like w = w[ii][slice(*select)]
        # was intended; confirm before relying on the 'select' path.
        w = w[slice( *select )]
        if eigenvectors:
            v = out[1][:,ii]
            v = v[:,slice( *select )]
            out = w, v, 0
        else:
            out = w, 0

    return out[:-1]
def pause():
    # Block until the user presses Enter (used by the verbose debug paths
    # in lobpcg when verbosityLevel > 10).
    input()
def save( ar, fileName ):
    """Write array *ar* to *fileName* as text, one row per line.

    Bug fix: numpy.savetxt has no ``precision`` keyword (passing one raises
    TypeError); the intended 8-digit precision is requested via ``fmt``.
    """
    from numpy import savetxt
    savetxt( fileName, ar, fmt = '%.8e' )
##
# 21.05.2007, c
def as2d( ar ):
    """
    Return *ar* as a 2-D array: a 2-D input is handed back unchanged, while
    anything else is turned into a column vector of length ar.shape[0].
    """
    if ar.ndim == 2:
        return ar
    # np.array(..., copy=False) hands back the original object when it can,
    # and the shape assignment then reshapes it in place -- this exactly
    # preserves the original (mutating) behaviour.
    column = np.array( ar, copy = False )
    column.shape = (ar.shape[0], 1)
    return column
class CallableLinearOperator(LinearOperator):
    # On Python 3 special methods are looked up on the class, so assigning
    # operator.__call__ on an instance has no effect; makeOperator swaps an
    # operator's class to this one instead, making it callable on a block
    # vector via matmat.
    def __call__(self, x):
        return self.matmat(x)
def makeOperator( operatorInput, expectedShape ):
    """Internal. Takes a dense numpy array or a sparse matrix or
    a function and makes an operator performing matrix * blockvector
    products.

    A ``None`` input yields an identity operator of the expected shape.

    Examples
    --------
    >>> A = makeOperator( arrayA, (n, n) )
    >>> vectorB = A( vectorX )
    """
    if operatorInput is None:
        # No operator supplied: stand in with the identity.
        def ident(x):
            return x
        # NOTE(review): LinearOperator(shape, matvec, matmat=...) is the old
        # positional API; newer SciPy releases changed the constructor --
        # confirm against the SciPy version in use.
        operator = LinearOperator(expectedShape, ident, matmat=ident)
    else:
        operator = aslinearoperator(operatorInput)

    if operator.shape != expectedShape:
        raise ValueError('operator has invalid shape')

    if sys.version_info[0] >= 3:
        # special methods are looked up on the class -- so make a new one
        operator.__class__ = CallableLinearOperator
    else:
        operator.__call__ = operator.matmat

    return operator
def applyConstraints( blockVectorV, factYBY, blockVectorBY, blockVectorY ):
    """Internal. B-orthogonalize blockVectorV against the constraints Y,
    changing blockVectorV in place.

    Computes V -= Y (Y^T B Y)^{-1} (B Y)^T V using the prefactored Cholesky
    decomposition *factYBY* of Y^T B Y.
    """
    import scipy.linalg as sla
    # np.dot replaces the former sp.dot: SciPy no longer re-exports NumPy's
    # top-level functions, so scipy.dot is gone from current releases.
    gramYBV = np.dot( blockVectorBY.T, blockVectorV )
    tmp = sla.cho_solve( factYBY, gramYBV )
    blockVectorV -= np.dot( blockVectorY, tmp )
def b_orthonormalize( B, blockVectorV,
                      blockVectorBV = None, retInvR = False ):
    """Internal. B-orthonormalize the columns of blockVectorV.

    Returns (V, BV) with V^T B V = I; when *retInvR* is True the inverse
    Cholesky factor used for the normalization is appended to the result.
    When B is None, the returned BV shares data with the input.
    """
    import scipy.linalg as sla
    if blockVectorBV is None:
        if B is not None:
            blockVectorBV = B( blockVectorV )
        else:
            blockVectorBV = blockVectorV # Shared data!!!
    # np.dot replaces the former sp.dot: scipy.dot was removed from SciPy.
    gramVBV = np.dot( blockVectorV.T, blockVectorBV )
    gramVBV = sla.cholesky( gramVBV )
    gramVBV = sla.inv( gramVBV, overwrite_a = True )
    # gramVBV is now R^{-1}.
    blockVectorV = np.dot( blockVectorV, gramVBV )
    if B is not None:
        blockVectorBV = np.dot( blockVectorBV, gramVBV )

    if retInvR:
        return blockVectorV, blockVectorBV, gramVBV
    else:
        return blockVectorV, blockVectorBV
def lobpcg( A, X,
            B=None, M=None, Y=None,
            tol= None, maxiter=20,
            largest = True, verbosityLevel = 0,
            retLambdaHistory = False, retResidualNormsHistory = False ):
    """Solve symmetric partial eigenproblems with optional preconditioning

    This function implements the Locally Optimal Block Preconditioned
    Conjugate Gradient Method (LOBPCG).

    Parameters
    ----------
    A : {sparse matrix, dense matrix, LinearOperator}
        The symmetric linear operator of the problem, usually a
        sparse matrix.  Often called the "stiffness matrix".
    X : array_like
        Initial approximation to the k eigenvectors. If A has
        shape=(n,n) then X should have shape shape=(n,k).
    B : {dense matrix, sparse matrix, LinearOperator}, optional
        the right hand side operator in a generalized eigenproblem.
        by default, B = Identity
        often called the "mass matrix"
    M : {dense matrix, sparse matrix, LinearOperator}, optional
        preconditioner to A; by default M = Identity
        M should approximate the inverse of A
    Y : array_like, optional
        n-by-sizeY matrix of constraints, sizeY < n
        The iterations will be performed in the B-orthogonal complement
        of the column-space of Y. Y must be full rank.

    Returns
    -------
    w : array
        Array of k eigenvalues
    v : array
        An array of k eigenvectors.  V has the same shape as X.

    Other Parameters
    ----------------
    tol : scalar, optional
        Solver tolerance (stopping criterion)
        by default: tol=n*sqrt(eps)
    maxiter : integer, optional
        maximum number of iterations
        by default: maxiter=min(n,20)
    largest : boolean, optional
        when True, solve for the largest eigenvalues, otherwise the smallest
    verbosityLevel : integer, optional
        controls solver output.  default: verbosityLevel = 0.
    retLambdaHistory : boolean, optional
        whether to return eigenvalue history
    retResidualNormsHistory : boolean, optional
        whether to return history of residual norms

    Notes
    -----
    If both retLambdaHistory and retResidualNormsHistory are True, the
    return tuple has the following format
    (lambda, V, lambda history, residual norms history)

    """
    # Modernization fixes relative to the original: np.dot replaces sp.dot
    # (scipy.dot was removed), builtin range replaces xrange (scipy.lib.six
    # is gone), builtin bool replaces the np.bool alias (removed in NumPy
    # 1.24), and bare except clauses are narrowed.
    failureFlag = True
    import scipy.linalg as sla

    blockVectorX = X
    blockVectorY = Y
    residualTolerance = tol
    maxIterations = maxiter

    if blockVectorY is not None:
        sizeY = blockVectorY.shape[1]
    else:
        sizeY = 0

    # Block size.
    if len(blockVectorX.shape) != 2:
        raise ValueError('expected rank-2 array for argument X')

    n, sizeX = blockVectorX.shape
    if sizeX > n:
        raise ValueError('X column dimension exceeds the row dimension')

    A = makeOperator(A, (n,n))
    B = makeOperator(B, (n,n))
    M = makeOperator(M, (n,n))

    if (n - sizeY) < (5 * sizeX):
        # The problem size is small compared to the block size: fall back to
        # a dense eigensolver instead of LOBPCG.
        if blockVectorY is not None:
            raise NotImplementedError('symeig does not support constraints')

        if largest:
            lohi = (n - sizeX, n)
        else:
            lohi = (1, sizeX)

        A_dense = A(np.eye(n))

        if B is not None:
            B_dense = B(np.eye(n))
            _lambda, eigBlockVector = symeig(A_dense, B_dense, select=lohi )
        else:
            _lambda, eigBlockVector = symeig(A_dense, select=lohi )

        return _lambda, eigBlockVector

    if residualTolerance is None:
        residualTolerance = np.sqrt( 1e-15 ) * n

    maxIterations = min( n, maxIterations )

    if verbosityLevel:
        aux = "Solving "
        if B is None:
            aux += "standard"
        else:
            aux += "generalized"
        aux += " eigenvalue problem with"
        if M is None:
            aux += "out"
        aux += " preconditioning\n\n"
        aux += "matrix size %d\n" % n
        aux += "block size %d\n\n" % sizeX
        if blockVectorY is None:
            aux += "No constraints\n\n"
        else:
            if sizeY > 1:
                aux += "%d constraints\n\n" % sizeY
            else:
                aux += "%d constraint\n\n" % sizeY
        print(aux)

    ##
    # Apply constraints to X.
    if blockVectorY is not None:

        if B is not None:
            blockVectorBY = B( blockVectorY )
        else:
            blockVectorBY = blockVectorY

        # gramYBY is a dense array.
        gramYBY = np.dot( blockVectorY.T, blockVectorBY )
        try:
            # gramYBY is a Cholesky factor from now on...
            gramYBY = sla.cho_factor( gramYBY )
        except Exception:
            # except Exception (not a bare except): let KeyboardInterrupt and
            # SystemExit propagate instead of masking them as a ValueError.
            raise ValueError('cannot handle linearly dependent constraints')

        applyConstraints( blockVectorX, gramYBY, blockVectorBY, blockVectorY )

    ##
    # B-orthonormalize X.
    blockVectorX, blockVectorBX = b_orthonormalize( B, blockVectorX )

    ##
    # Compute the initial Ritz vectors: solve the eigenproblem.
    blockVectorAX = A( blockVectorX )
    gramXAX = np.dot( blockVectorX.T, blockVectorAX )
    # gramXBX is X^T * X.
    gramXBX = np.dot( blockVectorX.T, blockVectorX )

    _lambda, eigBlockVector = symeig( gramXAX )
    ii = np.argsort( _lambda )[:sizeX]
    if largest:
        ii = ii[::-1]
    _lambda = _lambda[ii]

    eigBlockVector = np.asarray( eigBlockVector[:,ii] )
    blockVectorX = np.dot( blockVectorX, eigBlockVector )
    blockVectorAX = np.dot( blockVectorAX, eigBlockVector )
    if B is not None:
        blockVectorBX = np.dot( blockVectorBX, eigBlockVector )

    ##
    # Active index set.
    # dtype=bool: np.bool was a deprecated alias removed in NumPy 1.24.
    activeMask = np.ones( (sizeX,), dtype = bool )

    lambdaHistory = [_lambda]
    residualNormsHistory = []

    previousBlockSize = sizeX
    ident = np.eye( sizeX, dtype = A.dtype )
    ident0 = np.eye( sizeX, dtype = A.dtype )

    ##
    # Main iteration loop.
    for iterationNumber in range( maxIterations ):
        if verbosityLevel > 0:
            print('iteration %d' % iterationNumber)

        aux = blockVectorBX * _lambda[np.newaxis,:]
        blockVectorR = blockVectorAX - aux

        aux = np.sum( blockVectorR.conjugate() * blockVectorR, 0 )
        residualNorms = np.sqrt( aux )

        residualNormsHistory.append( residualNorms )

        ii = np.where( residualNorms > residualTolerance, True, False )
        activeMask = activeMask & ii
        if verbosityLevel > 2:
            print(activeMask)

        currentBlockSize = activeMask.sum()
        if currentBlockSize != previousBlockSize:
            previousBlockSize = currentBlockSize
            ident = np.eye( currentBlockSize, dtype = A.dtype )

        if currentBlockSize == 0:
            failureFlag = False # All eigenpairs converged.
            break

        if verbosityLevel > 0:
            print('current block size:', currentBlockSize)
            print('eigenvalue:', _lambda)
            print('residual norms:', residualNorms)
        if verbosityLevel > 10:
            print(eigBlockVector)

        activeBlockVectorR = as2d( blockVectorR[:,activeMask] )

        if iterationNumber > 0:
            activeBlockVectorP = as2d( blockVectorP [:,activeMask] )
            activeBlockVectorAP = as2d( blockVectorAP[:,activeMask] )
            activeBlockVectorBP = as2d( blockVectorBP[:,activeMask] )

        if M is not None:
            # Apply preconditioner T to the active residuals.
            activeBlockVectorR = M( activeBlockVectorR )

        ##
        # Apply constraints to the preconditioned residuals.
        if blockVectorY is not None:
            applyConstraints( activeBlockVectorR,
                              gramYBY, blockVectorBY, blockVectorY )

        ##
        # B-orthonormalize the preconditioned residuals.
        aux = b_orthonormalize( B, activeBlockVectorR )
        activeBlockVectorR, activeBlockVectorBR = aux

        activeBlockVectorAR = A( activeBlockVectorR )

        if iterationNumber > 0:
            aux = b_orthonormalize( B, activeBlockVectorP,
                                    activeBlockVectorBP, retInvR = True )
            activeBlockVectorP, activeBlockVectorBP, invR = aux
            activeBlockVectorAP = np.dot( activeBlockVectorAP, invR )

        ##
        # Perform the Rayleigh Ritz Procedure:
        # Compute symmetric Gram matrices:
        xaw = np.dot( blockVectorX.T, activeBlockVectorAR )
        waw = np.dot( activeBlockVectorR.T, activeBlockVectorAR )
        xbw = np.dot( blockVectorX.T, activeBlockVectorBR )

        if iterationNumber > 0:
            xap = np.dot( blockVectorX.T, activeBlockVectorAP )
            wap = np.dot( activeBlockVectorR.T, activeBlockVectorAP )
            pap = np.dot( activeBlockVectorP.T, activeBlockVectorAP )
            xbp = np.dot( blockVectorX.T, activeBlockVectorBP )
            wbp = np.dot( activeBlockVectorR.T, activeBlockVectorBP )

            gramA = np.bmat( [[np.diag( _lambda ), xaw, xap],
                              [ xaw.T, waw, wap],
                              [ xap.T, wap.T, pap]] )

            gramB = np.bmat( [[ident0, xbw, xbp],
                              [ xbw.T, ident, wbp],
                              [ xbp.T, wbp.T, ident]] )
        else:
            gramA = np.bmat( [[np.diag( _lambda ), xaw],
                              [ xaw.T, waw]] )
            gramB = np.bmat( [[ident0, xbw],
                              [ xbw.T, ident]] )

        try:
            assert np.allclose( gramA.T, gramA )
        except AssertionError:
            print(gramA.T - gramA)
            raise

        try:
            assert np.allclose( gramB.T, gramB )
        except AssertionError:
            print(gramB.T - gramB)
            raise

        if verbosityLevel > 10:
            save( gramA, 'gramA' )
            save( gramB, 'gramB' )

        ##
        # Solve the generalized eigenvalue problem.
        # _lambda, eigBlockVector = la.eig( gramA, gramB )
        _lambda, eigBlockVector = symeig( gramA, gramB )
        ii = np.argsort( _lambda )[:sizeX]
        if largest:
            ii = ii[::-1]
        if verbosityLevel > 10:
            print(ii)

        _lambda = _lambda[ii].astype( np.float64 )
        eigBlockVector = np.asarray( eigBlockVector[:,ii].astype( np.float64 ) )

        lambdaHistory.append( _lambda )

        if verbosityLevel > 10:
            print('lambda:', _lambda)
##         # Normalize eigenvectors!
##         aux = np.sum( eigBlockVector.conjugate() * eigBlockVector, 0 )
##         eigVecNorms = np.sqrt( aux )
##         eigBlockVector = eigBlockVector / eigVecNorms[np.newaxis,:]
#        eigBlockVector, aux = b_orthonormalize( B, eigBlockVector )

        if verbosityLevel > 10:
            print(eigBlockVector)
            pause()

        ##
        # Compute Ritz vectors.
        if iterationNumber > 0:
            eigBlockVectorX = eigBlockVector[:sizeX]
            eigBlockVectorR = eigBlockVector[sizeX:sizeX+currentBlockSize]
            eigBlockVectorP = eigBlockVector[sizeX+currentBlockSize:]

            pp = np.dot( activeBlockVectorR, eigBlockVectorR )
            pp += np.dot( activeBlockVectorP, eigBlockVectorP )

            app = np.dot( activeBlockVectorAR, eigBlockVectorR )
            app += np.dot( activeBlockVectorAP, eigBlockVectorP )

            bpp = np.dot( activeBlockVectorBR, eigBlockVectorR )
            bpp += np.dot( activeBlockVectorBP, eigBlockVectorP )
        else:
            eigBlockVectorX = eigBlockVector[:sizeX]
            eigBlockVectorR = eigBlockVector[sizeX:]

            pp = np.dot( activeBlockVectorR, eigBlockVectorR )
            app = np.dot( activeBlockVectorAR, eigBlockVectorR )
            bpp = np.dot( activeBlockVectorBR, eigBlockVectorR )

        if verbosityLevel > 10:
            print(pp)
            print(app)
            print(bpp)
            pause()

        blockVectorX = np.dot( blockVectorX, eigBlockVectorX ) + pp
        blockVectorAX = np.dot( blockVectorAX, eigBlockVectorX ) + app
        blockVectorBX = np.dot( blockVectorBX, eigBlockVectorX ) + bpp

        blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp

    aux = blockVectorBX * _lambda[np.newaxis,:]
    blockVectorR = blockVectorAX - aux

    aux = np.sum( blockVectorR.conjugate() * blockVectorR, 0 )
    residualNorms = np.sqrt( aux )

    if verbosityLevel > 0:
        print('final eigenvalue:', _lambda)
        print('final residual norms:', residualNorms)

    if retLambdaHistory:
        if retResidualNormsHistory:
            return _lambda, blockVectorX, lambdaHistory, residualNormsHistory
        else:
            return _lambda, blockVectorX, lambdaHistory
    else:
        if retResidualNormsHistory:
            return _lambda, blockVectorX, residualNormsHistory
        else:
            return _lambda, blockVectorX
###########################################################################
if __name__ == '__main__':
    from scipy.sparse import spdiags, speye, issparse
    import time

##     def B( vec ):
##         return vec

    n = 100
    vals = [np.arange( n, dtype = np.float64 ) + 1]
    A = spdiags( vals, 0, n, n )
    B = speye( n, n )
#    B[0,0] = 0
    B = np.eye( n, n )
    Y = np.eye( n, 3 )

#    X = sp.rand( n, 3 )
    xfile = {100 : 'X.txt', 1000 : 'X2.txt', 10000 : 'X3.txt'}
    X = np.fromfile( xfile[n], dtype = np.float64, sep = ' ' )
    X.shape = (n, 3)

    ivals = [1./vals[0]]
    def precond( x ):
        invA = spdiags( ivals, 0, n, n )
        y = invA  * x
        if issparse( y ):
            y = y.toarray()

        return as2d( y )

    precond = spdiags( ivals, 0, n, n )
#    precond = None

    # time.clock() was removed in Python 3.8; perf_counter() is the
    # recommended replacement for wall-clock timing.
    tt = time.perf_counter()
#    B = None
    # Bug fix: lobpcg's signature is lobpcg(A, X, B=None, M=None, Y=None,
    # tol=None, maxiter=20, ...).  The original call passed (X, A, ...) in
    # swapped order and used keyword names (blockVectorY, residualTolerance,
    # maxIterations) that do not exist, raising TypeError immediately.
    eigs, vecs = lobpcg( A, X, B, M = precond, Y = Y,
                         tol = 1e-4, maxiter = 40,
                         largest = False, verbosityLevel = 1 )
    print('solution time:', time.perf_counter() - tt)
    print(vecs)
    print(eigs)
| bsd-3-clause |
DCSaunders/tensorflow | tensorflow/contrib/framework/python/ops/ops.py | 74 | 2164 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions used to construct graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
__all__ = ['get_graph_from_inputs']
def get_graph_from_inputs(op_input_list, graph=None):
  """Returns the appropriate graph to use for the given inputs.

  1. If `graph` is provided, we validate that all inputs in `op_input_list` are
     from the same graph.
  2. Otherwise, we attempt to select a graph from the first Operation- or
     Tensor-valued input in `op_input_list`, and validate that all other
     such inputs are in the same graph.
  3. If the graph was not specified and it could not be inferred from
     `op_input_list`, we attempt to use the default graph.

  Args:
    op_input_list: A list of inputs to an operation, which may include `Tensor`,
      `Operation`, and other objects that may be converted to a graph element.
    graph: (Optional) The explicit graph to use.

  Raises:
    TypeError: If `op_input_list` is not a list or tuple, or if graph is not a
      Graph.
    ValueError: If a graph is explicitly passed and not all inputs are from it,
      or if the inputs are from multiple graphs, or we could not find a graph
      and there was no default graph.

  Returns:
    The appropriate graph to use for the given inputs.
  """
  # Thin public wrapper: the selection/validation logic described above lives
  # entirely in the private framework helper.
  # pylint: disable=protected-access
  return ops._get_graph_from_inputs(op_input_list, graph)
| apache-2.0 |
rcalsaverini/dictUtils | __init__.py | 1 | 1573 |
def keyset(d):
    """Return the keys of dict *d* as a set."""
    # Iterating a dict yields its keys directly; no need for d.keys().
    return set(d)
def union(d1, d2):
    """Return the set of keys appearing in either *d1* or *d2*."""
    # The set-union operator on the dicts' key sets is direct and avoids the
    # intermediate method-call chain of set(...).union(...).
    return set(d1) | set(d2)
def zip(d1, d2, std = None):
    """Return a dictionary of tuples by pairing values that share a key.

    Keys missing from either dictionary are filled with *std*. Ex.:
    > zip({'name': "John", 'age': 20, 'favoriteMovie': "Godfather"}, {'name': "Mary", 'age': 26}, std = None)
    {'name': ("John", "Mary"), 'age': (20, 26), 'favoriteMovie': ("Godfather", None)}
    """
    all_keys = set(d1) | set(d2)
    return {key: (d1.get(key, std), d2.get(key, std)) for key in all_keys}
def zipWith(function, d1, d2, std = None):
    """Return a dictionary resulting from appling a binary function over values with the same keys. Ex.:

    > purchase1 = {"mushrooms": 20, "beer": 293, "clothes": 1}
    > purchase2 = {"cognac": 1, "beer": 20, "cars": 2}
    > zipWith(lambda x,y : x + y, purchase1, purchase2, std = 0.0)
    {"mushrooms": 20, "beer": 313, "clothes": 1, "cognac": 1, "cars":2}
    """
    zipped = zip(d1, d2, std = std)
    # BUG FIX: dict.iteritems() is Python-2-only and raises AttributeError on
    # Python 3; dict.items() behaves identically here on both versions.
    return {key: function(value1, value2) for (key, (value1, value2)) in zipped.items()}
if __name__ == "__main__":
    # Inline smoke tests exercising zip() and zipWith() with the examples
    # from their docstrings.
    test1 = zip({'name': "John", 'age': 20, 'favoriteMovie': "Godfather"}, {'name': "Mary", 'age': 26}, std = None)
    assert test1 == {'name': ("John", "Mary"), 'age': (20, 26), 'favoriteMovie': ("Godfather", None)}
    purchase1 = {"mushrooms": 20, "beer": 293, "clothes": 1}
    purchase2 = {"cognac": 1, "beer": 20, "cars": 2}
    # std=0.0 fills keys present in only one purchase before summing.
    test2 = zipWith(lambda x,y : x + y, purchase1, purchase2, std = 0.0)
    assert test2 == {"mushrooms": 20, "beer": 313, "clothes": 1, "cognac": 1, "cars":2}
| mit |
numenta/nupic.research | projects/dynamic_sparse/experiments/test_customresnet.py | 2 | 1950 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2019, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from nupic.research.archive.dynamic_sparse.common.utils import run_ray
# experiment configurations
# Experiment configuration: ResNet-50 on ImageNet (100 classes) trained with
# SGD + multi-step LR decay, dispatched through ray tune via run_ray().
base_exp_config = dict(
    device="cuda",
    dataset_name="ImageNet",
    use_multiple_gpus=True,
    model="BaseModel",
    data_dir="~/nta/datasets",
    num_classes=100,
    epochs=120,
    batch_size_train=1024,
    batch_size_test=1024,
    # ---- network related
    network="resnet50",
    pretrained=False,
    # ---- optimizer related
    optim_alg="SGD",
    learning_rate=0.1,
    lr_scheduler="MultiStepLR",
    lr_milestones=[55, 85, 105],  # epochs at which LR is multiplied by lr_gamma
    lr_gamma=0.1,
    weight_decay=1e-5,
    momentum=0.9,
    nesterov_momentum=True,
    # optim_alg="Adam",
    # learning_rate=2e-3,
    # weight_decay=2e-4,
)

# ray configurations
tune_config = dict(
    num_samples=1,
    # Use this file's name (without extension) as the trial name.
    name=__file__.replace(".py", ""),
    checkpoint_freq=0,
    checkpoint_at_end=True,
    resources_per_trial={"cpu": 60, "gpu": 8},
    verbose=2,
)

run_ray(tune_config, base_exp_config)
| agpl-3.0 |
stackforge/monasca-notification | monasca_notification/conf/keystone.py | 2 | 2004 | # Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
# Option group and options controlling Keystone authentication for the
# notification engine.
keystone_group = cfg.OptGroup('keystone',
                              title='Keystone Options',
                              help='Options under this group allow to configure '
                                   'valid connection via Keystone '
                                   'authentication.')

keystone_opts = [
    # BUG FIX: the default was the *string* 'False', which is truthy wherever
    # the option value is consumed as a boolean; use the real boolean False.
    cfg.BoolOpt(name='auth_required', default=False,
                help='This option enable or disable authentication using '
                     'keystone'),
    cfg.StrOpt(name='auth_url', default='http://127.0.0.1/identity/v3',
               help='URL of identity service'),
    cfg.StrOpt(name='username', default='admin',
               help='Username'),
    cfg.StrOpt(name='password', default='password',
               help='Password of identity service'),
    cfg.StrOpt(name='project_name', default='admin',
               help='Name of project'),
    cfg.StrOpt(name='user_domain_name', default='default',
               help='User domain name'),
    cfg.StrOpt(name='project_domain_name', default='default',
               help='Project domain name'),
    cfg.StrOpt(name='auth_type', default='password',
               help='Type of authentication')
]
def register_opts(conf):
    """Register the keystone option group and its options on *conf*."""
    conf.register_group(keystone_group)
    conf.register_opts(keystone_opts, group=keystone_group)
def list_opts():
    """Return the options of this module, keyed by group, for oslo sample
    config generation."""
    return {
        keystone_group: keystone_opts
    }
| apache-2.0 |
tpsatish95/OCR-on-Indus-Seals | code/Test/TextROI.py | 1 | 16306 | # -*- coding: utf-8 -*-
import skimage.io
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import selectivesearch
import numpy as np
import skimage.transform
import os
import shutil
import caffe
from PIL import Image
# Module-level working state shared by the functions below; each set holds
# (x, y, w, h) rectangles. These are reset per image inside the main loop.
candidates = set()          # raw selective-search proposals
merged_candidates = set()   # proposals after near-duplicate merging
refined = set()             # merged boxes with contained boxes removed
final = set()               # boxes after overlap-based superbox merging
final_extended = set()      # superboxes extended along text rows
text_boxes = set()          # regions classified as containing text
text = set()                # merged text row boxes
text_cut = set()            # text boxes trimmed against non-text regions
no_text = set()             # regions classified as containing no text
text_cut_final = set()      # final text boxes after extension/trimming
def getClass(FileList):
    """Classify each image path in *FileList* with the GoogLeNet Indus model.

    Returns the raw caffe prediction array (one probability vector per input).
    """
    caffe.set_mode_gpu()  # run inference on the GPU
    net = caffe.Classifier(
        "../ROIs_Indus/deploy.prototxt",
        "../ROIs_Indus/Models/bvlc_googlenet_indusnet_iter_20000.caffemodel",
        image_dims=[224, 224],
        raw_scale=255.0,
        channel_swap=[2, 1, 0],  # RGB -> BGR, as the model expects
    )
    images = [caffe.io.load_image(path) for path in FileList]
    print("Classifying %d inputs." % len(images))
    return net.predict(images)
def texbox_ext():
    """Extend boxes in `text` sideways to absorb overlapping `both_text` boxes.

    Results accumulate in the global `text_cut_final`. Operates purely on
    module-level state; takes no arguments and returns nothing.
    """
    global text
    global both_text
    global text_cut_final
    for x, y, w, h in text:
        A = {'x1': x, 'y1': y, 'x2': x+w, 'y2': y+h, 'w': w, 'h': h}
        for x1, y1, w1, h1 in both_text:
            B = {'x1': x1, 'y1': y1, 'x2': x1+w1, 'y2': y1+h1, 'w': w1, 'h': h1}
            # overlap between A and B: SI is the intersection area, SU the
            # union area, so overlap_AB is intersection-over-union.
            SA = A['w']*A['h']
            SB = B['w']*B['h']
            SI = np.max([ 0, np.min([A['x2'],B['x2']]) - np.max([A['x1'],B['x1']]) ]) * np.max([ 0, np.min([A['y2'],B['y2']]) - np.max([A['y1'],B['y1']]) ])
            SU = SA + SB - SI
            overlap_AB = float(SI) / float(SU)
            overf = 0
            ax1,ay1,aw,ah = A['x1'],A['y1'],A['w'],A['h']
            if overlap_AB > 0.0:
                # B's right edge is close to A's left edge: stretch A left.
                if A['x1'] > B['x1'] and abs(B['x1']+B['w'] - A['x1']) < A['w']*0.20: # B is left to A
                    ax1 = B['x1']
                    aw = A['x1'] + A['w'] - B['x1']
                    overf = 1
                # if A['y1'] < B['y1'] and abs(A['y1']-B['y1']) > A['h']*0.70: # B is bottom to A
                #     ah = A['h'] - (A['y1']+A['h'] - B['y1'])
                #     overf = 1
                # if A['y1'] > B['y1']: # B is top to A
                #     ay1 = B['y1'] + B['h']
                # B starts right of A's left edge: stretch A right to B's edge.
                if A['x1'] < B['x1']: # B is right to A
                    aw = B['x1']+B['w'] - A['x1']
                    overf = 1
                # if A['y1'] < B['y1']: # B is bottom to A
                #     ah = A['h'] - (A['y1']+A['h'] - B['y1'])
            # REPLACE by Cohen Suderland algo
            A['x1'],A['y1'],A['w'],A['h'] = ax1,ay1,aw,ah
            text_cut_final.add((A['x1'],A['y1'],A['w'],A['h']))
            if overf == 1:
                # Stop after the first box that actually extended A.
                break
        text_cut_final.add((A['x1'],A['y1'],A['w'],A['h']))
    # Drop the raw both_text boxes themselves from the result set.
    text_cut_final = text_cut_final - both_text # CHANGE THIS LINE
def texbox_cut():
    """Trim boxes in `text` so they do not overlap non-text regions.

    Merges `both_text` into `no_text`, then for each text box clips its left
    edge / height against overlapping non-text boxes. Results accumulate in
    the global `text_cut`. Operates purely on module-level state.
    """
    global no_text
    no_text = no_text.union(both_text)
    for x, y, w, h in text:
        A = {'x1': x, 'y1': y, 'x2': x+w, 'y2': y+h, 'w': w, 'h': h}
        for x1, y1, w1, h1 in no_text:
            B = {'x1': x1, 'y1': y1, 'x2': x1+w1, 'y2': y1+h1, 'w': w1, 'h': h1}
            # overlap between A and B: intersection-over-union.
            SA = A['w']*A['h']
            SB = B['w']*B['h']
            SI = np.max([ 0, np.min([A['x2'],B['x2']]) - np.max([A['x1'],B['x1']]) ]) * np.max([ 0, np.min([A['y2'],B['y2']]) - np.max([A['y1'],B['y1']]) ])
            SU = SA + SB - SI
            overlap_AB = float(SI) / float(SU)
            overf = 0
            ax1,ay1,aw,ah = A['x1'],A['y1'],A['w'],A['h']
            if overlap_AB > 0.0:
                # Non-text box intrudes from the left: move A's left edge to
                # B's right edge (width is left unchanged here).
                if A['x1'] > B['x1'] and abs(B['x1']+B['w'] - A['x1']) < A['w']*0.20: # B is left to A
                    ax1 = B['x1'] + B['w']
                    overf = 1
                # Non-text box intrudes from below: shrink A's height.
                if A['y1'] < B['y1'] and abs(A['y1']-B['y1']) > A['h']*0.70: # B is bottom to A
                    ah = A['h'] - (A['y1']+A['h'] - B['y1'])
                    overf = 1
                # if A['y1'] > B['y1']: # B is top to A
                #     ay1 = B['y1'] + B['h']
                # if A['x1'] < B['x1']: # B is right to A
                #     aw = A['w'] - (A['x1']+A['w'] - B['x1'])
                # if A['y1'] < B['y1']: # B is bottom to A
                #     ah = A['h'] - (A['y1']+A['h'] - B['y1'])
            # REPLACE by Cohen Suderland algo
            A['x1'],A['y1'],A['w'],A['h'] = ax1,ay1,aw,ah
            text_cut.add((A['x1'],A['y1'],A['w'],A['h']))
            if overf == 1:
                # Stop after the first box that actually clipped A.
                break
        text_cut.add((A['x1'],A['y1'],A['w'],A['h']))
def extend_text_rect(l):
    """Merge boxes (x, y, w, h) in *l* into a single row-spanning box.

    The result runs from the leftmost x / topmost y to the rightmost edge,
    and its height is the tallest individual height (not the vertical extent).
    """
    left = min(box[0] for box in l)
    top = min(box[1] for box in l)
    right = max(box[0] + box[2] for box in l)
    tallest = max(box[3] for box in l)
    return (left, top, right - left, tallest)
def draw_textbox():
    """Group `text_boxes` that sit on the same row into merged boxes in `text`.

    Boxes whose y-position and height differ by at most 25% of the mean image
    dimension are considered one text row and merged via extend_text_rect().
    Operates purely on module-level state.
    """
    global width, height
    # Row-matching tolerance: 25% of the average image dimension.
    thresh = ((width+height)/2)*(0.25)
    tempc = set()  # boxes already assigned to some row group
    for x, y, w, h in text_boxes:
        if (x, y, w, h) in tempc: continue
        temp = set()
        temp.add((x, y, w, h))
        f = 0  # set to 1 when at least one row-mate is found
        for x1, y1, w1, h1 in text_boxes:
            if abs(y1-y) <= thresh and abs(h1-h) <= thresh:
                temp.add((x1, y1, w1, h1))
                tempc.add((x1, y1, w1, h1))
                f = 1
        if f == 0:
            # NOTE(review): the inner loop also visits the box itself, which
            # always satisfies the tolerance check, so f == 0 looks
            # unreachable — confirm intent.
            text.add((x, y, w, h))
        text.add(extend_text_rect(temp))
def contains():
    """Test whether the box in global `p` is contained in some candidate box.

    NOTE(review): this reads a module-global `p` that is never defined
    anywhere in this file, so calling this function raises NameError; it
    appears to be dead code superseded by contains_remove(). Confirm before
    relying on it.
    """
    x1, y1, w1, h1 = p
    for x, y, w, h in candidates:
        # p lies fully inside an existing candidate.
        if x1>=x and y1 >= y and x1+w1 <= x+w and y1+h1 <= y+h:
            return True
        # p fully encloses an existing candidate: drop the smaller one.
        # NOTE(review): removing from `candidates` while iterating it is
        # only safe here because the function returns immediately after.
        if x1<=x and y1 <= y and x1+w1 >= x+w and y1+h1 >= y+h:
            candidates.remove((x, y, w, h))
            return False
    return False
def extend_rect(l):
    """Merge boxes (x, y, w, h) in *l* into one horizontally-spanning box.

    Width covers the full horizontal extent; height is the tallest single
    box height (same behaviour as extend_text_rect).
    """
    xs = [b[0] for b in l]
    ys = [b[1] for b in l]
    right_edges = [b[0] + b[2] for b in l]
    heights = [b[3] for b in l]
    return (min(xs), min(ys), max(right_edges) - min(xs), max(heights))
def extend_superbox():
    """Group `final` boxes that share a row and merge each group via
    extend_rect(), accumulating results in `final_extended`.

    Row tolerance is 6% of the mean image dimension. Operates purely on
    module-level state.
    """
    global width, height
    thresh = ((width+height)/2)*(0.06)
    tempc = set()  # boxes already assigned to some row group
    for x, y, w, h in final:
        if (x, y, w, h) in tempc: continue
        temp = set()
        temp.add((x, y, w, h))
        for x1, y1, w1, h1 in final:
            if abs(y1-y) <= thresh and abs(h1-h) <= thresh:
                temp.add((x1, y1, w1, h1))
                tempc.add((x1, y1, w1, h1))
        final_extended.add(extend_rect(temp))
def draw_superbox(finals=[]):
    """Repeatedly merge overlapping boxes until no overlaps remain.

    Starts from *finals* if given, otherwise from the global `refined`, and
    rebuilds the global `final` on every pass; recurses until a pass produces
    no merges.

    NOTE(review): the mutable default `finals=[]` is never mutated, only
    compared against [], so it is harmless here — but fragile.
    """
    noover = []   # one flag per processed box: True = no overlap found
    refinedT = []
    global final
    final = set()
    # (x1,y1) top-left coord, (x2,y2) bottom-right coord, (w,h) size
    if finals != []:
        refinedT = finals
    else:
        refinedT = refined
    remp = set(refinedT)   # boxes not yet consumed
    ref = list(refinedT)   # work queue
    while len(ref) > 0:
        x1, y1, w1, h1 = ref[0]
        if len(ref) == 1: # final box
            final.add((x1, y1, w1, h1))
            ref.remove((x1, y1, w1, h1))
            remp.remove((x1, y1, w1, h1))
        else:
            ref.remove((x1, y1, w1, h1))
            remp.remove((x1, y1, w1, h1))
            over = set()  # boxes overlapping the current one
            for x2, y2, w2, h2 in remp:
                A = {'x1': x1, 'y1': y1, 'x2': x1+w1, 'y2': y1+h1, 'w': w1, 'h': h1}
                B = {'x1': x2, 'y1': y2, 'x2': x2+w2, 'y2': y2+h2, 'w': w2, 'h': h2}
                # overlap between A and B: SI intersection, SU union area.
                SA = A['w']*A['h']
                SB = B['w']*B['h']
                SI = np.max([ 0, np.min([A['x2'],B['x2']]) - np.max([A['x1'],B['x1']]) ]) * np.max([ 0, np.min([A['y2'],B['y2']]) - np.max([A['y1'],B['y1']]) ])
                SU = SA + SB - SI
                overlap_AB = float(SI) / float(SU)
                overlap_A = float(SI) / float(SA)
                overlap_B = float(SI) / float(SB)
                # print(overlap_AB)
                # Merge when either box is at least 40% covered.
                if overlap_A >= 0.40 or overlap_B >= 0.40:
                    over.add((B['x1'],B['y1'],B['w'],B['h']))
            # print(len(over))
            if len(over) != 0: #Overlap
                remp = remp - over
                for i in over: ref.remove(i)
                over.add((A['x1'],A['y1'],A['w'],A['h']))
                # print(over)
                # Replace the group by its bounding box.
                final.add((min([i[0] for i in over]), min([i[1] for i in over]), max([i[0]+i[2] for i in over]) - min([i[0] for i in over]), max([i[1]+i[3] for i in over]) - min([i[1] for i in over])))
                # final.add((np.mean([i[0] for i in over]), np.mean([i[1] for i in over]), np.mean([i[2] for i in over]), np.mean([i[3] for i in over])))
                noover.append(False)
            else: #No overlap
                final.add((x1,y1,w1,h1))
                noover.append(True)
    if all(noover):
        return
    else:
        # At least one merge happened; run another pass over the merged set.
        draw_superbox(final)
        return
def contains_remove():
    """Copy boxes from `merged_candidates` into `refined`, skipping any box
    that fully contains another box.

    Operates purely on module-level state.
    """
    for x, y, w, h in merged_candidates:
        f = False
        temp = set(merged_candidates)
        temp.remove((x, y, w, h))
        for x1, y1, w1, h1 in temp:
            # Another box lies fully inside (x, y, w, h): reject the outer box.
            if x1>=x and y1 >= y and x1+w1 <= x+w and y1+h1 <= y+h:
                f = False
                break
            # if x1<=x and y1 <= y and x1+w1 >= x+w and y1+h1 >= y+h:
            else:
                f = True
        if f == True:
            refined.add((x, y, w, h))
# def contains_remove():
# for x, y, w, h in merged_candidates:
# temp = set(merged_candidates)
# temp.remove((x, y, w, h))
# test = []
# for x1, y1, w1, h1 in temp:
# A = {'x1': x, 'y1': y, 'x2': x+w, 'y2': y+h, 'w': w, 'h': h}
# B = {'x1': x1, 'y1': y1, 'x2': x1+w1, 'y2': y1+h1, 'w': w1, 'h': h1}
# # overlap between A and B
# SA = A['w']*A['h']
# SB = B['w']*B['h']
# SI = np.max([ 0, np.min([A['x2'],B['x2']]) - np.max([A['x1'],B['x1']]) ]) * np.max([ 0, np.min([A['y2'],B['y2']]) - np.max([A['y1'],B['y1']]) ])
# SU = SA + SB - SI
# overlap_AB = float(SI) / float(SU)
# if overlap_AB > 0.0:
# # if x1>=x and y1 >= y and x1+w1 <= x+w and y1+h1 <= y+h:
# if x1<=x and y1 <= y and x1+w1 >= x+w and y1+h1 >= y+h:
# test.append(False)
# else:
# test.append(True)
# else:
# test.append(True)
# if all(test):
# refined.add((x, y, w, h))
def mean_rect(l):
    """Return the bounding box of the boxes (x, y, w, h) in *l*.

    Despite the name, this is the enclosing rectangle, not an average.
    """
    left = min(r[0] for r in l)
    top = min(r[1] for r in l)
    right = max(r[0] + r[2] for r in l)
    bottom = max(r[1] + r[3] for r in l)
    return (left, top, right - left, bottom - top)
def merge():
    """Cluster near-identical `candidates` and store each cluster's bounding
    box in `merged_candidates`, then prune contained boxes via
    contains_remove().

    Two boxes are "near-identical" when every component differs by at most
    14% of the mean image dimension. Operates purely on module-level state.
    """
    global width, height
    thresh = int(((width+height)/2)*(0.14))
    tempc = set()  # boxes already assigned to some cluster
    for x, y, w, h in candidates:
        if (x, y, w, h) in tempc: continue
        temp = set()
        temp.add((x, y, w, h))
        for x1, y1, w1, h1 in candidates:
            if abs(x1-x) <= thresh and abs(y1-y) <= thresh and abs(w1-w) <= thresh and abs(h1-h) <= thresh:
                temp.add((x1, y1, w1, h1))
                tempc.add((x1, y1, w1, h1))
        merged_candidates.add(mean_rect(temp))
    contains_remove()
# Main pipeline: for every image, run selective search at several scales,
# merge/refine the proposals, classify each region as text / no-text / both,
# then trim and save the final text boxes. If no acceptable text box is
# found, retry with a different working resolution (up to 3 sizes).
for name in os.listdir("./Images"):
    # Reset all per-image global state.
    candidates = set()
    merged_candidates = set()
    refined = set()
    final = set()
    final_extended = set()
    text_boxes = set()
    text = set()
    text_cut = set()
    no_text = set()
    print("Processing Image " + name.split(".")[0])
    fname = "./Images/" + name
    print(fname)
    img = skimage.io.imread(fname)
    width = len(img[0])
    height = len(img)
    # new_size = 256
    # height = int(new_size * height / width)
    # width = new_size
    # Heuristic choice of working resolution based on the original area,
    # preserving the aspect ratio (width is the reference dimension).
    if width*height < 256*256*(0.95) and abs(width-height) <= 3 :
        new_size = 512
        height = int(new_size * height / width)
        width = new_size
        print("A")
    elif width*height < 220*220*(1.11):
        new_size = 256
        height = int(new_size * height / width)
        width = new_size
        print("B")
    elif width*height < 256*256:
        new_size = 256
        height = int(new_size * height / width)
        width = new_size
        print("B1")
    elif width*height > 512*512*(0.99) and width < 800 and height < 800:
        new_size = 512
        height = int(new_size * height / width)
        width = new_size
        print("C")
    elif width*height < 512*512*(0.95) and width*height > 256*256*(1.15):
        new_size = 512
        height = int(new_size * height / width)
        width = new_size
        print("D")
    tried = []  # working widths already attempted for this image
    while True:
        tried.append(width)
        # Reset state for this resolution attempt.
        candidates = set()
        merged_candidates = set()
        refined = set()
        final = set()
        final_extended = set()
        text_boxes = set()
        text = set()
        text_cut = set()
        no_text = set()
        stage = 1
        text_cut_final = set()
        # Selective search over a small grid of scale/min-size settings.
        for sc in [350,450,500]:
            for sig in [0.8]:
                for mins in [30,60,120]: # important
                    img = skimage.io.imread(fname)[:,:,:3]
                    if height == len(img) and width == len(img[0]):
                        pass
                    else:
                        img = skimage.transform.resize(img, (height, width))
                    img_lbl, regions = selectivesearch.selective_search(
                        img, scale=sc, sigma= sig,min_size = mins)
                    for r in regions:
                        # excluding same rectangle (with different segments)
                        if r['rect'] in candidates:
                            continue
                        # excluding regions smaller than 2000 pixels
                        if r['size'] < 2000:
                            continue
                        # distorted rects
                        x, y, w, h = r['rect']
                        if w / h > 1.2 or h / w > 1.2:
                            continue
                        # Drop near-full-frame regions.
                        if w >= (img.shape[0]-1)*(0.7) and h >= (img.shape[1]-1)*(0.7):
                            continue
                        candidates.add(r['rect'])
                    print("Stage " + str(stage) + " Complete.")
                    stage+=1
        print(candidates)
        merge()
        print(refined)
        draw_superbox()
        print(final)
        extend_superbox()
        print(final_extended)
        os.makedirs("Regions/"+name.split(".")[0])
        # draw rectangles on the original image
        fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
        ax.imshow(img)
        for x, y, w, h in final_extended:
            rect = mpatches.Rectangle((x, y), w, h, fill=False, edgecolor='red', linewidth=1)
            ax.add_patch(rect)
        plt.savefig("Regions/"+name.split(".")[0]+"/FinalRegions.png")
        plt.close('all')
        img1 = skimage.io.imread(fname)[:,:,:3]
        if height == len(img1) and width == len(img1[0]): pass
        else: img1 = skimage.transform.resize(img1, (height, width))
        # imgT = Image.open(fname).convert('L')
        # w, h = imgT.size
        # if height == h and width == w:
        #     pass
        # else:
        #     # img1 = skimage.transform.resize(img1, (height, width))
        #     imgT = imgT.resize((width,height), Image.ANTIALIAS)
        ij = 1
        fList = []     # crop file paths, in the order they were written
        box_list = []  # matching (x, y, w, h) for each crop
        # Save each region crop for classification.
        for x, y, w, h in final_extended:
            skimage.io.imsave("Regions/"+name.split(".")[0]+"/"+str(ij)+"_sub.jpg", img1[y:y+h,x:x+w])
            # imgT.crop((x,y,x+w,y+h)).save("Regions/"+name.split(".")[0]+"/"+str(ij)+"_sub_b.png")
            # imgT = Image.open("Regions/"+name.split(".")[0]+"/"+str(ij)+"_sub.png").convert('L')
            # imgT.save("Regions/"+name.split(".")[0]+"/"+str(ij)+"_sub_b.png")
            fList.append("Regions/"+name.split(".")[0]+"/"+str(ij)+"_sub.jpg")
            box_list.append((x, y, w, h))
            ij+=1
        # classify text no text
        text_boxes=set()
        text = set()
        no_text = set()
        both_text = set()
        text_cut_final = set()
        i = 0
        try:
            a = getClass(fList)
            # Class indices: 0 = no text, 1 = text, 2 = both.
            l = np.array([0,1,2])
            for pred in a:
                # Reorder class labels by descending probability.
                idx = list((-pred).argsort())
                pred = l[np.array(idx)]
                if pred[0] == 1 or pred[0] == 2:
                    text_boxes.add(box_list[i])
                elif pred[0] == 0:
                    no_text.add(box_list[i])
                if pred[0] == 2:
                    both_text.add(box_list[i])
                print(pred)
                i+=1
        except:
            # NOTE(review): bare except also hides classifier errors, not just
            # the empty-region case — confirm this is intentional.
            print("No Text Regions")
        draw_textbox()
        print(text)
        texbox_cut()
        print(text_cut)
        texbox_ext()
        print(text_cut_final)
        # draw rectangles on the original image
        img = skimage.io.imread(fname)[:,:,:3]
        if height == len(img) and width == len(img[0]): pass
        else: img = skimage.transform.resize(img, (height, width))
        fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
        ax.imshow(img)
        for x, y, w, h in text_cut_final:
            rect = mpatches.Rectangle((x, y), w, h, fill=False, edgecolor='red', linewidth=1)
            ax.add_patch(rect)
        plt.savefig("Result/final_"+name.split(".")[0]+".png")
        plt.close('all')
        ij = 1
        for x, y, w, h in text_cut_final:
            skimage.io.imsave("Regions/"+name.split(".")[0]+"/"+str(ij)+"_text.png", img[y:y+h,x:x+w])
            ij+=1
        # min area check
        minf = 0
        for x, y, w, h in text_cut_final:
            if w*h < width*height*0.20 and (w < width*0.20 or h < height*0.20):
                minf = 1
        # Retry at a different resolution when nothing usable was found.
        if (len(text_cut_final) == 0 or minf == 1) and len(tried) < 3:
            print(tried)
            print("New size being tried.")
            shutil.rmtree("Regions/"+name.split(".")[0]+"/")
            img = skimage.io.imread(fname)
            twidth = len(img[0])
            theight = len(img)
            new_size = list(set([256,512,twidth]) - set(tried))[0]
            height = int(new_size * theight / twidth)
            width = new_size
        else:
            break
| apache-2.0 |
canaltinova/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/executors/executorselenium.py | 5 | 12945 | import json
import os
import socket
import sys
import threading
import time
import traceback
import urlparse
import uuid
from .base import (CallbackHandler,
RefTestExecutor,
RefTestImplementation,
TestharnessExecutor,
extra_timeout,
strip_server)
from .protocol import (BaseProtocolPart,
TestharnessProtocolPart,
Protocol,
SelectorProtocolPart,
ClickProtocolPart,
SendKeysProtocolPart,
TestDriverProtocolPart)
from ..testrunner import Stop
# Directory containing this file (used to locate the injected JS helpers).
here = os.path.join(os.path.split(__file__)[0])

# Placeholders filled in lazily by do_delayed_imports() so this module can be
# imported without selenium installed.
webdriver = None
exceptions = None
RemoteConnection = None
def do_delayed_imports():
    """Import selenium lazily, populating the module-level placeholders.

    Deferred so that merely importing this module does not require selenium.
    """
    global webdriver
    global exceptions
    global RemoteConnection
    from selenium import webdriver
    from selenium.common import exceptions
    from selenium.webdriver.remote.remote_connection import RemoteConnection
class SeleniumBaseProtocolPart(BaseProtocolPart):
    """Basic script-execution and window operations over a Selenium session.

    Note: the `async` parameter name makes this Python-2-only code (it is a
    reserved keyword from Python 3.7; see also the `urlparse` import above).
    """

    def setup(self):
        self.webdriver = self.parent.webdriver

    def execute_script(self, script, async=False):
        # Dispatch to the async variant when the script calls back.
        method = self.webdriver.execute_async_script if async else self.webdriver.execute_script
        return method(script)

    def set_timeout(self, timeout):
        # NOTE(review): selenium's set_script_timeout takes seconds in the
        # Python bindings — the * 1000 here looks suspect; confirm against
        # the selenium version in use.
        self.webdriver.set_script_timeout(timeout * 1000)

    @property
    def current_window(self):
        return self.webdriver.current_window_handle

    def set_window(self, handle):
        self.webdriver.switch_to_window(handle)

    def wait(self):
        """Block until the session dies, swallowing script timeouts."""
        while True:
            try:
                self.webdriver.execute_async_script("")
            except exceptions.TimeoutException:
                pass
            except (socket.timeout, exceptions.NoSuchWindowException,
                    exceptions.ErrorInResponseException, IOError):
                break
            except Exception as e:
                self.logger.error(traceback.format_exc(e))
                break
class SeleniumTestharnessProtocolPart(TestharnessProtocolPart):
    """Management of the testharness runner window via Selenium."""

    def setup(self):
        self.webdriver = self.parent.webdriver

    def load_runner(self, url_protocol):
        """Navigate to the testharness runner page for *url_protocol*."""
        url = urlparse.urljoin(self.parent.executor.server_url(url_protocol),
                               "/testharness_runner.html")
        self.logger.debug("Loading %s" % url)
        self.webdriver.get(url)
        # Encode the thread name in the title so the runner window can be
        # told apart from test windows.
        self.webdriver.execute_script("document.title = '%s'" %
                                      threading.current_thread().name.replace("'", '"'))

    def close_old_windows(self):
        """Close every window except the current one; return its handle."""
        exclude = self.webdriver.current_window_handle
        handles = [item for item in self.webdriver.window_handles if item != exclude]
        for handle in handles:
            try:
                self.webdriver.switch_to_window(handle)
                self.webdriver.close()
            except exceptions.NoSuchWindowException:
                # The window may have closed itself already.
                pass
        self.webdriver.switch_to_window(exclude)
        return exclude

    def get_test_window(self, window_id, parent):
        """Return the handle of the test window opened by the harness.

        Tries the id-based lookup first, then falls back to guessing from
        the set of open window handles.
        """
        test_window = None
        if window_id:
            try:
                # Try this, it's in Level 1 but nothing supports it yet
                # NOTE(review): this reads `self.window_id`, but that
                # attribute lives on the executor, not this protocol part,
                # so this branch appears to always raise and fall through
                # to the guessing path below — confirm.
                win_s = self.webdriver.execute_script("return window['%s'];" % self.window_id)
                win_obj = json.loads(win_s)
                test_window = win_obj["window-fcc6-11e5-b4f8-330a88ab9d7f"]
            except Exception:
                pass

        if test_window is None:
            after = self.webdriver.window_handles
            if len(after) == 2:
                test_window = next(iter(set(after) - set([parent])))
            elif after[0] == parent and len(after) > 2:
                # Hope the first one here is the test window
                test_window = after[1]
            else:
                raise Exception("unable to find test window")

        assert test_window != parent
        return test_window
class SeleniumSelectorProtocolPart(SelectorProtocolPart):
    """CSS-selector-based element lookup via Selenium."""

    def setup(self):
        self.webdriver = self.parent.webdriver

    def elements_by_selector(self, selector):
        return self.webdriver.find_elements_by_css_selector(selector)
class SeleniumClickProtocolPart(ClickProtocolPart):
    """Element clicking via Selenium."""

    def setup(self):
        self.webdriver = self.parent.webdriver

    def element(self, element):
        return element.click()
class SeleniumSendKeysProtocolPart(SendKeysProtocolPart):
    """Keyboard input via Selenium."""

    def setup(self):
        self.webdriver = self.parent.webdriver

    def send_keys(self, element, keys):
        return element.send_keys(keys)
class SeleniumTestDriverProtocolPart(TestDriverProtocolPart):
    """testdriver.js response channel implemented via window.postMessage."""

    def setup(self):
        self.webdriver = self.parent.webdriver

    def send_message(self, message_type, status, message=None):
        """Post a testdriver response message into the current window."""
        obj = {
            "type": "testdriver-%s" % str(message_type),
            "status": str(status)
        }
        if message:
            obj["message"] = str(message)
        # json.dumps also serves as the escaping step for the JS literal.
        self.webdriver.execute_script("window.postMessage(%s, '*')" % json.dumps(obj))
class SeleniumProtocol(Protocol):
    """Protocol implementation backed by Selenium's remote WebDriver."""

    implements = [SeleniumBaseProtocolPart,
                  SeleniumTestharnessProtocolPart,
                  SeleniumSelectorProtocolPart,
                  SeleniumClickProtocolPart,
                  SeleniumSendKeysProtocolPart,
                  SeleniumTestDriverProtocolPart]

    def __init__(self, executor, browser, capabilities, **kwargs):
        do_delayed_imports()

        super(SeleniumProtocol, self).__init__(executor, browser)
        self.capabilities = capabilities
        self.url = browser.webdriver_url
        self.webdriver = None

    def connect(self):
        """Connect to browser via Selenium's WebDriver implementation."""
        self.logger.debug("Connecting to Selenium on URL: %s" % self.url)

        self.webdriver = webdriver.Remote(command_executor=RemoteConnection(self.url.strip("/"),
                                                                            resolve_ip=False),
                                          desired_capabilities=self.capabilities)

    def after_conect(self):
        # NOTE(review): apparently a misspelled, dead duplicate of
        # after_connect() below; kept for interface compatibility.
        pass

    def teardown(self):
        self.logger.debug("Hanging up on Selenium session")
        try:
            self.webdriver.quit()
        except Exception:
            # Best-effort shutdown; the session may already be gone.
            pass
        del self.webdriver

    def is_alive(self):
        """Cheap liveness probe of the WebDriver connection."""
        try:
            # Get a simple property over the connection
            self.webdriver.current_window_handle
        # TODO what exception?
        except (socket.timeout, exceptions.ErrorInResponseException):
            return False
        return True

    def after_connect(self):
        self.testharness.load_runner(self.executor.last_environment["protocol"])
class SeleniumRun(object):
    """Runs a single operation (*func*) against the browser on a worker
    thread, enforcing *timeout* (plus slack) and capturing the outcome.

    After run() the result is a (success, data) tuple: (True, value) on
    success, or (False, (status, detail)) on timeout/crash/internal error.
    """

    def __init__(self, func, protocol, url, timeout):
        self.func = func
        self.result = None
        self.protocol = protocol
        self.url = url
        self.timeout = timeout
        self.result_flag = threading.Event()

    def run(self):
        timeout = self.timeout

        try:
            self.protocol.base.set_timeout((timeout + extra_timeout))
        except exceptions.ErrorInResponseException:
            # BUG FIX: SeleniumRun has no `logger` attribute, so the previous
            # `self.logger.error(...)` raised AttributeError here instead of
            # logging; use the protocol's logger.
            self.protocol.logger.error("Lost WebDriver connection")
            return Stop

        executor = threading.Thread(target=self._run)
        executor.start()

        # Wait a little longer than the in-browser timeout so the browser
        # side gets a chance to report EXTERNAL-TIMEOUT itself first.
        flag = self.result_flag.wait(timeout + 2 * extra_timeout)
        if self.result is None:
            assert not flag
            self.result = False, ("EXTERNAL-TIMEOUT", None)

        return self.result

    def _run(self):
        try:
            self.result = True, self.func(self.protocol, self.url, self.timeout)
        except exceptions.TimeoutException:
            self.result = False, ("EXTERNAL-TIMEOUT", None)
        except (socket.timeout, exceptions.ErrorInResponseException):
            # Connection-level failures indicate the browser went away.
            self.result = False, ("CRASH", None)
        except Exception as e:
            message = getattr(e, "message", "")
            if message:
                message += "\n"
            message += traceback.format_exc(e)
            self.result = False, ("INTERNAL-ERROR", e)
        finally:
            # Always unblock run(), whatever happened above.
            self.result_flag.set()
class SeleniumTestharnessExecutor(TestharnessExecutor):
supports_testdriver = True
def __init__(self, browser, server_config, timeout_multiplier=1,
close_after_done=True, capabilities=None, debug_info=None,
**kwargs):
"""Selenium-based executor for testharness.js tests"""
TestharnessExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = SeleniumProtocol(self, browser, capabilities)
with open(os.path.join(here, "testharness_webdriver.js")) as f:
self.script = f.read()
with open(os.path.join(here, "testharness_webdriver_resume.js")) as f:
self.script_resume = f.read()
self.close_after_done = close_after_done
self.window_id = str(uuid.uuid4())
def is_alive(self):
return self.protocol.is_alive()
def on_environment_change(self, new_environment):
if new_environment["protocol"] != self.last_environment["protocol"]:
self.protocol.testharness.load_runner(new_environment["protocol"])
def do_test(self, test):
url = self.test_url(test)
success, data = SeleniumRun(self.do_testharness,
self.protocol,
url,
test.timeout * self.timeout_multiplier).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_testharness(self, protocol, url, timeout):
format_map = {"abs_url": url,
"url": strip_server(url),
"window_id": self.window_id,
"timeout_multiplier": self.timeout_multiplier,
"timeout": timeout * 1000}
parent_window = protocol.testharness.close_old_windows()
# Now start the test harness
protocol.base.execute_script(self.script % format_map)
test_window = protocol.testharness.get_test_window(webdriver, parent_window)
handler = CallbackHandler(self.logger, protocol, test_window)
while True:
result = protocol.base.execute_script(
self.script_resume % format_map, async=True)
done, rv = handler(result)
if done:
break
return rv
class SeleniumRefTestExecutor(RefTestExecutor):
    """Selenium WebDriver-based executor for reftests."""

    def __init__(self, browser, server_config, timeout_multiplier=1,
                 screenshot_cache=None, close_after_done=True,
                 debug_info=None, capabilities=None, **kwargs):
        """Selenium WebDriver-based executor for reftests"""
        RefTestExecutor.__init__(self,
                                 browser,
                                 server_config,
                                 screenshot_cache=screenshot_cache,
                                 timeout_multiplier=timeout_multiplier,
                                 debug_info=debug_info)
        self.protocol = SeleniumProtocol(self, browser,
                                         capabilities=capabilities)
        self.implementation = RefTestImplementation(self)
        self.close_after_done = close_after_done
        self.has_window = False
        with open(os.path.join(here, "reftest.js")) as f:
            self.script = f.read()
        with open(os.path.join(here, "reftest-wait_webdriver.js")) as f:
            self.wait_script = f.read()

    def is_alive(self):
        return self.protocol.is_alive()

    def do_test(self, test):
        self.logger.info("Test requires OS-level window focus")

        # Fixed window size so screenshots are comparable between runs.
        self.protocol.webdriver.set_window_size(600, 600)

        result = self.implementation.run_test(test)

        return self.convert_result(test, result)

    def screenshot(self, test, viewport_size, dpi):
        # https://github.com/w3c/wptrunner/issues/166
        assert viewport_size is None
        assert dpi is None

        return SeleniumRun(self._screenshot,
                           self.protocol,
                           self.test_url(test),
                           test.timeout).run()

    def _screenshot(self, protocol, url, timeout):
        """Load *url*, wait for reftest-wait to clear, return a base64 PNG."""
        webdriver = protocol.webdriver
        webdriver.get(url)

        webdriver.execute_async_script(self.wait_script)

        screenshot = webdriver.get_screenshot_as_base64()

        # strip off the data:img/png, part of the url
        if screenshot.startswith("data:image/png;base64,"):
            screenshot = screenshot.split(",", 1)[1]

        return screenshot
| mpl-2.0 |
LukeM12/samba | lib/testtools/testtools/tags.py | 14 | 1070 | # Copyright (c) 2012 testtools developers. See LICENSE for details.
"""Tag support."""
class TagContext(object):
    """Tracks the set of tags current within a (possibly nested) context."""

    def __init__(self, parent=None):
        """Create a new TagContext.

        :param parent: If provided, uses this as the parent context. Any tags
            that are current on the parent at the time of construction are
            current in this context.
        """
        self.parent = parent
        if parent:
            self._tags = set(parent.get_current_tags())
        else:
            self._tags = set()

    def get_current_tags(self):
        """Return a copy of any current tags."""
        return set(self._tags)

    def change_tags(self, new_tags, gone_tags):
        """Change the tags on this context.

        :param new_tags: A set of tags to add to this context.
        :param gone_tags: A set of tags to remove from this context.
        :return: The tags now current on this context.
        """
        self._tags = (self._tags | set(new_tags)) - set(gone_tags)
        return self.get_current_tags()
| gpl-3.0 |
tersmitten/ansible | lib/ansible/modules/network/aci/mso_site.py | 19 | 6225 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mso_site
short_description: Manage sites
description:
- Manage sites on Cisco ACI Multi-Site.
author:
- Dag Wieers (@dagwieers)
version_added: '2.8'
options:
apic_password:
description:
- The password for the APICs.
type: str
required: yes
apic_site_id:
description:
- The site ID of the APICs.
type: str
required: yes
apic_username:
description:
- The username for the APICs.
type: str
required: yes
default: admin
site:
description:
- The name of the site.
type: str
required: yes
aliases: [ name ]
labels:
description:
- The labels for this site.
- Labels that do not already exist will be automatically created.
type: list
location:
description:
- Location of the site.
suboptions:
latitude:
description:
- The latitude of the location of the site.
type: float
longitude:
description:
- The longitude of the location of the site.
type: float
urls:
description:
- A list of URLs to reference the APICs.
type: list
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: mso
'''
EXAMPLES = r'''
- name: Add a new site
mso_site:
host: mso_host
username: admin
password: SomeSecretPassword
site: north_europe
description: North European Datacenter
apic_username: mso_admin
apic_password: AnotherSecretPassword
apic_site_id: 12
urls:
- 10.2.3.4
- 10.2.4.5
- 10.3.5.6
labels:
- NEDC
- Europe
- Diegem
location:
latitude: 50.887318
longitude: 4.447084
state: present
delegate_to: localhost
- name: Remove a site
mso_site:
host: mso_host
username: admin
password: SomeSecretPassword
site: north_europe
state: absent
delegate_to: localhost
- name: Query a site
mso_site:
host: mso_host
username: admin
password: SomeSecretPassword
site: north_europe
state: query
delegate_to: localhost
register: query_result
- name: Query all sites
mso_site:
host: mso_host
username: admin
password: SomeSecretPassword
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.mso import MSOModule, mso_argument_spec, issubset
def main():
    """Entry point for the mso_site Ansible module.

    Builds the argument spec, looks the site up on the Multi-Site
    Orchestrator, then adds, updates, removes or queries it according to
    the 'state' parameter, and exits through the MSO module helper.
    """
    # Sub-spec for the optional 'location' dict parameter.
    location_arg_spec = dict(
        latitude=dict(type='float'),
        longitude=dict(type='float'),
    )
    argument_spec = mso_argument_spec()
    argument_spec.update(
        apic_password=dict(type='str', no_log=True),
        apic_site_id=dict(type='str'),
        apic_username=dict(type='str', default='admin'),
        labels=dict(type='list'),
        location=dict(type='dict', options=location_arg_spec),
        site=dict(type='str', aliases=['name']),
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
        urls=dict(type='list'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # 'site' is mandatory unless we are only querying.
        required_if=[
            ['state', 'absent', ['site']],
            ['state', 'present', ['apic_site_id', 'site']],
        ],
    )
    apic_username = module.params['apic_username']
    apic_password = module.params['apic_password']
    apic_site_id = module.params['apic_site_id']
    site = module.params['site']
    location = module.params['location']
    if location is not None:
        latitude = module.params['location']['latitude']
        longitude = module.params['location']['longitude']
    state = module.params['state']
    urls = module.params['urls']
    mso = MSOModule(module)
    site_id = None
    path = 'sites'
    # Convert labels
    labels = mso.lookup_labels(module.params['labels'], 'site')
    # Query for mso.existing object(s)
    if site:
        mso.existing = mso.get_obj(path, name=site)
        if mso.existing:
            site_id = mso.existing['id']
            # If we found an existing object, continue with it
            path = 'sites/{id}'.format(id=site_id)
    else:
        # No site given: return every site.
        mso.existing = mso.query_objs(path)
    if state == 'query':
        # Nothing to change; mso.existing already holds the query result.
        pass
    elif state == 'absent':
        mso.previous = mso.existing
        if mso.existing:
            if module.check_mode:
                mso.existing = {}
            else:
                mso.existing = mso.request(path, method='DELETE', qs=dict(force='true'))
    elif state == 'present':
        mso.previous = mso.existing
        payload = dict(
            apicSiteId=apic_site_id,
            id=site_id,
            name=site,
            urls=urls,
            labels=labels,
            username=apic_username,
            password=apic_password,
        )
        if location is not None:
            payload['location'] = dict(
                lat=latitude,
                long=longitude,
            )
        mso.sanitize(payload, collate=True)
        # PUT updates an existing site (only when something changed);
        # POST creates a new one.
        if mso.existing:
            if not issubset(mso.sent, mso.existing):
                if module.check_mode:
                    mso.existing = mso.proposed
                else:
                    mso.existing = mso.request(path, method='PUT', data=mso.sent)
        else:
            if module.check_mode:
                mso.existing = mso.proposed
            else:
                mso.existing = mso.request(path, method='POST', data=mso.sent)
    # Never leak the APIC password in the module result.
    if 'password' in mso.existing:
        mso.existing['password'] = '******'
    mso.exit_json()
if __name__ == "__main__":
    main()
| gpl-3.0 |
mcardacci/tools_of_the_dark_arts | droopescan/dscan/common/plugins_util.py | 6 | 4208 | from cement.core import handler
from dscan.common import file_len, VersionsFile
from dscan.plugins.internal.base_plugin import BasePlugin
import dscan
import dscan.plugins
import pkgutil
import subprocess
_base_plugins = None
_rfu = None
_vf = None
def plugins_get():
    """Return a Plugin wrapper object for every discovered plugin class."""
    return [Plugin(cls) for cls in plugins_base_get()]
def plugins_base_get():
    """Return (and cache) every plugin controller class under dscan.plugins.

    Modules are discovered with pkgutil; each module is expected to expose a
    class named after the module with its first letter capitalized.  Only
    subclasses of BasePlugin are kept, and the base 'scan' controller is
    excluded.  The result is memoized in the module-level _base_plugins.
    """
    global _base_plugins
    if _base_plugins:
        return _base_plugins
    controllers = []
    package = dscan.plugins
    for importer, modname, ispkg in pkgutil.iter_modules(package.__path__):
        if not ispkg and not modname == 'example':
            # e.g. module 'drupal' provides class 'Drupal'
            module = __import__("dscan.plugins." + modname, fromlist="dscan.plugins")
            c = getattr(module, modname[0].upper() + modname[1:])
            controllers.append(c)
    plugins = []
    for c in controllers:
        is_base_scan = c.__name__.lower() == 'scan'
        if issubclass(c, BasePlugin) and not is_base_scan:
            plugins.append(c)
    _base_plugins = plugins
    return plugins
def get_rfu():
    """
    Return the combined "regular file url" list across all plugins.
    The result is computed once and memoized in the module-level _rfu.
    """
    global _rfu
    if _rfu:
        return _rfu
    urls = []
    for cls in plugins_base_get():
        entry = cls.regular_file_url
        if isinstance(entry, str):
            urls.append(entry)
        else:
            urls.extend(entry)
    _rfu = urls
    return urls
def plugin_get_rfu(plugin):
    """
    Return the "regular file urls" of one plugin, always as a list.
    @param plugin: plugin class.
    """
    urls = plugin.regular_file_url
    if isinstance(urls, str):
        return [urls]
    return urls
def get_vf():
    """Return a dict mapping plugin label -> VersionsFile, memoized in _vf."""
    global _vf
    if not _vf:
        _vf = dict(
            (cls.Meta.label,
             VersionsFile(dscan.PWD + "plugins/%s/versions.xml" % cls.Meta.label))
            for cls in plugins_base_get()
        )
    return _vf
def plugin_get_vf(plugin):
    """
    Return the VersionsFile associated with one plugin.
    @param plugin: the plugin class.
    """
    return get_vf()[plugin.Meta.label]
def plugin_get(name):
    """
    Look up a plugin class by its CMS label.
    @param name: the cms label.
    @raises RuntimeError: when no plugin carries that label.
    """
    for candidate in plugins_base_get():
        if candidate.Meta.label == name:
            return candidate
    raise RuntimeError('CMS "%s" not known.' % name)
class Plugin(object):
    """Display-oriented summary of one plugin's enumeration capabilities."""
    plugin = None
    name = None
    plugins_can_enumerate = False
    plugins_wordlist_size = None
    plugins_mtime = None
    themes_can_enumerate = False
    themes_wordlist_size = None
    themes_mtime = None
    interesting_can_enumerate = False
    interesting_urls_size = None
    version_can_enumerate = False
    version_highest = None
    def __init__(self, PluginClass=None):
        """
        @param PluginClass: as returned by handler.list('controller'). Must
            extend BasePlugin.
        """
        plugin = PluginClass()
        if plugin:
            self.name = plugin._meta.label
            if plugin.can_enumerate_plugins:
                self.plugins_can_enumerate = True
                self.plugins_wordlist_size = file_len(plugin.plugins_file)
            if plugin.can_enumerate_themes:
                self.themes_can_enumerate = True
                self.themes_wordlist_size = file_len(plugin.themes_file)
            if plugin.can_enumerate_interesting:
                self.interesting_can_enumerate = True
                # BUG FIX: this was assigned to 'interesting_url_size', which
                # left the declared 'interesting_urls_size' attribute at None.
                self.interesting_urls_size = len(plugin.interesting_urls)
            if plugin.can_enumerate_version:
                versions_file = VersionsFile(plugin.versions_file)
                self.version_can_enumerate = True
                hvm = versions_file.highest_version_major(plugin.update_majors)
                self.version_highest = ', '.join(hvm.values())
    def file_mtime(self, file_path):
        """Return the human-readable git commit age ("... ago") of file_path."""
        out = subprocess.check_output(['git', 'log', '-1', '--format=%cr',
            file_path]).strip()
        return out
| gpl-3.0 |
MIPS/external-chromium_org | chrome/test/functional/youtube.py | 68 | 8918 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import time
import pyauto_functional
import pyauto
import pyauto_errors
import test_utils
class YoutubeTestHelper():
  """Helper functions for Youtube tests.
  For sample usage, look at class YoutubeTest.
  """
  # YouTube player states (string values as reported by the JS player API)
  is_unstarted = '-1'
  is_playing = '1'
  is_paused = '2'
  has_ended = '0'
  # The pyauto test instance used to drive the browser.
  _pyauto = None
  def __init__(self, pyauto):
    self._pyauto = pyauto
  def IsFlashPluginEnabled(self):
    """Verify flash plugin availability and its state."""
    return [x for x in self._pyauto.GetPluginsInfo().Plugins() \
            if x['name'] == 'Shockwave Flash' and x['enabled']]
  def AssertPlayerState(self, state, msg):
    """Wait until the embedded player reports exactly the given state."""
    expected_regex = '^%s$' % state
    self.WaitForDomNode('id("playerState")', expected_value=expected_regex,
                        msg=msg)
  def WaitUntilPlayerReady(self):
    """Verify that player is ready."""
    self.AssertPlayerState(state=self.is_unstarted,
                           msg='Failed to load youtube player.')
  def GetPlayerState(self):
    """Returns a player state."""
    js = """
        var val = ytplayer.getPlayerState();
        window.domAutomationController.send(val + '');
    """
    return self._pyauto.ExecuteJavascript(js)
  def GetVideoInfo(self):
    """Returns Youtube video info."""
    youtube_apis = self._pyauto.GetPrivateInfo()['youtube_api']
    youtube_debug_text = youtube_apis['GetDebugText']
    return self._pyauto.ExecuteJavascript(
        'window.domAutomationController.send(%s);' % youtube_debug_text)
  def GetVideoDroppedFrames(self):
    """Returns total Youtube video dropped frames.
    Returns:
      -1 if failed to get dropped frames from the video data
    """
    video_data = self._pyauto.GetVideoInfo()
    matched = re.search('droppedFrames=([\d\.]+)', video_data)
    if matched:
      return int(matched.group(1))
    else:
      return -1
  def GetVideoFrames(self):
    """Returns Youtube video frames/second.
    Returns:
      -1 if failed to get video frames from the video data.
    """
    video_data = self._pyauto.GetVideoInfo()
    matched = re.search('videoFps=([\d\.]+)', video_data)
    if matched:
      return int(matched.group(1))
    else:
      return -1
  def GetVideoTotalBytes(self):
    """Returns video total size in bytes.
    To call this function, video must be in the playing state,
    or this returns 0.
    """
    total_bytes = 0
    total_bytes = self._pyauto.ExecuteJavascript("""
        bytesTotal = document.getElementById("bytesTotal");
        window.domAutomationController.send(bytesTotal.innerHTML);
    """)
    return int(total_bytes)
  def GetVideoLoadedBytes(self):
    """Returns video size in bytes."""
    loaded_bytes = 0
    loaded_bytes = self.ExecuteJavascript("""
        bytesLoaded = document.getElementById("bytesLoaded");
        window.domAutomationController.send(bytesLoaded.innerHTML);
    """)
    return int(loaded_bytes)
  def GetCurrentVideoTime(self):
    """Returns the current time of the video in seconds."""
    current_time = 0
    current_time = self.ExecuteJavascript("""
        videoCurrentTime = document.getElementById("videoCurrentTime");
        window.domAutomationController.send(videoCurrentTime.innerHTML);
    """)
    return int(current_time)
  def PlayVideo(self):
    """Plays the loaded video."""
    self._pyauto.ExecuteJavascript("""
        ytplayer.playVideo();
        window.domAutomationController.send('');
    """)
  def StopVideo(self):
    """Stops the video and cancels loading."""
    self._pyauto.ExecuteJavascript("""
        ytplayer.stopVideo();
        window.domAutomationController.send('');
    """)
  def PauseVideo(self):
    """Pause the video."""
    self.ExecuteJavascript("""
        ytplayer.pauseVideo();
        window.domAutomationController.send('');
    """)
  def PlayVideoAndAssert(self, youtube_video='zuzaxlddWbk',
                         ignore_assert=False):
    """Start video and assert the playing state.
    By default test uses http://www.youtube.com/watch?v=zuzaxlddWbk.
    Args:
      youtube_video: The string ID of the youtube video to play.
      ignore_assert: flag to ignore the assertion and continue the test.
    Returns:
      self.is_playing when ignore_assert is True; otherwise None.
    """
    self._pyauto.assertTrue(self._pyauto.IsFlashPluginEnabled(),
        msg='From here Flash plugin is disabled or not available.')
    url = self._pyauto.GetHttpURLForDataPath(
        'media', 'youtube.html?video=' + youtube_video)
    self._pyauto.NavigateToURL(url)
    self.WaitUntilPlayerReady()
    i = 0
    # The YouTube player will get in a state where it does not return the
    # number of loaded bytes. When this happens we need to reload the page
    # before starting the test.
    while self.GetVideoLoadedBytes() == 1 and i < 30:
      self._pyauto.NavigateToURL(url)
      self.WaitUntilPlayerReady()
      i = i + 1
    self.PlayVideo()
    if ignore_assert:
      return self.is_playing
    self.AssertPlayerState(state=self.is_playing,
                           msg='Player did not enter the playing state.')
  def VideoBytesLoadingAndAssert(self):
    """Assert the video loading."""
    total_bytes = self.GetVideoTotalBytes()
    prev_loaded_bytes = 0
    loaded_bytes = 0
    count = 0
    while loaded_bytes < total_bytes:
      # We want to test bytes loading only twice
      count = count + 1
      if count == 2:
        break
      # The loaded-bytes counter must only ever grow.
      loaded_bytes = self.GetVideoLoadedBytes()
      self.assertTrue(prev_loaded_bytes <= loaded_bytes)
      prev_loaded_bytes = loaded_bytes
      # Give some time to load a video
      time.sleep(1)
  def PlayFAVideo(self):
    """Play and assert FA video playing.
    We are using multiple test videos in case any FA video playback fails
    because other tests are playing the same video and the test gets the
    simultaneous playback error.
    """
    fa_videos = ('APRpcscmbY0', 'yQqvrED-np0', 'KJuFw6hQdNY',
                 'BeFQbgxr_9g', 'L6JwlOudqA4')
    credentials = self.GetPrivateInfo()['test_fa_account']
    test_utils.GoogleAccountsLogin(self,
        credentials['username'], credentials['password'])
    for video in fa_videos:
      result = self.PlayVideoAndAssert(video, ignore_assert=True)
      # PlayVideoAndAssert returns the is_playing object itself, so an
      # identity check is sufficient here.
      if result is self.is_playing:
        return
    self.assertTrue(False, msg='Player did not enter the playing state.')
class YoutubeTest(pyauto.PyUITest, YoutubeTestHelper):
  """Test case for Youtube videos."""
  def __init__(self, methodName='runTest', **kwargs):
    pyauto.PyUITest.__init__(self, methodName, **kwargs)
    # The helper drives the browser through this very test instance.
    YoutubeTestHelper.__init__(self, self)
  def testPlayerStatus(self):
    """Test that YouTube loads a player and changes player states.
    Test verifies various player states like unstarted, playing, paused
    and ended.
    """
    # Navigating to Youtube video. This video is 122 seconds long.
    # During tests, we are not going to play this video full.
    self.PlayVideoAndAssert()
    self.PauseVideo()
    self.AssertPlayerState(state=self.is_paused,
                           msg='Player did not enter the paused state.')
    # Seek to the end of video
    self.ExecuteJavascript("""
        val = ytplayer.getDuration();
        ytplayer.seekTo(val, true);
        window.domAutomationController.send('');
    """)
    self.PlayVideo()
    # We've seeked to almost the end of the video but not quite.
    # Wait until the end.
    self.AssertPlayerState(state=self.has_ended,
                           msg='Player did not reach the stopped state.')
  def testPlayerResolution(self):
    """Test various video resolutions."""
    self.PlayVideoAndAssert()
    resolutions = self.ExecuteJavascript("""
        res = ytplayer.getAvailableQualityLevels();
        window.domAutomationController.send(res.toString());
    """)
    resolutions = resolutions.split(',')
    # Set each advertised quality level and verify the player accepted it.
    for res in resolutions:
      self.ExecuteJavascript("""
          ytplayer.setPlaybackQuality('%s');
          window.domAutomationController.send('');
      """ % res)
      curr_res = self.ExecuteJavascript("""
          res = ytplayer.getPlaybackQuality();
          window.domAutomationController.send(res + '');
      """)
      self.assertEqual(res, curr_res, msg='Resolution is not set to %s.' % res)
  def testPlayerBytes(self):
    """Test that player downloads video bytes."""
    self.PlayVideoAndAssert()
    self.VideoBytesLoadingAndAssert()
  def testFAVideo(self):
    """Test that FlashAccess/DRM video plays."""
    self.PlayFAVideo()
    self.StopVideo()
  def testFAVideoBytes(self):
    """Test FlashAccess/DRM video bytes loading."""
    self.PlayFAVideo()
    self.VideoBytesLoadingAndAssert()
    self.StopVideo()
if __name__ == '__main__':
  pyauto_functional.Main()
| bsd-3-clause |
juhans/ardupilot | mk/PX4/Tools/genmsg/test/test_genmsg_command_line.py | 216 | 1937 | # Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
def test_includepath_to_dict():
    """Check that includepath_to_dict splits 'pkg:path' specs on the first colon."""
    from genmsg.command_line import includepath_to_dict
    assert includepath_to_dict([]) == {}
    assert includepath_to_dict(['std_msgs:foo']) == {'std_msgs': ['foo']}
    expected = {'std_msgs': ['foo'], 'bar_msgs': ['baz:colon']}
    assert includepath_to_dict(['std_msgs:foo', 'bar_msgs:baz:colon']) == expected
| gpl-3.0 |
smunaut/gnuradio | grc/base/ParseXML.py | 4 | 4000 | """
Copyright 2008 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
from lxml import etree
from . import odict
xml_failures = {}
class XMLSyntaxError(Exception):
    """Raised when XML parsing or validation fails; wraps an lxml error log.

    Side effect: the log is also recorded in the module-level xml_failures
    map, keyed by the failing file name.
    """
    def __init__(self, error_log):
        self._error_log = error_log
        xml_failures[error_log.last_error.filename] = error_log
    def __str__(self):
        errors = self._error_log.filter_from_errors()
        return '\n'.join(str(error) for error in errors)
def validate_dtd(xml_file, dtd_file=None):
    """
    Validate an xml file against its dtd.
    Args:
        xml_file: the xml file
        dtd_file: the optional dtd file
    @throws XMLSyntaxError if parsing or validation fails
    """
    # perform parsing, use dtd validation if dtd file is not specified
    parser = etree.XMLParser(dtd_validation=not dtd_file)
    try:
        xml = etree.parse(xml_file, parser=parser)
    except etree.LxmlError:
        # the parser error log is raised below; keep 'xml' bound so the
        # dtd branch cannot hit a NameError (previously possible)
        xml = None
    if parser.error_log:
        raise XMLSyntaxError(parser.error_log)
    # perform dtd validation if the dtd file is specified
    if not dtd_file:
        return
    try:
        dtd = etree.DTD(dtd_file)
    except etree.LxmlError as error:
        # BUG FIX: the old handler referenced the unbound name 'dtd' here,
        # raising a NameError that masked the real DTD parse failure.
        # LxmlError instances carry the relevant error_log themselves.
        raise XMLSyntaxError(error.error_log)
    if not dtd.validate(xml.getroot()):
        raise XMLSyntaxError(dtd.error_log)
def from_file(xml_file):
    """
    Parse an xml file and convert its root element into nested data.
    Args:
        xml_file: the xml file path
    Returns:
        the nested data
    """
    root = etree.parse(xml_file).getroot()
    return _from_file(root)
def _from_file(xml):
    """
    Recursively parse the xml tree into nested data format.
    Args:
        xml: the xml tree
    Returns:
        the nested data
    """
    tag = xml.tag
    if not len(xml):
        # leaf element: store empty tags (text is None) as empty string
        return odict({tag: xml.text or ''})
    nested_data = odict()
    for child in xml:
        key, value = _from_file(child).items()[0]
        if key in nested_data:
            nested_data[key].append(value)
        else:
            nested_data[key] = [value]
    # delistify if the length of values is 1
    for key, values in nested_data.iteritems():
        if len(values) == 1:
            nested_data[key] = values[0]
    return odict({tag: nested_data})
def to_file(nested_data, xml_file):
    """
    Write the nested data out as a pretty-printed xml file.
    Args:
        nested_data: the nested data
        xml_file: the xml file path
    """
    xml = _to_file(nested_data)[0]
    data = etree.tostring(xml, xml_declaration=True, pretty_print=True)
    # BUG FIX: open(...).write(...) leaked the file handle; close it
    # deterministically so the data is flushed even on other interpreters.
    with open(xml_file, 'w') as fp:
        fp.write(data)
def _to_file(nested_data):
    """
    Recursively parse the nested data into xml tree format.
    Args:
        nested_data: the nested data
    Returns:
        the xml tree filled with child nodes
    """
    nodes = []
    for key, values in nested_data.iteritems():
        # listify the values if not a list
        if not isinstance(values, (list, set, tuple)):
            values = [values]
        for value in values:
            node = etree.Element(key)
            if isinstance(value, (str, unicode)):
                node.text = value
            else:
                node.extend(_to_file(value))
            nodes.append(node)
    return nodes
if __name__ == '__main__':
    """Use the main method to test parse xml's functions."""
    pass
| gpl-3.0 |
garg10may/youtube-dl | youtube_dl/extractor/slideshare.py | 126 | 2003 | from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
)
from ..utils import (
ExtractorError,
)
class SlideshareIE(InfoExtractor):
    # Matches e.g. http://www.slideshare.net/<user>/<title-slug>
    _VALID_URL = r'https?://www\.slideshare\.net/[^/]+?/(?P<title>.+?)($|\?)'
    _TEST = {
        'url': 'http://www.slideshare.net/Dataversity/keynote-presentation-managing-scale-and-complexity',
        'info_dict': {
            'id': '25665706',
            'ext': 'mp4',
            'title': 'Managing Scale and Complexity',
            'description': 'This was a keynote presentation at the NoSQL Now! 2013 Conference & Expo (http://www.nosqlnow.com). This presentation was given by Adrian Cockcroft from Netflix.',
        },
    }
    def _real_extract(self, url):
        """Extract the video info dict for a SlideShare video page."""
        mobj = re.match(self._VALID_URL, url)
        page_title = mobj.group('title')
        webpage = self._download_webpage(url, page_title)
        # The page embeds its metadata via $.extend(slideshare_object, {...}).
        slideshare_obj = self._search_regex(
            r'\$\.extend\(slideshare_object,\s*(\{.*?\})\);',
            webpage, 'slideshare object')
        info = json.loads(slideshare_obj)
        if info['slideshow']['type'] != 'video':
            raise ExtractorError('Webpage type is "%s": only video extraction is supported for Slideshare' % info['slideshow']['type'], expected=True)
        doc = info['doc']
        bucket = info['jsplayer']['video_bucket']
        ext = info['jsplayer']['video_extension']
        # The SD rendition lives at <bucket>/<doc>-SD.<ext>.
        video_url = compat_urlparse.urljoin(bucket, doc + '-SD.' + ext)
        description = self._html_search_regex(
            r'(?s)<p[^>]+itemprop="description"[^>]*>(.+?)</p>', webpage,
            'description', fatal=False)
        return {
            '_type': 'video',
            'id': info['slideshow']['id'],
            'title': info['slideshow']['title'],
            'ext': ext,
            'url': video_url,
            'thumbnail': info['slideshow']['pin_image_url'],
            'description': description,
        }
| unlicense |
MattsFleaMarket/python-for-android | python-modules/twisted/twisted/internet/test/test_base.py | 56 | 8160 | # Copyright (c) 2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.base}.
"""
import socket
from Queue import Queue
from zope.interface import implements
from twisted.python.threadpool import ThreadPool
from twisted.python.util import setIDFunction
from twisted.internet.interfaces import IReactorTime, IReactorThreads
from twisted.internet.error import DNSLookupError
from twisted.internet.base import ThreadedResolver, DelayedCall
from twisted.internet.task import Clock
from twisted.trial.unittest import TestCase
class FakeReactor(object):
    """
    A fake reactor implementation which just supports enough reactor APIs for
    L{ThreadedResolver}.
    """
    implements(IReactorTime, IReactorThreads)
    def __init__(self):
        # IReactorTime.callLater is served by the deterministic Clock so
        # tests can advance time manually.
        self._clock = Clock()
        self.callLater = self._clock.callLater
        self._threadpool = ThreadPool()
        self._threadpool.start()
        self.getThreadPool = lambda: self._threadpool
        # Calls queued by callFromThread; drained one at a time by
        # _runThreadCalls.
        self._threadCalls = Queue()
    def callFromThread(self, f, *args, **kwargs):
        # Record the call instead of running it, so the test decides when
        # "reactor thread" work actually happens.
        self._threadCalls.put((f, args, kwargs))
    def _runThreadCalls(self):
        # Execute exactly one queued callFromThread call (blocks until one
        # is available).
        f, args, kwargs = self._threadCalls.get()
        f(*args, **kwargs)
    def _stop(self):
        # Shut down the real threadpool started in __init__.
        self._threadpool.stop()
class ThreadedResolverTests(TestCase):
    """
    Tests for L{ThreadedResolver}.
    """
    def test_success(self):
        """
        L{ThreadedResolver.getHostByName} returns a L{Deferred} which fires
        with the value returned by the call to L{socket.gethostbyname} in the
        threadpool of the reactor passed to L{ThreadedResolver.__init__}.
        """
        ip = "10.0.0.17"
        name = "foo.bar.example.com"
        timeout = 30
        reactor = FakeReactor()
        self.addCleanup(reactor._stop)
        lookedUp = []
        resolvedTo = []
        def fakeGetHostByName(name):
            lookedUp.append(name)
            return ip
        self.patch(socket, 'gethostbyname', fakeGetHostByName)
        resolver = ThreadedResolver(reactor)
        d = resolver.getHostByName(name, (timeout,))
        d.addCallback(resolvedTo.append)
        # Deliver the result queued by callFromThread back to the "reactor".
        reactor._runThreadCalls()
        self.assertEqual(lookedUp, [name])
        self.assertEqual(resolvedTo, [ip])
        # Make sure that any timeout-related stuff gets cleaned up.
        reactor._clock.advance(timeout + 1)
        self.assertEqual(reactor._clock.calls, [])
    def test_failure(self):
        """
        L{ThreadedResolver.getHostByName} returns a L{Deferred} which fires a
        L{Failure} if the call to L{socket.gethostbyname} raises an exception.
        """
        timeout = 30
        reactor = FakeReactor()
        self.addCleanup(reactor._stop)
        def fakeGetHostByName(name):
            raise IOError("ENOBUFS (this is a funny joke)")
        self.patch(socket, 'gethostbyname', fakeGetHostByName)
        failedWith = []
        resolver = ThreadedResolver(reactor)
        d = resolver.getHostByName("some.name", (timeout,))
        self.assertFailure(d, DNSLookupError)
        d.addCallback(failedWith.append)
        reactor._runThreadCalls()
        self.assertEqual(len(failedWith), 1)
        # Make sure that any timeout-related stuff gets cleaned up.
        reactor._clock.advance(timeout + 1)
        self.assertEqual(reactor._clock.calls, [])
    def test_timeout(self):
        """
        If L{socket.gethostbyname} does not complete before the specified
        timeout elapsed, the L{Deferred} returned by
        L{ThreadedResolver.getHostByName} fails with L{DNSLookupError}.
        """
        timeout = 10
        reactor = FakeReactor()
        self.addCleanup(reactor._stop)
        result = Queue()
        # The fake lookup blocks on the queue, simulating a hung resolution.
        def fakeGetHostByName(name):
            raise result.get()
        self.patch(socket, 'gethostbyname', fakeGetHostByName)
        failedWith = []
        resolver = ThreadedResolver(reactor)
        d = resolver.getHostByName("some.name", (timeout,))
        self.assertFailure(d, DNSLookupError)
        d.addCallback(failedWith.append)
        reactor._clock.advance(timeout - 1)
        self.assertEqual(failedWith, [])
        reactor._clock.advance(1)
        self.assertEqual(len(failedWith), 1)
        # Eventually the socket.gethostbyname does finish - in this case, with
        # an exception. Nobody cares, though.
        result.put(IOError("The I/O was errorful"))
class DelayedCallTests(TestCase):
    """
    Tests for L{DelayedCall}.
    """
    def _getDelayedCallAt(self, time):
        """
        Get a L{DelayedCall} instance at a given C{time}.
        @param time: The absolute time at which the returned L{DelayedCall}
            will be scheduled.
        """
        def noop(call):
            pass
        return DelayedCall(time, lambda: None, (), {}, noop, noop, None)
    def setUp(self):
        """
        Create two L{DelayedCall} instances scheduled to run at different
        times.
        """
        self.zero = self._getDelayedCallAt(0)
        self.one = self._getDelayedCallAt(1)
    def test_str(self):
        """
        The string representation of a L{DelayedCall} instance, as returned by
        C{str}, includes the unsigned id of the instance, as well as its state,
        the function to be called, and the function arguments.
        """
        def nothing():
            pass
        dc = DelayedCall(12, nothing, (3, ), {"A": 5}, None, None, lambda: 1.5)
        ids = {dc: 200}
        # Make id() deterministic for this one object so the repr below is
        # predictable: 200 == 0xc8.
        def fakeID(obj):
            try:
                return ids[obj]
            except (TypeError, KeyError):
                return id(obj)
        self.addCleanup(setIDFunction, setIDFunction(fakeID))
        self.assertEquals(
            str(dc),
            "<DelayedCall 0xc8 [10.5s] called=0 cancelled=0 nothing(3, A=5)>")
    def test_lt(self):
        """
        For two instances of L{DelayedCall} C{a} and C{b}, C{a < b} is true
        if and only if C{a} is scheduled to run before C{b}.
        """
        zero, one = self.zero, self.one
        self.assertTrue(zero < one)
        self.assertFalse(one < zero)
        self.assertFalse(zero < zero)
        self.assertFalse(one < one)
    def test_le(self):
        """
        For two instances of L{DelayedCall} C{a} and C{b}, C{a <= b} is true
        if and only if C{a} is scheduled to run before C{b} or at the same
        time as C{b}.
        """
        zero, one = self.zero, self.one
        self.assertTrue(zero <= one)
        self.assertFalse(one <= zero)
        self.assertTrue(zero <= zero)
        self.assertTrue(one <= one)
    def test_gt(self):
        """
        For two instances of L{DelayedCall} C{a} and C{b}, C{a > b} is true
        if and only if C{a} is scheduled to run after C{b}.
        """
        zero, one = self.zero, self.one
        self.assertTrue(one > zero)
        self.assertFalse(zero > one)
        self.assertFalse(zero > zero)
        self.assertFalse(one > one)
    def test_ge(self):
        """
        For two instances of L{DelayedCall} C{a} and C{b}, C{a >= b} is true
        if and only if C{a} is scheduled to run after C{b} or at the same
        time as C{b}.
        """
        zero, one = self.zero, self.one
        self.assertTrue(one >= zero)
        self.assertFalse(zero >= one)
        self.assertTrue(zero >= zero)
        self.assertTrue(one >= one)
    def test_eq(self):
        """
        A L{DelayedCall} instance is only equal to itself.
        """
        # Explicitly use == here, instead of assertEquals, to be more
        # confident __eq__ is being tested.
        self.assertFalse(self.zero == self.one)
        self.assertTrue(self.zero == self.zero)
        self.assertTrue(self.one == self.one)
    def test_ne(self):
        """
        A L{DelayedCall} instance is not equal to any other object.
        """
        # Explicitly use != here, instead of assertEquals, to be more
        # confident __ne__ is being tested.
        self.assertTrue(self.zero != self.one)
        self.assertFalse(self.zero != self.zero)
        self.assertFalse(self.one != self.one)
| apache-2.0 |
wolfgangz2013/rt-thread | tools/mkdist.py | 1 | 14164 | #
# File : mkdir.py
# This file is part of RT-Thread RTOS
# COPYRIGHT (C) 2006 - 2018, RT-Thread Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Change Logs:
# Date Author Notes
# 2017-10-04 Bernard The first version
import os
import shutil
from shutil import ignore_patterns
def do_copy_file(src, dst):
    """Copy a single file, creating the destination directory when missing.

    Silently does nothing when the source file does not exist.
    """
    if not os.path.exists(src):
        return
    dst_dir = os.path.dirname(dst)
    if not os.path.exists(dst_dir):
        os.makedirs(dst_dir)
    shutil.copy2(src, dst)
def do_copy_folder(src_dir, dst_dir, ignore=None):
    """Replace dst_dir with a fresh copy of src_dir.

    Does nothing when src_dir does not exist.  When a stale destination
    cannot be removed, a message is printed and the copy is skipped.

    ignore: optional callable for shutil.copytree, e.g. ignore_patterns(...).
    """
    # check source directory
    if not os.path.exists(src_dir):
        return
    try:
        if os.path.exists(dst_dir):
            shutil.rmtree(dst_dir)
    except OSError:
        # narrowed from a bare 'except:' so programming errors (and
        # KeyboardInterrupt) are no longer swallowed here
        print('Deletes folder: %s failed.' % dst_dir)
        return
    shutil.copytree(src_dir, dst_dir, ignore=ignore)
# extensions considered source files, and the accumulated result list
source_ext = ['c', 'h', 's', 'S', 'cpp', 'xpm']
source_list = []
def walk_children(child):
    """Recursively collect source files (matched by extension) reachable
    from a SCons node into the global source_list, without duplicates."""
    global source_list
    global source_ext
    full_path = child.rfile().abspath
    # BUG FIX: the old rsplit('.', 1)[1] raised IndexError for files with
    # no extension and misparsed dots inside directory names;
    # os.path.splitext handles both cases.
    file_type = os.path.splitext(full_path)[1][1:]
    if file_type in source_ext:
        if full_path not in source_list:
            source_list.append(full_path)
    children = child.all_children()
    if children != []:
        for item in children:
            walk_children(item)
def walk_kconfig(RTT_ROOT, source_list):
    """Append every Kconfig/KConfig file found under RTT_ROOT to source_list,
    skipping any directory whose path mentions bsp, .git or tools."""
    skip_words = ('bsp', '.git', 'tools')
    for parent, dirnames, filenames in os.walk(RTT_ROOT):
        if any(word in parent for word in skip_words):
            continue
        for kconfig_name in ('Kconfig', 'KConfig'):
            if kconfig_name in filenames:
                source_list.append(os.path.join(parent, kconfig_name))
def bsp_copy_files(bsp_root, dist_dir):
    """Copy the whole BSP tree into dist_dir, leaving out build products."""
    excluded = ignore_patterns('build', 'dist', '*.pyc', '*.old', '*.map',
                               'rtthread.bin', '.sconsign.dblite', '*.elf',
                               '*.axf', 'cconfig.h')
    do_copy_folder(os.path.join(bsp_root), dist_dir, excluded)
def bsp_update_sconstruct(dist_dir):
    """Patch the dist copy of SConstruct so RTT_ROOT defaults to the bundled
    'rt-thread' directory when the environment does not define it."""
    sconstruct = os.path.join(dist_dir, 'SConstruct')
    with open(sconstruct, 'r') as f:
        lines = f.readlines()
    with open(sconstruct, 'w') as f:
        for line in lines:
            # inject the default right before the line that extends
            # sys.path with RTT_ROOT/tools
            if 'RTT_ROOT' in line and 'sys.path' in line:
                f.write('# set RTT_ROOT\n')
                f.write('if not os.getenv("RTT_ROOT"): \n    RTT_ROOT="rt-thread"\n\n')
            f.write(line)
def bsp_update_kconfig(dist_dir):
    """Point the dist copy of Kconfig at the bundled rt-thread directory by
    rewriting the 'default' of the RTT_ROOT config entry."""
    # change RTT_ROOT in Kconfig
    kconfig = os.path.join(dist_dir, 'Kconfig')
    if not os.path.isfile(kconfig):
        return
    with open(kconfig, 'r') as f:
        data = f.readlines()
    with open(kconfig, 'w') as f:
        found = 0
        for line in data:
            if line.find('RTT_ROOT') != -1:
                found = 1
            if line.find('default') != -1 and found:
                position = line.find('default')
                # BUG FIX: Kconfig syntax is 'default "value"'; the previous
                # code emitted an invalid 'default: "rt-thread"' with a colon.
                line = line[0:position] + 'default "rt-thread"\n'
                found = 0
            f.write(line)
def bsp_update_kconfig_library(dist_dir):
    """Rewrite the '../libraries' Kconfig source path (from the original BSP
    layout) so it points at the 'libraries' folder bundled in the dist."""
    kconfig = os.path.join(dist_dir, 'Kconfig')
    if not os.path.isfile(kconfig):
        return
    with open(kconfig, 'r') as f:
        content = f.readlines()
    with open(kconfig, 'w') as f:
        seen_rtt_root = 0
        for line in content:
            if 'RTT_ROOT' in line:
                seen_rtt_root = 1
            marker = line.find('../libraries')
            if marker != -1 and seen_rtt_root:
                line = line[:marker] + 'libraries/Kconfig"\n'
                seen_rtt_root = 0
            f.write(line)
def bs_update_ide_project(bsp_root, rtt_root):
    """Regenerate IDE project files in the dist by invoking scons once per
    target whose template ships with the BSP."""
    import subprocess
    # targets that have template files by default
    targets = {'mdk4': ('keil', 'armcc'),
               'mdk5': ('keil', 'armcc'),
               'iar': ('iar', 'iar'),
               'vs': ('msvc', 'cl'),
               'vs2012': ('msvc', 'cl'),
               'cdk': ('gcc', 'gcc')}
    scons_env = os.environ.copy()
    scons_env['RTT_ROOT'] = rtt_root
    # NOTE(review): scons_env is built but never handed to Popen -- confirm
    # whether it was meant to be passed via env=scons_env.
    for target in targets:
        child = subprocess.Popen('scons --target=' + target, cwd=bsp_root,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE, shell=True)
        stdout, stderr = child.communicate()
        if child.returncode == 0:
            print('update %s project' % target)
def zip_dist(dist_dir, dist_name):
    """Pack *dist_dir* into '<dist_dir>.zip'.

    Archive member names are made relative to the parent of *dist_dir*, so
    the zip unpacks into a single top-level folder.  *dist_name* is kept in
    the signature for interface compatibility but is unused — the archive
    is named after *dist_dir* itself.
    """
    import zipfile
    zip_filename = os.path.join(dist_dir)
    # Bug fix: use a context manager so the archive is flushed and closed
    # even if a write fails (the original leaked the handle on error), and
    # avoid shadowing the `zip` builtin.
    with zipfile.ZipFile(zip_filename + '.zip', 'w') as archive:
        pre_len = len(os.path.dirname(dist_dir))
        for parent, dirnames, filenames in os.walk(dist_dir):
            for filename in filenames:
                pathfile = os.path.join(parent, filename)
                arcname = pathfile[pre_len:].strip(os.path.sep)
                archive.write(pathfile, arcname)
def MkDist_Strip(program, BSP_ROOT, RTT_ROOT, Env):
    """Build a stripped distribution: the BSP plus only the RT-Thread
    sources actually referenced by *program*, packed into
    ``<BSP_ROOT>/dist-strip/<bsp name>.zip``.

    NOTE(review): relies on module-level helpers (bsp_copy_files,
    walk_children, walk_kconfig, do_copy_file, do_copy_folder) and the
    module-global ``source_list`` that walk_children populates — confirm
    their contracts in the rest of the file.
    """
    global source_list
    print('make distribution and strip useless files....')
    dist_name = os.path.basename(BSP_ROOT)
    dist_dir = os.path.join(BSP_ROOT, 'dist-strip', dist_name)
    target_path = os.path.join(dist_dir, 'rt-thread')
    # copy the whole BSP directory first
    print('=> %s' % os.path.basename(BSP_ROOT))
    bsp_copy_files(BSP_ROOT, dist_dir)
    # copy stm32 bsp libiary files
    if os.path.basename(os.path.dirname(BSP_ROOT)) == 'stm32':
        print("=> copy stm32 bsp library")
        library_path = os.path.join(os.path.dirname(BSP_ROOT), 'libraries')
        library_dir = os.path.join(dist_dir, 'libraries')
        bsp_copy_files(os.path.join(library_path, 'HAL_Drivers'), os.path.join(library_dir, 'HAL_Drivers'))
        bsp_copy_files(os.path.join(library_path, Env['bsp_lib_type']), os.path.join(library_dir, Env['bsp_lib_type']))
        shutil.copyfile(os.path.join(library_path, 'Kconfig'), os.path.join(library_dir, 'Kconfig'))
    # do bsp special dist handle
    if 'dist_handle' in Env:
        print("=> start dist handle")
        dist_handle = Env['dist_handle']
        dist_handle(BSP_ROOT)
    # get all source files from program (fills the global source_list)
    for item in program:
        walk_children(item)
    source_list.sort()
    # copy the source files without libcpu and components/libc in RT-Thread
    target_list = []
    libcpu_dir = os.path.join(RTT_ROOT, 'libcpu').lower()
    libc_dir = os.path.join(RTT_ROOT, 'components', 'libc', 'compilers').lower()
    sal_dir = os.path.join(RTT_ROOT, 'components', 'net', 'sal_socket').lower()
    sources_include_sal = False
    for src in source_list:
        if src.lower().startswith(BSP_ROOT.lower()):
            # BSP files were already copied wholesale above
            continue
        # skip libc and libcpu dir (copied as complete folders later)
        if src.lower().startswith(libcpu_dir):
            continue
        if src.lower().startswith(libc_dir):
            continue
        if src.lower().startswith(sal_dir):
            # remember that SAL is used so the whole folder is copied later
            sources_include_sal = True
            continue
        if src.lower().startswith(RTT_ROOT.lower()):
            target_list.append(src)
    source_list = target_list
    # get source directory: every ancestor dir of every kept source file
    src_dir = []
    for src in source_list:
        src = src.replace(RTT_ROOT, '')
        if src[0] == os.sep or src[0] == '/':
            src = src[1:]
        path = os.path.dirname(src)
        sub_path = path.split(os.sep)
        full_path = RTT_ROOT
        for item in sub_path:
            full_path = os.path.join(full_path, item)
            if full_path not in src_dir:
                src_dir.append(full_path)
    # add all of SConscript files
    for item in src_dir:
        source_list.append(os.path.join(item, 'SConscript'))
    # add all of Kconfig files
    walk_kconfig(RTT_ROOT, source_list)
    # copy all files to target directory, mirroring the tree under RTT_ROOT
    source_list.sort()
    for src in source_list:
        dst = src.replace(RTT_ROOT, '')
        if dst[0] == os.sep or dst[0] == '/':
            dst = dst[1:]
        print('=> %s' % dst)
        dst = os.path.join(target_path, dst)
        do_copy_file(src, dst)
    # copy tools directory and top-level metadata files
    print('=> tools')
    do_copy_folder(os.path.join(RTT_ROOT, 'tools'), os.path.join(target_path, 'tools'), ignore_patterns('*.pyc'))
    do_copy_file(os.path.join(RTT_ROOT, 'Kconfig'), os.path.join(target_path, 'Kconfig'))
    do_copy_file(os.path.join(RTT_ROOT, 'AUTHORS'), os.path.join(target_path, 'AUTHORS'))
    do_copy_file(os.path.join(RTT_ROOT, 'COPYING'), os.path.join(target_path, 'COPYING'))
    do_copy_file(os.path.join(RTT_ROOT, 'README.md'), os.path.join(target_path, 'README.md'))
    do_copy_file(os.path.join(RTT_ROOT, 'README_zh.md'), os.path.join(target_path, 'README_zh.md'))
    # libc compilers are always shipped complete
    print('=> %s' % os.path.join('components', 'libc', 'compilers'))
    do_copy_folder(os.path.join(RTT_ROOT, 'components', 'libc', 'compilers'), os.path.join(target_path, 'components', 'libc', 'compilers'))
    if sources_include_sal:
        print('=> %s' % os.path.join('components', 'net', 'sal_socket'))
        do_copy_folder(os.path.join(RTT_ROOT, 'components', 'net', 'sal_socket'), os.path.join(target_path, 'components', 'net', 'sal_socket'))
    # copy all libcpu/ARCH directory for the configured architecture
    import rtconfig
    print('=> %s' % (os.path.join('libcpu', rtconfig.ARCH, rtconfig.CPU)))
    do_copy_folder(os.path.join(RTT_ROOT, 'libcpu', rtconfig.ARCH, rtconfig.CPU), os.path.join(target_path, 'libcpu', rtconfig.ARCH, rtconfig.CPU))
    if os.path.exists(os.path.join(RTT_ROOT, 'libcpu', rtconfig.ARCH, 'common')):
        print('=> %s' % (os.path.join('libcpu', rtconfig.ARCH, 'common')))
        do_copy_folder(os.path.join(RTT_ROOT, 'libcpu', rtconfig.ARCH, 'common'), os.path.join(target_path, 'libcpu', rtconfig.ARCH, 'common'))
    do_copy_file(os.path.join(RTT_ROOT, 'libcpu', 'Kconfig'), os.path.join(target_path, 'libcpu', 'Kconfig'))
    do_copy_file(os.path.join(RTT_ROOT, 'libcpu', 'SConscript'), os.path.join(target_path, 'libcpu', 'SConscript'))
    # change RTT_ROOT in SConstruct
    bsp_update_sconstruct(dist_dir)
    # change RTT_ROOT in Kconfig
    bsp_update_kconfig(dist_dir)
    bsp_update_kconfig_library(dist_dir)
    # update all project files
    bs_update_ide_project(dist_dir, target_path)
    # make zip package
    zip_dist(dist_dir, dist_name)
    print('done!')
def MkDist(program, BSP_ROOT, RTT_ROOT, Env):
    """Build a full distribution: the BSP plus the complete RT-Thread tree
    (components, include, libcpu/ARCH, src, tools), packed into
    ``<BSP_ROOT>/dist/<bsp name>.zip``.

    NOTE(review): relies on module-level helpers (bsp_copy_files,
    do_copy_file, do_copy_folder, ignore_patterns) defined elsewhere in
    this file.
    """
    print('make distribution....')
    dist_name = os.path.basename(BSP_ROOT)
    dist_dir = os.path.join(BSP_ROOT, 'dist', dist_name)
    target_path = os.path.join(dist_dir, 'rt-thread')
    # copy BSP files
    print('=> %s' % os.path.basename(BSP_ROOT))
    bsp_copy_files(BSP_ROOT, dist_dir)
    # copy stm32 bsp libiary files
    if os.path.basename(os.path.dirname(BSP_ROOT)) == 'stm32':
        print("=> copy stm32 bsp library")
        library_path = os.path.join(os.path.dirname(BSP_ROOT), 'libraries')
        library_dir = os.path.join(dist_dir, 'libraries')
        bsp_copy_files(os.path.join(library_path, 'HAL_Drivers'), os.path.join(library_dir, 'HAL_Drivers'))
        bsp_copy_files(os.path.join(library_path, Env['bsp_lib_type']), os.path.join(library_dir, Env['bsp_lib_type']))
        shutil.copyfile(os.path.join(library_path, 'Kconfig'), os.path.join(library_dir, 'Kconfig'))
    # do bsp special dist handle
    if 'dist_handle' in Env:
        print("=> start dist handle")
        dist_handle = Env['dist_handle']
        dist_handle(BSP_ROOT)
    # copy components directory
    print('=> components')
    do_copy_folder(os.path.join(RTT_ROOT, 'components'), os.path.join(target_path, 'components'))
    # skip documentation directory
    # skip examples
    # copy include directory
    print('=> include')
    do_copy_folder(os.path.join(RTT_ROOT, 'include'), os.path.join(target_path, 'include'))
    # copy all libcpu/ARCH directory
    print('=> libcpu')
    import rtconfig
    do_copy_folder(os.path.join(RTT_ROOT, 'libcpu', rtconfig.ARCH), os.path.join(target_path, 'libcpu', rtconfig.ARCH))
    do_copy_file(os.path.join(RTT_ROOT, 'libcpu', 'Kconfig'), os.path.join(target_path, 'libcpu', 'Kconfig'))
    do_copy_file(os.path.join(RTT_ROOT, 'libcpu', 'SConscript'), os.path.join(target_path, 'libcpu', 'SConscript'))
    # copy src directory
    print('=> src')
    do_copy_folder(os.path.join(RTT_ROOT, 'src'), os.path.join(target_path, 'src'))
    # copy tools directory (compiled .pyc files are excluded)
    print('=> tools')
    do_copy_folder(os.path.join(RTT_ROOT, 'tools'), os.path.join(target_path, 'tools'), ignore_patterns('*.pyc'))
    do_copy_file(os.path.join(RTT_ROOT, 'Kconfig'), os.path.join(target_path, 'Kconfig'))
    do_copy_file(os.path.join(RTT_ROOT, 'AUTHORS'), os.path.join(target_path, 'AUTHORS'))
    do_copy_file(os.path.join(RTT_ROOT, 'COPYING'), os.path.join(target_path, 'COPYING'))
    do_copy_file(os.path.join(RTT_ROOT, 'README.md'), os.path.join(target_path, 'README.md'))
    do_copy_file(os.path.join(RTT_ROOT, 'README_zh.md'), os.path.join(target_path, 'README_zh.md'))
    # change RTT_ROOT in SConstruct
    bsp_update_sconstruct(dist_dir)
    # change RTT_ROOT in Kconfig
    bsp_update_kconfig(dist_dir)
    bsp_update_kconfig_library(dist_dir)
    # update all project files
    bs_update_ide_project(dist_dir, target_path)
    # make zip package
    zip_dist(dist_dir, dist_name)
    print('done!')
| apache-2.0 |
flyher/pymo | android/pgs4a-0.9.6/python-install/lib/python2.7/sqlite3/dbapi2.py | 161 | 2615 | #-*- coding: ISO-8859-1 -*-
# pysqlite2/dbapi2.py: the DB-API 2.0 interface
#
# Copyright (C) 2004-2005 Gerhard Häring <gh@ghaering.de>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import datetime
import time
from _sqlite3 import *
paramstyle = "qmark"
threadsafety = 1
apilevel = "2.0"
# DB-API 2.0 type constructors: expose the datetime classes under the names
# the specification requires.
Date = datetime.date
Time = datetime.time
Timestamp = datetime.datetime

def DateFromTicks(ticks):
    """Build a Date from a Unix timestamp, interpreted in local time."""
    year, month, day = time.localtime(ticks)[:3]
    return Date(year, month, day)

def TimeFromTicks(ticks):
    """Build a Time from a Unix timestamp, interpreted in local time."""
    hour, minute, second = time.localtime(ticks)[3:6]
    return Time(hour, minute, second)

def TimestampFromTicks(ticks):
    """Build a Timestamp from a Unix timestamp, interpreted in local time."""
    return Timestamp(*time.localtime(ticks)[:6])
version_info = tuple([int(x) for x in version.split(".")])
sqlite_version_info = tuple([int(x) for x in sqlite_version.split(".")])
Binary = buffer
def register_adapters_and_converters():
    """Install the default date/timestamp adapters and converters.

    Dates are stored as ISO strings ('YYYY-MM-DD'); timestamps as
    'YYYY-MM-DD HH:MM:SS[.ffffff]' with a space separating date and time.
    """
    def adapt_date(val):
        # date -> ISO-8601 string
        return val.isoformat()

    def adapt_datetime(val):
        # datetime -> ISO string with a space between date and time parts
        return val.isoformat(" ")

    def convert_date(val):
        year, month, day = map(int, val.split("-"))
        return datetime.date(year, month, day)

    def convert_timestamp(val):
        datepart, timepart = val.split(" ")
        year, month, day = map(int, datepart.split("-"))
        timepart_full = timepart.split(".")
        hours, minutes, seconds = map(int, timepart_full[0].split(":"))
        # fractional seconds are optional in the stored representation
        if len(timepart_full) == 2:
            microseconds = int(timepart_full[1])
        else:
            microseconds = 0
        return datetime.datetime(year, month, day,
                                 hours, minutes, seconds, microseconds)

    register_adapter(datetime.date, adapt_date)
    register_adapter(datetime.datetime, adapt_datetime)
    register_converter("date", convert_date)
    register_converter("timestamp", convert_timestamp)
# Clean up namespace
del(register_adapters_and_converters)
| mit |
GeoCat/QGIS | scripts/parse_dash_results.py | 8 | 14485 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
****************************3***********************************************
parse_dash_results.py
---------------------
Date : October 2016
Copyright : (C) 2016 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import range
__author__ = 'Nyall Dawson'
__date__ = 'October 2016'
__copyright__ = '(C) 2016, Nyall Dawson'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import sys
import argparse
import urllib.request
import urllib.parse
import urllib.error
import re
from bs4 import BeautifulSoup
from PyQt5.QtGui import (
QImage, QColor, qRed, qBlue, qGreen, qAlpha, qRgb, QPixmap)
from PyQt5.QtWidgets import (QDialog,
QApplication,
QLabel,
QVBoxLayout,
QHBoxLayout,
QGridLayout,
QPushButton,
QDoubleSpinBox,
QMessageBox,
QWidget,
QScrollArea)
import struct
import glob
dash_url = 'https://dash.orfeo-toolbox.org'
def error(msg):
    """Print *msg* and abort the program with exit status 1."""
    print(msg)
    raise SystemExit(1)
def colorDiff(c1, c2):
    """Return the largest per-channel (R, G, B, A) difference between two
    QRgb values."""
    channels = (qRed, qGreen, qBlue, qAlpha)
    return max(abs(extract(c1) - extract(c2)) for extract in channels)
def imageFromPath(path):
    """Load a QImage either from a local file path or from a https:// or
    file:// URL."""
    print(path)
    if path[:8] == 'https://' or path[:7] == 'file://':
        # fetch remote image
        print('fetching remote!')
        image = QImage()
        image.loadFromData(urllib.request.urlopen(path).read())
        return image
    print('using local!')
    return QImage(path)
class ResultHandler(QDialog):
    """Interactive dialog for reviewing dash render-check failures.

    For each failing test it shows the control image, the rendered image,
    the per-pixel difference and the current/new anomaly masks, and lets
    the user grow and save the mask so known differences are tolerated.
    """

    def __init__(self, parent=None):
        """Build the static widget layout; images are filled in later."""
        super(ResultHandler, self).__init__()
        self.setWindowTitle('Dash results')
        self.control_label = QLabel()
        self.rendered_label = QLabel()
        self.diff_label = QLabel()
        self.mask_label = QLabel()
        self.new_mask_label = QLabel()
        self.scrollArea = QScrollArea()
        self.widget = QWidget()
        self.test_name_label = QLabel()
        grid = QGridLayout()
        grid.addWidget(self.test_name_label, 0, 0)
        grid.addWidget(QLabel('Control'), 1, 0)
        grid.addWidget(QLabel('Rendered'), 1, 1)
        grid.addWidget(QLabel('Difference'), 1, 2)
        grid.addWidget(self.control_label, 2, 0)
        grid.addWidget(self.rendered_label, 2, 1)
        grid.addWidget(self.diff_label, 2, 2)
        grid.addWidget(QLabel('Current Mask'), 3, 0)
        grid.addWidget(QLabel('New Mask'), 3, 1)
        grid.addWidget(self.mask_label, 4, 0)
        grid.addWidget(self.new_mask_label, 4, 1)
        self.widget.setLayout(grid)
        self.scrollArea.setWidget(self.widget)
        v_layout = QVBoxLayout()
        v_layout.addWidget(self.scrollArea, 1)
        next_image_button = QPushButton()
        next_image_button.setText('Skip')
        next_image_button.pressed.connect(self.load_next)
        self.overload_spin = QDoubleSpinBox()
        self.overload_spin.setMinimum(1)
        self.overload_spin.setMaximum(255)
        self.overload_spin.setValue(1)
        # Changing the multiplier invalidates any previewed mask, so force a
        # fresh preview before the mask can be saved again.
        self.overload_spin.valueChanged.connect(lambda: save_mask_button.setEnabled(False))
        preview_mask_button = QPushButton()
        preview_mask_button.setText('Preview New Mask')
        preview_mask_button.pressed.connect(self.preview_mask)
        preview_mask_button.pressed.connect(lambda: save_mask_button.setEnabled(True))
        save_mask_button = QPushButton()
        save_mask_button.setText('Save New Mask')
        save_mask_button.pressed.connect(self.save_mask)
        button_layout = QHBoxLayout()
        button_layout.addWidget(next_image_button)
        button_layout.addWidget(QLabel('Mask diff multiplier:'))
        button_layout.addWidget(self.overload_spin)
        button_layout.addWidget(preview_mask_button)
        button_layout.addWidget(save_mask_button)
        button_layout.addStretch()
        v_layout.addLayout(button_layout)
        self.setLayout(v_layout)

    def closeEvent(self, event):
        """Treat closing the window as rejecting the dialog."""
        self.reject()

    def parse_url(self, url):
        """Scrape a dash results page and queue every rendered test image,
        then start reviewing the first one."""
        print('Fetching dash results from: {}'.format(url))
        page = urllib.request.urlopen(url)
        soup = BeautifulSoup(page, "lxml")

        # build up list of rendered images
        measurement_img = [img for img in soup.find_all('img') if
                           img.get('alt') and img.get('alt').startswith('Rendered Image')]

        images = {}
        for img in measurement_img:
            m = re.search(r'Rendered Image (.*?)\s', img.get('alt'))
            test_name = m.group(1)
            rendered_image = img.get('src')
            images[test_name] = '{}/{}'.format(dash_url, rendered_image)

        if images:
            print('found images:\n{}'.format(images))
        else:
            print('no images found\n')
        self.images = images
        self.load_next()

    def load_next(self):
        """Pop the next failing test and display it; exit when none remain."""
        if not self.images:
            # all done
            self.accept()
            exit(0)

        test_name, rendered_image = self.images.popitem()
        self.test_name_label.setText(test_name)
        control_image = self.get_control_image_path(test_name)
        if not control_image:
            self.load_next()
            return

        self.mask_image_path = control_image[:-4] + '_mask.png'
        self.load_images(control_image, rendered_image, self.mask_image_path)

    def load_images(self, control_image_path, rendered_image_path, mask_image_path):
        """Load control/rendered/mask images, build the diff, and show all
        of them; skips to the next test when there is no mismatch."""
        self.control_image = imageFromPath(control_image_path)
        if not self.control_image:
            error('Could not read control image {}'.format(control_image_path))

        self.rendered_image = imageFromPath(rendered_image_path)
        if not self.rendered_image:
            error(
                'Could not read rendered image {}'.format(rendered_image_path))
        if not self.rendered_image.width() == self.control_image.width() or not self.rendered_image.height() == self.control_image.height():
            print(
                'Size mismatch - control image is {}x{}, rendered image is {}x{}'.format(
                    self.control_image.width(),
                    self.control_image.height(),
                    self.rendered_image.width(),
                    self.rendered_image.height()))

        # read current mask, if it exists; otherwise start from an all-zero
        # (fully strict) mask of the control image's size
        self.mask_image = imageFromPath(mask_image_path)
        if self.mask_image.isNull():
            print(
                'Mask image does not exist, creating {}'.format(mask_image_path))
            self.mask_image = QImage(
                self.control_image.width(), self.control_image.height(), QImage.Format_ARGB32)
            self.mask_image.fill(QColor(0, 0, 0))

        self.diff_image = self.create_diff_image(
            self.control_image, self.rendered_image, self.mask_image)
        if not self.diff_image:
            self.load_next()
            return

        self.control_label.setPixmap(QPixmap.fromImage(self.control_image))
        self.rendered_label.setPixmap(QPixmap.fromImage(self.rendered_image))
        self.mask_label.setPixmap(QPixmap.fromImage(self.mask_image))
        self.diff_label.setPixmap(QPixmap.fromImage(self.diff_image))
        self.preview_mask()

    def preview_mask(self):
        """Recompute and display the grown mask with the current multiplier."""
        self.new_mask_image = self.create_mask(
            self.control_image, self.rendered_image, self.mask_image, self.overload_spin.value())
        self.new_mask_label.setPixmap(QPixmap.fromImage(self.new_mask_image))

    def save_mask(self):
        """Write the previewed mask to disk and move to the next test."""
        self.new_mask_image.save(self.mask_image_path, "png")
        self.load_next()

    def create_mask(self, control_image, rendered_image, mask_image, overload=1):
        """Return a new mask grown so every current mismatch is tolerated.

        Per-pixel tolerances are stored in the red channel; 255 means the
        pixel is ignored entirely.  *overload* multiplies the observed
        difference before it is written into the mask.
        """
        max_width = min(rendered_image.width(), control_image.width())
        max_height = min(rendered_image.height(), control_image.height())

        new_mask_image = QImage(
            control_image.width(), control_image.height(), QImage.Format_ARGB32)
        new_mask_image.fill(QColor(0, 0, 0))

        # loop through pixels in rendered image and compare
        mismatch_count = 0
        linebytes = max_width * 4
        for y in range(max_height):
            control_scanline = control_image.constScanLine(
                y).asstring(linebytes)
            rendered_scanline = rendered_image.constScanLine(
                y).asstring(linebytes)
            mask_scanline = mask_image.scanLine(y).asstring(linebytes)

            for x in range(max_width):
                currentTolerance = qRed(
                    struct.unpack('I', mask_scanline[x * 4:x * 4 + 4])[0])
                if currentTolerance == 255:
                    # ignore pixel
                    new_mask_image.setPixel(
                        x, y, qRgb(currentTolerance, currentTolerance, currentTolerance))
                    continue

                expected_rgb = struct.unpack(
                    'I', control_scanline[x * 4:x * 4 + 4])[0]
                rendered_rgb = struct.unpack(
                    'I', rendered_scanline[x * 4:x * 4 + 4])[0]
                difference = min(
                    255, colorDiff(expected_rgb, rendered_rgb) * overload)

                if difference > currentTolerance:
                    # update mask image
                    new_mask_image.setPixel(
                        x, y, qRgb(difference, difference, difference))
                    mismatch_count += 1
                else:
                    new_mask_image.setPixel(
                        x, y, qRgb(currentTolerance, currentTolerance, currentTolerance))
        return new_mask_image

    def get_control_image_path(self, test_name):
        """Return the control image path for *test_name*, or None when zero
        or several candidates match (after warning the user)."""
        if os.path.isfile(test_name):
            # Bug fix: the original returned the undefined name `path` here,
            # raising NameError whenever an existing file path was passed.
            return test_name

        # else try and find matching test image
        script_folder = os.path.dirname(os.path.realpath(sys.argv[0]))
        control_images_folder = os.path.join(
            script_folder, '../tests/testdata/control_images')

        matching_control_images = [x[0]
                                   for x in os.walk(control_images_folder) if test_name in x[0]]
        if len(matching_control_images) > 1:
            QMessageBox.warning(
                self, 'Result', 'Found multiple matching control images for {}'.format(test_name))
            return None
        elif len(matching_control_images) == 0:
            QMessageBox.warning(
                self, 'Result', 'No matching control images found for {}'.format(test_name))
            return None

        found_control_image_path = matching_control_images[0]

        # check for a single matching expected image
        images = glob.glob(os.path.join(found_control_image_path, '*.png'))
        filtered_images = [i for i in images if not i[-9:] == '_mask.png']
        if len(filtered_images) > 1:
            error(
                'Found multiple matching control images for {}'.format(test_name))
        elif len(filtered_images) == 0:
            error('No matching control images found for {}'.format(test_name))

        found_image = filtered_images[0]
        print('Found matching control image: {}'.format(found_image))
        return found_image

    def create_diff_image(self, control_image, rendered_image, mask_image):
        """Return an image with mismatching pixels painted red, or None when
        every difference is within the mask's tolerance."""
        # loop through pixels in rendered image and compare
        mismatch_count = 0
        max_width = min(rendered_image.width(), control_image.width())
        max_height = min(rendered_image.height(), control_image.height())
        linebytes = max_width * 4

        diff_image = QImage(
            control_image.width(), control_image.height(), QImage.Format_ARGB32)
        diff_image.fill(QColor(152, 219, 249))

        for y in range(max_height):
            control_scanline = control_image.constScanLine(
                y).asstring(linebytes)
            rendered_scanline = rendered_image.constScanLine(
                y).asstring(linebytes)
            mask_scanline = mask_image.scanLine(y).asstring(linebytes)

            for x in range(max_width):
                currentTolerance = qRed(
                    struct.unpack('I', mask_scanline[x * 4:x * 4 + 4])[0])
                if currentTolerance == 255:
                    # ignore pixel
                    continue

                expected_rgb = struct.unpack(
                    'I', control_scanline[x * 4:x * 4 + 4])[0]
                rendered_rgb = struct.unpack(
                    'I', rendered_scanline[x * 4:x * 4 + 4])[0]
                difference = colorDiff(expected_rgb, rendered_rgb)

                if difference > currentTolerance:
                    # update mask image
                    diff_image.setPixel(x, y, qRgb(255, 0, 0))
                    mismatch_count += 1

        if mismatch_count:
            return diff_image
        else:
            print('No mismatches')
            return None
def main():
    """Entry point: parse the dash results URL argument and run the
    interactive review dialog."""
    app = QApplication(sys.argv)
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('dash_url')
    parsed = arg_parser.parse_args()
    handler = ResultHandler()
    handler.parse_url(parsed.dash_url)
    handler.exec_()
if __name__ == '__main__':
main()
| gpl-2.0 |
dischinator/pyload | module/plugins/crypter/HoerbuchIn.py | 2 | 2738 | # -*- coding: utf-8 -*-
import re
import BeautifulSoup
from module.plugins.internal.Crypter import Crypter
class HoerbuchIn(Crypter):
    """Decrypter for hoerbuch.in / hoerbuch.us audiobook pages.

    Article pages are expanded into one package containing every hoster
    link found on the page; protection-folder URLs are resolved directly
    into their download links.
    """
    __name__ = "HoerbuchIn"
    __type__ = "crypter"
    __version__ = "0.66"
    __status__ = "testing"

    __pattern__ = r'http://(?:www\.)?hoerbuch\.us/(wp/horbucher/\d+/|tp/out\.php\?.+|protection/folder_\d+\.html)'
    __config__ = [("activated" , "bool" , "Activated" , True ),
                  ("use_premium" , "bool" , "Use premium account if available", True ),
                  ("folder_per_package", "Default;Yes;No", "Create folder for each package" , "Default")]

    __description__ = """Hoerbuch.in decrypter plugin"""
    __license__ = "GPLv3"
    __authors__ = [("spoob", "spoob@pyload.org"),
                   ("mkaay", "mkaay@mkaay.de")]

    # URL shapes recognised by this plugin: article pages, protection
    # folders, uploaded.net indirection pages and hoster redirect links.
    article = re.compile("http://(?:www\.)?hoerbuch\.us/wp/horbucher/\d+/.+/")
    protection = re.compile("http://(?:www\.)?hoerbuch\.us/protection/folder_\d+.html")
    uploaded = re.compile("http://(?:www\.)?hoerbuch\.us/protection/uploaded/(\w+)\.html")
    hoster_links = re.compile("http://(?:www\.)?hoerbuch\.us/wp/goto/Download/\d+/")

    def decrypt(self, pyfile):
        # Article page: collect every hoster link on the page and bundle
        # the decrypted results into a single package named after the file.
        self.pyfile = pyfile
        if self.article.match(pyfile.url):
            html = self.load(pyfile.url)
            soup = BeautifulSoup.BeautifulSoup(html, convertEntities=BeautifulSoup.BeautifulStoneSoup.HTML_ENTITIES)

            links = []
            for a in soup.findAll("a", attrs={'href': self.hoster_links}):
                for decrypted_link in self.decrypt_folder(a.get('href')):
                    links.append(decrypted_link)

            self.packages.append((pyfile.name, links, pyfile.name))
        else:
            # Direct protection-folder (or redirect) URL.
            self.links = self.decrypt_folder(pyfile.url)

    def decrypt_folder(self, url):
        """Resolve a hoster-redirect or protection-folder URL into the list
        of download links it protects."""
        m = self.hoster_links.search(url) or self.protection.search(url)
        if m is None:
            self.fail(_("Bad URL"))
        url = m.group(0)

        # Hoster redirect links forward to the real protection folder.
        if self.hoster_links.match(url):
            self.load(url)
            url = self.req.lastEffectiveURL

        # Posting viewed=adpg marks the ad page as seen so links appear.
        html = self.load(url, post={'viewed': "adpg"})
        self.pyfile.url = url

        links = []
        soup = BeautifulSoup.BeautifulSoup(html, convertEntities=BeautifulSoup.BeautifulStoneSoup.HTML_ENTITIES)
        for container in soup.findAll("div", attrs={'class': "container"}):
            href = container.a.get("href")
            # Rewrite uploaded.net indirection pages into direct file links.
            uploaded = self.uploaded.search(href)
            if uploaded is not None:
                href = "http://uploaded.net/file/%s" % uploaded.group(1)
            links.append(href)

        return links
| gpl-3.0 |
silklabs/silk | node_modules/node-gyp/gyp/pylib/gyp/MSVSUserFile.py | 2710 | 5094 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
class Writer(object):
  """Visual Studio XML user user file writer.

  Collects per-configuration DebugSettings nodes and serializes them as a
  .vcproj.user-style XML file via easy_xml.
  """

  def __init__(self, user_file_path, version, name):
    """Initializes the user file.

    Args:
      user_file_path: Path to the user file.
      version: Version info.
      name: Name of the user file.
    """
    self.user_file_path = user_file_path
    self.version = version
    self.name = name
    # Maps configuration name -> easy_xml spec list for that configuration.
    self.configurations = {}

  def AddConfig(self, name):
    """Adds a configuration to the project.

    Args:
      name: Configuration name.
    """
    self.configurations[name] = ['Configuration', {'Name': name}]

  def AddDebugSettings(self, config_name, command, environment = {},
                       working_directory=""):
    """Adds a DebugSettings node to the user file for a particular config.

    Args:
      command: command line to run. First element in the list is the
        executable. All elements of the command will be quoted if
        necessary.
      working_directory: other files which may trigger the rule. (optional)

    NOTE(review): `environment={}` is a mutable default argument; harmless
    here only because it is never mutated in place.
    """
    command = _QuoteWin32CommandLineArgs(command)

    abs_command = _FindCommandInPath(command[0])

    if environment and isinstance(environment, dict):
      env_list = ['%s="%s"' % (key, val)
                  for (key,val) in environment.iteritems()]
      environment = ' '.join(env_list)
    else:
      environment = ''

    n_cmd = ['DebugSettings',
             {'Command': abs_command,
              'WorkingDirectory': working_directory,
              'CommandArguments': " ".join(command[1:]),
              'RemoteMachine': socket.gethostname(),
              'Environment': environment,
              'EnvironmentMerge': 'true',
              # Currently these are all "dummy" values that we're just setting
              # in the default manner that MSVS does it. We could use some of
              # these to add additional capabilities, I suppose, but they might
              # not have parity with other platforms then.
              'Attach': 'false',
              'DebuggerType': '3', # 'auto' debugger
              'Remote': '1',
              'RemoteCommand': '',
              'HttpUrl': '',
              'PDBPath': '',
              'SQLDebugging': '',
              'DebuggerFlavor': '0',
              'MPIRunCommand': '',
              'MPIRunArguments': '',
              'MPIRunWorkingDirectory': '',
              'ApplicationCommand': '',
              'ApplicationArguments': '',
              'ShimCommand': '',
              'MPIAcceptMode': '',
              'MPIAcceptFilter': ''
             }]

    # Find the config, and add it if it doesn't exist.
    if config_name not in self.configurations:
      self.AddConfig(config_name)

    # Add the DebugSettings onto the appropriate config.
    self.configurations[config_name].append(n_cmd)

  def WriteIfChanged(self):
    """Writes the user file."""
    configs = ['Configurations']
    for config, spec in sorted(self.configurations.iteritems()):
      configs.append(spec)

    content = ['VisualStudioUserFile',
               {'Version': self.version.ProjectVersion(),
                'Name': self.name
               },
               configs]
    easy_xml.WriteXmlIfChanged(content, self.user_file_path,
                               encoding="Windows-1252")
| mit |
krishnab-datakind/mining-data-acquisition | data_gather/HandlerAssignEEEngineToRequest.py | 1 | 1833 |
#!/usr/bin/python
"""
Handler to assign Earth Engine Engine to request.
"""
## MIT License
##
## Copyright (c) 2017, krishna bhogaonker
## Permission is hereby granted, free of charge, to any person obtaining a ## copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
## The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__author__ = 'krishna bhogaonker'
__copyright__ = 'copyright 2017'
__credits__ = ['krishna bhogaonker']
__license__ = "MIT"
__version__ = '0.1.0'
__maintainer__ = 'krishna bhogaonker'
__email__ = 'cyclotomiq@gmail.com'
__status__ = 'pre-alpha'
from abcHandler import abcHandler
class HandlerAssignEEEngineToRequest(abcHandler):
    """Chain-of-responsibility handler that attaches the configured Earth
    Engine imagery collection to the request, then forwards the request to
    the next handler in the chain (when one is configured)."""

    def handle(self):
        collection_id = self.settings['imageryCollection'].get_id()
        self.request.set_eeCollection(collection_id)
        successor = self.successor
        if successor is not None:
            successor(self.request).handle()
if __name__ == "__main__":
print('This is just a handler to create a Google Earth Engine imagery Collection on a request.')
| mit |
hasteur/g13bot_tools | families/wiktionary_family.py | 3 | 31177 | # -*- coding: utf-8 -*-
import family
__version__ = '$Id$'
# The Wikimedia family that is known as Wiktionary
class Family(family.WikimediaFamily):
    """Family class describing the Wiktionary projects for pywikibot."""

    def __init__(self):
        """Initialize language list, namespace aliases and interwiki rules."""
        super(Family, self).__init__()
        self.name = 'wiktionary'
        self.languages_by_size = [
            'en', 'mg', 'fr', 'zh', 'lt', 'ru', 'el', 'pl', 'ko', 'sv', 'tr',
            'de', 'ta', 'nl', 'ku', 'kn', 'fi', 'vi', 'io', 'hu', 'pt', 'ml',
            'no', 'es', 'my', 'id', 'it', 'li', 'et', 'ja', 'te', 'fa', 'ro',
            'cs', 'ar', 'ca', 'eu', 'jv', 'gl', 'lo', 'uk', 'fj', 'br', 'bg',
            'eo', 'hr', 'oc', 'is', 'vo', 'th', 'zh-min-nan', 'simple', 'ps',
            'cy', 'scn', 'sr', 'af', 'ast', 'sw', 'fy', 'tl', 'uz', 'da', 'he',
            'nn', 'ur', 'wa', 'la', 'hy', 'sq', 'sm', 'nah', 'sl', 'hi', 'az',
            'pnb', 'ka', 'tt', 'bs', 'lb', 'lv', 'tk', 'hsb', 'sk', 'kk', 'ky',
            'mk', 'km', 'nds', 'be', 'ga', 'wo', 'ms', 'ang', 'co', 'gn', 'mr',
            'csb', 'sa', 'st', 'ia', 'sd', 'si', 'sh', 'tg', 'ug', 'mn', 'kl',
            'or', 'jbo', 'an', 'ln', 'zu', 'fo', 'gu', 'kw', 'gv', 'rw', 'qu',
            'ss', 'vec', 'ie', 'mt', 'om', 'chr', 'bn', 'roa-rup', 'iu', 'pa',
            'so', 'am', 'su', 'za', 'gd', 'mi', 'tpi', 'ne', 'yi', 'ti', 'sg',
            'na', 'dv', 'tn', 'ha', 'ks', 'ts', 'ay',
        ]
        # Every language edition lives at <code>.wiktionary.org.
        self.langs = dict([(lang, '%s.wiktionary.org' % lang)
                           for lang in self.languages_by_size])
        # Override defaults
        self.namespaces[14]['bn'] = [u'বিষয়শ্রেণী']
        self.namespaces[15]['bn'] = [u'বিষয়শ্রেণী আলোচনা']
        self.namespaces[2]['ca'] = [u'Usuari']
        self.namespaces[3]['ca'] = [u'Usuari Discussió']
        self.namespaces[2]['cs'] = [u'Uživatel', u'Uživatelka']
        self.namespaces[3]['cs'] = [u'Diskuse s uživatelem', u'Uživatel diskuse', u'Uživatelka diskuse', u'Diskuse s uživatelkou']
        self.namespaces[9]['da'] = [u'MediaWiki diskussion', u'MediaWiki-diskussion']
        self.namespaces[13]['da'] = [u'Hjælp diskussion', u'Hjælp-diskussion']
        self.namespaces[3]['de'] = [u'Benutzer Diskussion', u'BD', u'Benutzerin Diskussion']
        self.namespaces[2]['fa'] = [u'کاربر']
        self.namespaces[3]['fa'] = [u'بحث کاربر']
        self.namespaces[2]['fr'] = [u'Utilisateur']
        self.namespaces[3]['fr'] = [u'Discussion utilisateur', u'Discussion Utilisateur']
        self.namespaces[8]['hi'] = [u'मीडियाविकि']
        self.namespaces[9]['hi'] = [u'मीडियाविकि वार्ता']
        self.namespaces[2]['pt'] = [u'Utilizador', u'Usuário', u'Utilizadora']
        self.namespaces[3]['pt'] = [u'Utilizador Discussão', u'Usuário Discussão', u'Utilizadora Discussão']
        self.namespaces[9]['ro'] = [u'Discuție MediaWiki', u'Discuţie MediaWiki']
        self.namespaces[6]['vec'] = [u'File', u'Imagine']
        self.namespaces[10]['zh'] = [u'Template', u'模板', u'样板', u'樣板']
        self.namespaces[14]['zh'] = [u'Category', u'分类', u'分類']
        # Most namespaces are inherited from family.Family.
        # Translation used on all wikis for the different namespaces.
        # (Please sort languages alphabetically)
        # You only need to enter translations that differ from _default.
        self.namespaces[4] = {
            '_default': self.namespaces[4]['_default'],
            'af': u'Wiktionary',
            'am': u'Wiktionary',
            'an': u'Wiktionary',
            'ang': u'Wiktionary',
            'ar': [u'ويكاموس', u'Wiktionary'],
            'ast': [u'Uiccionariu', u'Wiktionary'],
            'ay': u'Wiktionary',
            'az': u'Wiktionary',
            'be': u'Wiktionary',
            'bg': [u'Уикиречник', u'Wiktionary'],
            'bn': [u'উইকিঅভিধান', u'Wiktionary'],
            'br': [u'Wikeriadur', u'Wiktionary'],
            'bs': [u'Vikirječnik', u'Wiktionary'],
            'ca': [u'Viccionari', u'Wiktionary'],
            'chr': u'Wiktionary',
            'co': u'Wiktionary',
            'cs': [u'Wikislovník', u'WS', u'WT', u'Wiktionary'],
            'csb': u'Wiktionary',
            'cy': [u'Wiciadur', u'Wiktionary'],
            'da': u'Wiktionary',
            'de': [u'Wiktionary', u'WT'],
            'dv': [u'ވިކިރަދީފު', u'Wiktionary'],
            'dz': u'Wiktionary',
            'el': [u'Βικιλεξικό', u'Wiktionary'],
            'en': [u'Wiktionary', u'WT'],
            'eo': [u'Vikivortaro', u'Wiktionary'],
            'es': [u'Wikcionario', u'Wiktionary'],
            'et': [u'Vikisõnastik', u'Wiktionary'],
            'eu': u'Wiktionary',
            'fa': [u'ویکی‌واژه', u'وو', u'Wiktionary'],
            'fi': [u'Wikisanakirja', u'Wiktionary'],
            'fj': u'Wiktionary',
            'fo': u'Wiktionary',
            'fr': [u'Wiktionnaire', u'WT', u'Wiktionary'],
            'fy': u'Wiktionary',
            'ga': [u'Vicífhoclóir', u'Wiktionary'],
            'gd': u'Wiktionary',
            'gl': u'Wiktionary',
            'gn': u'Wiktionary',
            'gu': [u'વિક્શનરી', u'Wiktionary'],
            'gv': u'Wiktionary',
            'ha': u'Wiktionary',
            'he': [u'ויקימילון', u'Wiktionary'],
            'hi': [u'विक्षनरी', u'Wiktionary'],
            'hr': [u'Wječnik', u'Wiktionary'],
            'hsb': [u'Wikisłownik', u'Wiktionary'],
            'hu': [u'Wikiszótár', u'Wiktionary'],
            'hy': [u'Վիքիբառարան', u'Wiktionary'],
            'ia': [u'Wiktionario', u'Wiktionary'],
            'id': u'Wiktionary',
            'ie': u'Wiktionary',
            'ik': u'Wiktionary',
            'io': [u'Wikivortaro', u'Wiktionary'],
            'is': [u'Wikiorðabók', u'Wiktionary'],
            'it': [u'Wikizionario', u'WZ', u'Wiktionary'],
            'iu': u'Wiktionary',
            'ja': u'Wiktionary',
            'jbo': u'Wiktionary',
            'jv': u'Wiktionary',
            'ka': [u'ვიქსიკონი', u'Wiktionary'],
            'kk': [u'Уикисөздік', u'Wiktionary'],
            'kl': u'Wiktionary',
            'km': u'Wiktionary',
            'kn': u'Wiktionary',
            'ko': [u'위키낱말사전', u'Wiktionary'],
            'ks': u'Wiktionary',
            'ku': [u'Wîkîferheng', u'Wiktionary'],
            'kw': u'Wiktionary',
            'ky': u'Wiktionary',
            'la': [u'Victionarium', u'Wiktionary'],
            'lb': [u'Wiktionnaire', u'Wiktionary'],
            'li': u'Wiktionary',
            'ln': u'Wiktionary',
            'lo': u'Wiktionary',
            'lt': [u'Vikižodynas', u'Wiktionary'],
            'lv': u'Wiktionary',
            'mg': u'Wiktionary',
            'mi': u'Wiktionary',
            'mk': u'Wiktionary',
            'ml': [u'വിക്കിനിഘണ്ടു', u'Wiktionary', u'വിക്കി നിഘണ്ടു'],
            'mn': u'Wiktionary',
            'mr': [u'विक्शनरी', u'Wiktionary'],
            'ms': u'Wiktionary',
            'mt': [u'Wikizzjunarju', u'Wiktionary'],
            'my': u'Wiktionary',
            'na': u'Wiktionary',
            'nah': [u'Wiktionary', u'Wikipedia'],
            'nds': u'Wiktionary',
            'ne': u'Wiktionary',
            'nl': [u'WikiWoordenboek', u'Wiktionary'],
            'nn': u'Wiktionary',
            'no': u'Wiktionary',
            'oc': [u'Wikiccionari', u'Wiktionary'],
            'om': u'Wiktionary',
            'or': u'Wiktionary',
            'pa': u'Wiktionary',
            'pl': [u'Wikisłownik', u'WS', u'Wiktionary'],
            'pnb': [u'وکشنری', u'Wiktionary'],
            'ps': [u'ويکيسيند', u'Wiktionary'],
            'pt': [u'Wikcionário', u'Wiktionary'],
            'qu': u'Wiktionary',
            'ro': [u'Wikționar', u'Wiktionary'],
            'roa-rup': u'Wiktionary',
            'ru': [u'Викисловарь', u'Wiktionary'],
            'rw': u'Wiktionary',
            'sa': u'Wiktionary',
            'scn': [u'Wikizziunariu', u'Wiktionary'],
            'sd': u'Wiktionary',
            'sg': u'Wiktionary',
            'sh': u'Wiktionary',
            'si': [u'වික්ෂනරි', u'Wiktionary'],
            'simple': [u'Wiktionary', u'WT'],
            'sk': [u'Wikislovník', u'Wiktionary'],
            'sl': [u'Wikislovar', u'Wiktionary'],
            'sm': u'Wiktionary',
            'so': u'Wiktionary',
            'sq': u'Wiktionary',
            'sr': [u'Викиречник', u'Wiktionary'],
            'ss': u'Wiktionary',
            'st': u'Wiktionary',
            'su': u'Wiktionary',
            'sv': [u'Wiktionary', u'WT'],
            'sw': u'Wiktionary',
            'ta': [u'விக்சனரி', u'Wiktionary', u'விக்கிபீடியா'],
            'te': [u'విక్షనరీ', u'Wiktionary'],
            'tg': u'Wiktionary',
            'th': [u'วิกิพจนานุกรม', u'Wiktionary'],
            'ti': u'Wiktionary',
            'tk': [u'Wikisözlük', u'Wiktionary'],
            'tl': u'Wiktionary',
            'tn': u'Wiktionary',
            'tpi': u'Wiktionary',
            'tr': [u'Vikisözlük', u'Wiktionary'],
            'ts': u'Wiktionary',
            'tt': u'Wiktionary',
            'ug': u'Wiktionary',
            'uk': [u'Вікісловник', u'Wiktionary', u'ВС'],
            'ur': [u'وکی لغت', u'Wiktionary'],
            'uz': [u'Vikilug‘at', u'Wiktionary'],
            'vec': [u'Wikisionario', u'Wiktionary'],
            'vi': u'Wiktionary',
            'vo': [u'Vükivödabuk', u'Wiktionary'],
            'wa': u'Wiktionary',
            'wo': u'Wiktionary',
            'yi': [u'װיקיװערטערבוך', u'וויקיווערטערבוך', u'Wiktionary'],
            'za': u'Wiktionary',
            'zh': u'Wiktionary',
            'zh-min-nan': u'Wiktionary',
            'zu': u'Wiktionary',
        }
        self.namespaces[5] = {
            '_default': self.namespaces[5]['_default'],
            'ab': u'Обсуждение Wiktionary',
            'af': u'Wiktionarybespreking',
            'als': u'Wiktionary Diskussion',
            'am': u'Wiktionary ውይይት',
            'an': u'Descusión Wiktionary',
            'ang': u'Wiktionary talk',
            'ar': u'نقاش ويكاموس',
            'ast': u'Uiccionariu alderique',
            'av': u'Обсуждение Wiktionary',
            'ay': u'Wiktionary discusión',
            'az': u'Wiktionary müzakirəsi',
            'ba': u'Wiktionary б-са фекер алышыу',
            'be': u'Размовы пра Wiktionary',
            'bg': u'Уикиречник беседа',
            'bm': u'Discussion Wiktionary',
            'bn': u'উইকিঅভিধান আলোচনা',
            'br': u'Kaozeadenn Wikeriadur',
            'bs': u'Razgovor s Vikirječnikom',
            'ca': u'Viccionari Discussió',
            'chr': u'Wiktionary talk',
            'co': u'Wiktionary talk',
            'cs': u'Diskuse k Wikislovníku',
            'csb': u'Diskùsëjô Wiktionary',
            'cy': u'Sgwrs Wiciadur',
            'da': u'Wiktionary diskussion',
            'de': u'Wiktionary Diskussion',
            'dv': u'ވިކިރަދީފު ޚިޔާ',
            'dz': u'Wiktionary talk',
            'el': u'Συζήτηση βικιλεξικού',
            'en': u'Wiktionary talk',
            'eo': u'Vikivortaro-Diskuto',
            'es': u'Wikcionario discusión',
            'et': u'Vikisõnastiku arutelu',
            'eu': u'Wiktionary eztabaida',
            'fa': u'بحث ویکی‌واژه',
            'fi': u'Keskustelu Wikisanakirjasta',
            'fj': u'Wiktionary talk',
            'fo': u'Wiktionary-kjak',
            'fr': u'Discussion Wiktionnaire',
            'fy': u'Wiktionary oerlis',
            'ga': u'Plé Vicífhoclóra',
            'gd': u'An deasbaireachd aig Wiktionary',
            'gl': u'Conversa Wiktionary',
            'gn': u'Wiktionary myangekõi',
            'gu': u'વિક્શનરી ચર્ચા',
            'gv': u'Resooney Wiktionary',
            'ha': u'Wiktionary talk',
            'he': u'שיחת ויקימילון',
            'hi': u'विक्षनरी वार्ता',
            'hr': u'Razgovor Wječnik',
            'hsb': u'Diskusija k Wikisłownikej',
            'hu': u'Wikiszótár-vita',
            'hy': u'Վիքիբառարանի քննարկում',
            'ia': u'Discussion Wiktionario',
            'id': u'Pembicaraan Wiktionary',
            'ie': u'Wiktionary Discussion',
            'ik': u'Wiktionary talk',
            'io': u'Wikivortaro Debato',
            'is': u'Wikiorðabókarspjall',
            'it': u'Discussioni Wikizionario',
            'iu': u'Wiktionary talk',
            'ja': u'Wiktionary・トーク',
            'jbo': u'Wiktionary talk',
            'jv': u'Dhiskusi Wiktionary',
            'ka': u'ვიქსიკონი განხილვა',
            'kk': u'Уикисөздік талқылауы',
            'kl': u'Wiktionary-p oqalliffia',
            'km': u'ការពិភាក្សាអំពីWiktionary',
            'kn': u'Wiktionary ಚರ್ಚೆ',
            'ko': u'위키낱말사전토론',
            'ks': u'Wiktionary بَحَژ',
            'ku': u'Gotûbêja Wîkîferhengê',
            'kw': u'Keskows Wiktionary',
            'ky': u'Wiktionary баарлашуу',
            'la': u'Disputatio Victionarii',
            'lb': u'Wiktionnaire Diskussioun',
            'li': u'Euverlèk Wiktionary',
            'ln': u'Discussion Wiktionary',
            'lo': u'ສົນທະນາກ່ຽວກັບWiktionary',
            'lt': u'Vikižodyno aptarimas',
            'lv': u'Wiktionary diskusija',
            'mg': u'Dinika amin\'ny Wiktionary',
            'mi': u'Wiktionary talk',
            'mk': u'Разговор за Wiktionary',
            'ml': u'വിക്കിനിഘണ്ടു സംവാദം',
            'mn': u'Wiktionary-н хэлэлцүүлэг',
            'mr': u'विक्शनरी चर्चा',
            'ms': u'Perbincangan Wiktionary',
            'mt': u'Diskussjoni Wikizzjunarju',
            'my': u'Wiktionary talk',
            'na': u'Wiktionary talk',
            'nah': u'Wiktionary tēixnāmiquiliztli',
            'nap': [u'Wiktionary chiàcchiera', u'Discussioni Wiktionary'],
            'nds': u'Wiktionary Diskuschoon',
            'ne': u'Wiktionary वार्ता',
            'nl': u'Overleg WikiWoordenboek',
            'nn': u'Wiktionary-diskusjon',
            'no': u'Wiktionary-diskusjon',
            'oc': u'Discussion Wikiccionari',
            'om': u'Wiktionary talk',
            'or': u'Wiktionary ଆଲୋଚନା',
            'pa': u'Wiktionary ਗੱਲ-ਬਾਤ',
            'pl': u'Wikidyskusja',
            'pnb': u'گل ات',
            'ps': u'د ويکيسيند خبرې اترې',
            'pt': u'Wikcionário Discussão',
            'qu': u'Wiktionary rimanakuy',
            'ro': u'Discuție Wikționar',
            'roa-rup': u'Wiktionary talk',
            'ru': u'Обсуждение Викисловаря',
            'rw': u'Wiktionary talk',
            'sa': u'Wiktionaryसम्भाषणम्',
            'sc': u'Wiktionary discussioni',
            'scn': u'Discussioni Wikizziunariu',
            'sd': u'Wiktionary بحث',
            'sg': u'Discussion Wiktionary',
            'sh': u'Razgovor o Wiktionary',
            'si': u'වික්ෂනරි සාකච්ඡාව',
            'simple': u'Wiktionary talk',
            'sk': u'Diskusia k Wikislovníku',
            'sl': u'Pogovor o Wikislovarju',
            'sm': u'Wiktionary talk',
            'so': u'Wiktionary talk',
            'sq': u'Wiktionary diskutim',
            'sr': u'Разговор о викиречнику',
            'ss': u'Wiktionary talk',
            'st': u'Wiktionary talk',
            'su': u'Obrolan Wiktionary',
            'sv': u'Wiktionarydiskussion',
            'sw': u'Majadiliano ya Wiktionary',
            'ta': u'விக்சனரி பேச்சு',
            'te': u'విక్షనరీ చర్చ',
            'tg': u'Баҳси Wiktionary',
            'th': u'คุยเรื่องวิกิพจนานุกรม',
            'ti': u'Wiktionary talk',
            'tk': u'Wikisözlük çekişme',
            'tl': u'Usapang Wiktionary',
            'tn': u'Wiktionary talk',
            'tpi': u'Wiktionary toktok',
            'tr': u'Vikisözlük tartışma',
            'ts': u'Wiktionary talk',
            'tt': u'Wiktionary бәхәсе',
            'ug': u'Wiktionaryمۇنازىرىسى',
            'uk': u'Обговорення Вікісловника',
            'ur': u'تبادلۂ خیال وکی لغت',
            'uz': u'Vikilug‘at munozarasi',
            'vec': u'Discussion Wikisionario',
            'vi': u'Thảo luận Wiktionary',
            'vo': u'Bespik dö Vükivödabuk',
            'wa': u'Wiktionary copene',
            'wo': u'Wiktionary waxtaan',
            'yi': u'װיקיװערטערבוך רעדן',
            'za': u'Wiktionary讨论',
            'zh': u'Wiktionary talk',
            'zh-min-nan': u'Wiktionary talk',
            'zu': u'Wiktionary talk',
        }
        # Namespaces 90-93 (Thread/Summary and their talk pages) are only
        # defined for 'en' here.
        self.namespaces[90] = {
            'en': u'Thread',
        }
        self.namespaces[91] = {
            'en': u'Thread talk',
        }
        self.namespaces[92] = {
            'en': u'Summary',
        }
        self.namespaces[93] = {
            'en': u'Summary talk',
        }
        # Namespaces 100+ are per-wiki custom namespaces (Appendix, Index,
        # Concordance, Rhymes, Wikisaurus, Citations, ...).
        self.namespaces[100] = {
            'bg': u'Словоформи',
            'bn': u'উইকিসরাস',
            'br': u'Stagadenn',
            'bs': u'Portal',
            'cs': u'Příloha',
            'cy': u'Atodiad',
            'el': u'Παράρτημα',
            'en': u'Appendix',
            'es': u'Apéndice',
            'fa': u'پیوست',
            'fi': u'Liite',
            'fr': u'Annexe',
            'ga': u'Aguisín',
            'gl': u'Apéndice',
            'he': u'נספח',
            'hu': u'Függelék',
            'id': u'Indeks',
            'it': u'Appendice',
            'ja': u'付録',
            'ko': u'부록',
            'ku': u'Pêvek',
            'lb': u'Annexen',
            'lt': u'Sąrašas',
            'lv': u'Pielikums',
            'mg': u'Rakibolana',
            'no': u'Tillegg',
            'oc': u'Annèxa',
            'pl': u'Aneks',
            'pt': u'Apêndice',
            'ro': u'Portal',
            'ru': [u'Приложение', u'Appendix'],
            'sr': u'Портал',
            'tr': u'Portal',
            'uk': u'Додаток',
            'zh': u'附录',
        }
        self.namespaces[101] = {
            'bg': u'Словоформи беседа',
            'bn': u'উইকিসরাস আলোচনা',
            'br': u'Kaozeadenn Stagadenn',
            'bs': u'Razgovor o Portalu',
            'cs': u'Diskuse k příloze',
            'cy': u'Sgwrs Atodiad',
            'el': u'Συζήτηση παραρτήματος',
            'en': u'Appendix talk',
            'es': u'Apéndice Discusión',
            'fa': u'بحث پیوست',
            'fi': u'Keskustelu liitteestä',
            'fr': u'Discussion Annexe',
            'ga': u'Plé aguisín',
            'gl': u'Conversa apéndice',
            'he': u'שיחת נספח',
            'hu': u'Függelékvita',
            'id': u'Pembicaraan Indeks',
            'it': u'Discussioni appendice',
            'ja': u'付録・トーク',
            'ko': u'부록 토론',
            'ku': u'Gotûbêja pêvekê',
            'lb': u'Annexen Diskussioun',
            'lt': u'Sąrašo aptarimas',
            'lv': u'Pielikuma diskusija',
            'mg': u'Dinika amin\'ny rakibolana',
            'no': u'Tilleggdiskusjon',
            'oc': u'Discussion Annèxa',
            'pl': u'Dyskusja aneksu',
            'pt': u'Apêndice Discussão',
            'ro': u'Discuție Portal',
            'ru': [u'Обсуждение приложения', u'Appendix talk'],
            'sr': u'Разговор о порталу',
            'tr': u'Portal tartışma',
            'uk': u'Обговорення додатка',
            'zh': u'附录讨论',
        }
        self.namespaces[102] = {
            'bs': u'Indeks',
            'cy': u'Odliadur',
            'de': u'Verzeichnis',
            'en': u'Concordance',
            'fr': u'Transwiki',
            'hu': u'Index',
            'ia': u'Appendice',
            'ku': u'Nimînok',
            'lt': u'Priedas',
            'pl': u'Indeks',
            'pt': u'Vocabulário',
            'ro': u'Apendice',
            'ru': [u'Конкорданс', u'Concordance'],
            'sv': u'Appendix',
            'uk': u'Індекс',
            'zh': u'Transwiki',
        }
        self.namespaces[103] = {
            'bs': u'Razgovor o Indeksu',
            'cy': u'Sgwrs Odliadur',
            'de': u'Verzeichnis Diskussion',
            'en': u'Concordance talk',
            'fr': u'Discussion Transwiki',
            'hu': u'Indexvita',
            'ia': u'Discussion Appendice',
            'ku': u'Gotûbêja nimînokê',
            'lt': u'Priedo aptarimas',
            'pl': u'Dyskusja indeksu',
            'pt': u'Vocabulário Discussão',
            'ro': u'Discuție Apendice',
            'ru': [u'Обсуждение конкорданса', u'Concordance talk'],
            'sv': u'Appendixdiskussion',
            'uk': u'Обговорення індексу',
            'zh': u'Transwiki talk',
        }
        self.namespaces[104] = {
            'bs': u'Dodatak',
            'cy': u'WiciSawrws',
            'de': u'Thesaurus',
            'en': u'Index',
            'fr': u'Portail',
            'ku': u'Portal',
            'mr': u'सूची',
            'pl': u'Portal',
            'pt': u'Rimas',
            'ru': [u'Индекс', u'Index'],
            'sv': u'Rimord',
        }
        self.namespaces[105] = {
            'bs': u'Razgovor o Dodatku',
            'cy': u'Sgwrs WiciSawrws',
            'de': u'Thesaurus Diskussion',
            'en': u'Index talk',
            'fr': u'Discussion Portail',
            'ku': u'Gotûbêja portalê',
            'mr': u'सूची चर्चा',
            'pl': u'Dyskusja portalu',
            'pt': u'Rimas Discussão',
            'ru': [u'Обсуждение индекса', u'Index talk'],
            'sv': u'Rimordsdiskussion',
        }
        self.namespaces[106] = {
            'de': u'Reim',
            'en': u'Rhymes',
            'fr': u'Thésaurus',
            'is': u'Viðauki',
            'pt': u'Portal',
            'ru': [u'Рифмы', u'Rhymes'],
            'sv': u'Transwiki',
        }
        self.namespaces[107] = {
            'de': u'Reim Diskussion',
            'en': u'Rhymes talk',
            'fr': u'Discussion Thésaurus',
            'is': u'Viðaukaspjall',
            'pt': u'Portal Discussão',
            'ru': [u'Обсуждение рифм', u'Rhymes talk'],
            'sv': u'Transwikidiskussion',
        }
        self.namespaces[108] = {
            'en': u'Transwiki',
            'fr': u'Projet',
            'pt': u'Citações',
        }
        self.namespaces[109] = {
            'en': u'Transwiki talk',
            'fr': u'Discussion Projet',
            'pt': u'Citações Discussão',
        }
        self.namespaces[110] = {
            'en': [u'Wikisaurus', u'WS'],
            'is': u'Samheitasafn',
            'ko': u'미주알고주알',
        }
        self.namespaces[111] = {
            'en': u'Wikisaurus talk',
            'is': u'Samheitasafnsspjall',
            'ko': u'미주알고주알 토론',
        }
        self.namespaces[114] = {
            'en': u'Citations',
        }
        self.namespaces[115] = {
            'en': u'Citations talk',
        }
        self.namespaces[116] = {
            'en': u'Sign gloss',
        }
        self.namespaces[117] = {
            'en': u'Sign gloss talk',
        }
        # Global bot allowed languages on
        # http://meta.wikimedia.org/wiki/Bot_policy/Implementation#Current_implementation
        self.cross_allowed = [
            'am', 'an', 'ang', 'ast', 'ay', 'az', 'be', 'bg', 'bn', 'br', 'bs',
            'ca', 'chr', 'co', 'cy', 'da', 'dv', 'eo', 'es', 'et', 'eu', 'fa',
            'fi', 'fj', 'fo', 'fy', 'ga', 'gd', 'gl', 'gn', 'gv', 'hu', 'ia',
            'id', 'ie', 'io', 'jv', 'ka', 'kl', 'kn', 'ku', 'ky', 'lb', 'lo',
            'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'my', 'ne', 'nl', 'no', 'oc',
            'or', 'pt', 'sh', 'simple', 'sk', 'sl', 'sm', 'su', 'tg', 'th',
            'ti', 'tk', 'tn', 'tpi', 'ts', 'ug', 'uk', 'vo', 'wa', 'wo', 'zh',
            'zh-min-nan', 'zu',
        ]
        # Other than most Wikipedias, page names must not start with a capital
        # letter on ALL Wiktionaries.
        self.nocapitalize = self.langs.keys()
        # Which languages have a special order for putting interlanguage links,
        # and what order is it? If a language is not in interwiki_putfirst,
        # alphabetical order on language code is used. For languages that are in
        # interwiki_putfirst, interwiki_putfirst is checked first, and
        # languages are put in the order given there. All other languages are
        # put after those, in code-alphabetical order.
        self.alphabetic_sv = [
            'aa', 'af', 'ak', 'als', 'an', 'roa-rup', 'ast', 'gn', 'ay', 'az',
            'id', 'ms', 'bm', 'zh-min-nan', 'jv', 'su', 'mt', 'bi', 'bo', 'bs',
            'br', 'ca', 'cs', 'ch', 'sn', 'co', 'za', 'cy', 'da', 'de', 'na',
            'mh', 'et', 'ang', 'en', 'es', 'eo', 'eu', 'to', 'fr', 'fy', 'fo',
            'ga', 'gv', 'sm', 'gd', 'gl', 'hr', 'io', 'ia', 'ie', 'ik', 'xh',
            'is', 'zu', 'it', 'kl', 'csb', 'kw', 'rw', 'rn', 'sw', 'ky', 'ku',
            'la', 'lv', 'lb', 'lt', 'li', 'ln', 'jbo', 'hu', 'mg', 'mi', 'mo',
            'my', 'fj', 'nah', 'nl', 'cr', 'no', 'nn', 'hsb', 'oc', 'om', 'ug',
            'uz', 'nds', 'pl', 'pt', 'ro', 'rm', 'qu', 'sg', 'sc', 'st', 'tn',
            'sq', 'scn', 'simple', 'ss', 'sk', 'sl', 'so', 'sh', 'fi', 'sv',
            'tl', 'tt', 'vi', 'tpi', 'tr', 'tw', 'vo', 'wa', 'wo', 'ts', 'yo',
            'el', 'av', 'ab', 'ba', 'be', 'bg', 'mk', 'mn', 'ru', 'sr', 'tg',
            'uk', 'kk', 'hy', 'yi', 'he', 'ur', 'ar', 'tk', 'sd', 'fa', 'ha',
            'ps', 'dv', 'ks', 'ne', 'pi', 'bh', 'mr', 'sa', 'hi', 'as', 'bn',
            'pa', 'pnb', 'gu', 'or', 'ta', 'te', 'kn', 'ml', 'si', 'th', 'lo',
            'dz', 'ka', 'ti', 'am', 'chr', 'iu', 'km', 'zh', 'ja', 'ko',
        ]
        self.interwiki_putfirst = {
            'da': self.alphabetic,
            'en': self.alphabetic,
            'et': self.alphabetic,
            'fi': self.alphabetic,
            'fy': self.fyinterwiki,
            'he': ['en'],
            'hu': ['en'],
            'ms': self.alphabetic_revised,
            'pl': self.alphabetic_revised,
            'sv': self.alphabetic_sv,
            'simple': self.alphabetic,
        }
        # Codes mapping to another code were renamed/redirected; codes mapping
        # to None belong to wikis that have been closed.
        self.obsolete = {
            'aa': None,  # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Afar_Wiktionary
            'ab': None,  # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Abkhaz_Wiktionary
            'ak': None,  # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Akan_Wiktionary
            'als': None,  # http://als.wikipedia.org/wiki/Wikipedia:Stammtisch/Archiv_2008-1#Afterwards.2C_closure_and_deletion_of_Wiktionary.2C_Wikibooks_and_Wikiquote_sites
            'as': None,  # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Assamese_Wiktionary
            'av': None,  # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Avar_Wiktionary
            'ba': None,  # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Bashkir_Wiktionary
            'bh': None,  # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Bihari_Wiktionary
            'bi': None,  # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Bislama_Wiktionary
            'bm': None,  # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Bambara_Wiktionary
            'bo': None,  # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Tibetan_Wiktionary
            'ch': None,  # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Chamorro_Wiktionary
            'cr': None,  # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Nehiyaw_Wiktionary
            'dk': 'da',
            'dz': None,
            'ik': None,  # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Inupiak_Wiktionary
            'jp': 'ja',
            'mh': None,  # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Marshallese_Wiktionary
            'mo': 'ro',  # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Moldovan_Wiktionary
            'minnan':'zh-min-nan',
            'nb': 'no',
            'pi': None,  # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Pali_Bhasa_Wiktionary
            'rm': None,  # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Rhaetian_Wiktionary
            'rn': None,  # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Kirundi_Wiktionary
            'sc': None,  # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Sardinian_Wiktionary
            'sn': None,  # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Shona_Wiktionary
            'to': None,  # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Tongan_Wiktionary
            'tlh': None,  # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Klingon_Wiktionary
            'tw': None,  # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Twi_Wiktionary
            'tokipona': None,
            'xh': None,  # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Xhosa_Wiktionary
            'yo': None,  # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Yoruba_Wiktionary
            'zh-tw': 'zh',
            'zh-cn': 'zh'
        }
        self.interwiki_on_one_line = ['pl']
        self.interwiki_attop = ['pl']
| gpl-2.0 |
mfraezz/osf.io | admin_tests/utilities.py | 30 | 1197 | """
Utilities to help run Django tests
* setup_view - replaces as_view
"""
from osf_tests.factories import UserFactory
def setup_view(view, request, *args, **kwargs):
    """Return *view* configured as if ``as_view()`` had dispatched to it.

    Based on http://tech.novapost.fr/django-unit-test-your-views-en.html
    """
    view.request, view.args, view.kwargs = request, args, kwargs
    return view
def setup_form_view(view, request, form, *args, **kwargs):
    """Mimic ``as_view()`` for form views, skipping part of the context setup."""
    view.form = form
    view.request = request
    try:
        view.request.user = request.user
    except AttributeError:
        # The request carries no user; substitute a factory-generated one.
        view.request.user = UserFactory()
    view.args, view.kwargs = args, kwargs
    return view
def setup_user_view(view, request, user, *args, **kwargs):
    """Attach *request* to *view*, forcing ``request.user`` to *user*."""
    request.user = user
    view.request = request
    view.args, view.kwargs = args, kwargs
    return view
def setup_log_view(view, request, *args, **kwargs):
    """Mimic ``as_view()`` for log views, ensuring ``request.user`` is set."""
    view.request = request
    try:
        view.request.user = request.user
    except AttributeError:
        # The request carries no user; substitute a factory-generated one.
        view.request.user = UserFactory()
    view.args, view.kwargs = args, kwargs
    return view
| apache-2.0 |
liuzzfnst/tp-libvirt | libvirt/tests/src/virsh_cmd/host/virsh_nodesuspend.py | 7 | 5329 | import time
import logging
from virttest import virsh
from virttest import libvirt_vm
from virttest.remote import LoginTimeoutError, LoginProcessTerminatedError
from autotest.client.shared import error
from autotest.client import utils
class TimeoutError(Exception):
    """
    Simple custom exception raised when host down time exceeds timeout.
    """

    def __init__(self, msg):
        # Pass the message (not ``self``) to the base class so that
        # ``Exception.args`` holds something printable and picklable.
        # The original passed ``self``, leaving the exception instance
        # itself inside its own ``args`` tuple.
        super(TimeoutError, self).__init__(msg)
        self.msg = msg

    def __str__(self):
        return ("TimeoutError: %s" % self.msg)
def check_host_down_time(remote_ip, timeout=300):
    """
    Measure for how long a target host stays down (unreachable by ping).

    Waits first for the host to stop answering pings, then for it to answer
    again, and reports the total elapsed time.

    :param remote_ip: IP address or hostname of target host.
    :param timeout: Maximum number of seconds to wait before giving up on
                    the host recovering.
    :return: Time elapsed (in seconds) before target host is pingable again.
    :raise TimeoutError: If the host has not recovered within *timeout*.
    """
    start_time = time.time()
    end_time = time.time() + timeout
    # Single ping with a 1-second reply deadline; a non-zero exit status
    # from ping is treated as "host is down".
    ping_cmd = 'ping -c 1 -W 1 ' + remote_ip
    logging.debug('Wait for host shutting down.')
    # Phase 1: poll until the host stops answering pings.
    while True:
        if time.time() > end_time:
            raise TimeoutError(
                'Downtime %s exceeds maximum allowed %s' %
                (time.time() - start_time, timeout))
        res = utils.run(ping_cmd, ignore_status=True, verbose=False)
        if res.exit_status:
            logging.debug('Host %s is down.', remote_ip)
            break
        else:
            logging.debug('Host %s is up.', remote_ip)
            time.sleep(1)
    logging.debug('Time elapsed before host down: %.2fs',
                  (time.time() - start_time))
    logging.debug('Wait for host recover from sleep.')
    # Phase 2: poll until the host answers pings again.
    while True:
        if time.time() > end_time:
            raise TimeoutError(
                'Downtime %s exceeds maximum allowed %s' %
                (time.time() - start_time, timeout))
        res = utils.run(ping_cmd, ignore_status=True, verbose=False)
        if res.exit_status:
            logging.debug('Host %s is down.', remote_ip)
        else:
            logging.debug('Host %s is up.', remote_ip)
            break
    down_time = time.time() - start_time
    logging.debug('Time elapsed before host up: %.2fs', down_time)
    return down_time
def run(test, params, env):
    """
    Test command: virsh nodesuspend <target> <duration>

    This command will make the host suspend or hibernate; running this test
    on a testing grid may cause unexpected behavior.

    This test only works when the test runner has set up a remote host
    (physical or virtual) with a testing version of the libvirt daemon
    running. After that, change the remote_xxx parameters in the
    configuration file to the corresponding values.
    """
    # Retrieve parameters
    remote_ip = params.get('nodesuspend_remote_ip',
                           'ENTER.YOUR.REMOTE.EXAMPLE.COM')
    remote_user = params.get('nodesuspend_remote_user', 'root')
    remote_pwd = params.get('nodesuspend_remote_pwd', 'EXAMPLE.PWD')
    suspend_target = params.get('suspend_target', 'mem')
    suspend_time = int(params.get('suspend_time', '60'))
    upper_tolerance = int(params.get('upper_tolerance', '8'))
    lower_tolerance = int(params.get('lower_tolerance', '0'))
    expect_succeed = params.get('expect_succeed', 'yes')
    # Check if remote_ip is set (still at its placeholder default -> skip)
    if 'EXAMPLE' in remote_ip:
        msg = ('Configuration parameter `nodesuspend_remote_ip` need to be '
               'changed to the ip of host to be tested')
        raise error.TestNAError(msg)
    # Create remote virsh session over an SSH transport URI
    remote_uri = libvirt_vm.get_uri_with_transport(
        transport="ssh", dest_ip=remote_ip)
    virsh_dargs = {
        'remote_user': remote_user,
        'remote_pwd': remote_pwd,
        'uri': remote_uri}
    try:
        vrsh = virsh.VirshPersistent(**virsh_dargs)
    except (LoginTimeoutError, LoginProcessTerminatedError):
        raise error.TestNAError('Cannot login to remote host, Skipping')
    # Run test
    result = vrsh.nodesuspend(suspend_target, suspend_time, ignore_status=True)
    logging.debug(result)
    # Check real suspend time if command successfully run
    if result.exit_status == 0:
        try:
            down_time = check_host_down_time(
                remote_ip,
                timeout=suspend_time + upper_tolerance)
            # Wait for PM to return completely
            time.sleep(5)
            # Check if host down time within tolerance
            if not (suspend_time - lower_tolerance <
                    down_time <
                    suspend_time + upper_tolerance):
                raise error.TestFail('Down time (%.2fs) not in range (%ds)'
                                     '+ (%ds) - (%ds).'
                                     % (down_time, suspend_time,
                                        upper_tolerance, lower_tolerance))
        except TimeoutError, e:
            # Mark test FAIL if down time exceeds expectation
            logging.debug(e)
            vrsh.close_session()
            raise error.TestFail('Timeout when checking host down time.')
    # Check whether exit code match expectation.
    if (result.exit_status == 0) != (expect_succeed == 'yes'):
        raise error.TestFail(
            'Result do not meet expect_succeed (%s). Result:\n %s' %
            (expect_succeed, result))
| gpl-2.0 |
fujunwei/chromium-crosswalk | third_party/tlslite/tlslite/utils/openssl_aes.py | 202 | 1944 | # Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""OpenSSL/M2Crypto AES implementation."""
from .cryptomath import *
from .aes import *
if m2cryptoLoaded:
    # Factory with the same signature as the other AES backend modules.
    def new(key, mode, IV):
        return OpenSSL_AES(key, mode, IV)
    class OpenSSL_AES(AES):
        """AES-CBC cipher implemented on top of OpenSSL via M2Crypto."""
        def __init__(self, key, mode, IV):
            AES.__init__(self, key, mode, IV, "openssl")
            self.key = key
            self.IV = IV
        def _createContext(self, encrypt):
            # Build a fresh OpenSSL cipher context for a single
            # encrypt/decrypt call; ``encrypt`` is 1 for encryption, 0 for
            # decryption.
            # NOTE(review): if the key is not 16/24/32 bytes long,
            # cipherType is never assigned and cipher_init raises
            # NameError -- presumably AES.__init__ rejects bad key sizes
            # first; confirm.
            context = m2.cipher_ctx_new()
            if len(self.key)==16:
                cipherType = m2.aes_128_cbc()
            if len(self.key)==24:
                cipherType = m2.aes_192_cbc()
            if len(self.key)==32:
                cipherType = m2.aes_256_cbc()
            m2.cipher_init(context, cipherType, self.key, self.IV, encrypt)
            return context
        def encrypt(self, plaintext):
            AES.encrypt(self, plaintext)
            context = self._createContext(1)
            ciphertext = m2.cipher_update(context, plaintext)
            m2.cipher_ctx_free(context)
            # CBC chaining: the last ciphertext block becomes the IV for
            # the next call.
            self.IV = ciphertext[-self.block_size:]
            return bytearray(ciphertext)
        def decrypt(self, ciphertext):
            AES.decrypt(self, ciphertext)
            context = self._createContext(0)
            #I think M2Crypto has a bug - it fails to decrypt and return the last block passed in.
            #To work around this, we append sixteen zeros to the string, below:
            plaintext = m2.cipher_update(context, ciphertext+('\0'*16))
            #If this bug is ever fixed, then plaintext will end up having a garbage
            #plaintext block on the end. That's okay - the below code will discard it.
            plaintext = plaintext[:len(ciphertext)]
            m2.cipher_ctx_free(context)
            # CBC chaining: remember the last ciphertext block as next IV.
            self.IV = ciphertext[-self.block_size:]
            return bytearray(plaintext)
| bsd-3-clause |
jclakkis/discus-inferno | flaskenv/lib/python2.7/site-packages/setuptools/package_index.py | 258 | 38941 | """PyPI and direct package downloading"""
import sys
import os
import re
import shutil
import socket
import base64
import hashlib
from functools import wraps
from pkg_resources import (
CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST,
require, Environment, find_distributions, safe_name, safe_version,
to_filename, Requirement, DEVELOP_DIST,
)
from setuptools import ssl_support
from distutils import log
from distutils.errors import DistutilsError
from setuptools.compat import (urllib2, httplib, StringIO, HTTPError,
urlparse, urlunparse, unquote, splituser,
url2pathname, name2codepoint,
unichr, urljoin, urlsplit, urlunsplit,
ConfigParser)
from setuptools.compat import filterfalse
from fnmatch import translate
from setuptools.py26compat import strip_fragment
from setuptools.py27compat import get_all_headers
# URL fragment of the form "egg=ProjectName" used to name checkout links.
EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.]+)$')
# Extracts the target of an href attribute from scraped HTML.
HREF = re.compile("""href\\s*=\\s*['"]?([^'"> ]+)""", re.I)
# this is here to fix emacs' cruddy broken syntax highlighting
# Matches PyPI's old index markup: a download link followed by its MD5 digest.
PYPI_MD5 = re.compile(
    '<a href="([^"#]+)">([^<]+)</a>\n\s+\\(<a (?:title="MD5 hash"\n\s+)'
    'href="[^?]+\?:action=show_md5&digest=([0-9a-f]{32})">md5</a>\\)'
)
# Callable returning a match object when a string starts with a URL scheme.
URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):',re.I).match
# Source distribution extensions recognized by distros_for_location().
EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()
__all__ = [
    'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
    'interpret_distro_name',
]
# Default network timeout (in seconds) used when opening URLs.
_SOCKET_TIMEOUT = 15
def parse_bdist_wininst(name):
    """Return (base,pyversion) or (None,None) for possible .exe name"""
    lowered = name.lower()
    base = py_ver = plat = None
    if lowered.endswith('.exe'):
        # Order matters: the plain ".win32.exe" / ".win-amd64.exe" forms are
        # checked before the "-pyX.Y" variants.
        if lowered.endswith('.win32.exe'):
            base, plat = name[:-10], 'win32'
        elif lowered.startswith('.win32-py', -16):
            # e.g. "pkg-1.0.win32-py2.7.exe" -> version from "2.7"
            base, py_ver, plat = name[:-16], name[-7:-4], 'win32'
        elif lowered.endswith('.win-amd64.exe'):
            base, plat = name[:-14], 'win-amd64'
        elif lowered.startswith('.win-amd64-py', -20):
            base, py_ver, plat = name[:-20], name[-7:-4], 'win-amd64'
    return base, py_ver, plat
def egg_info_for_url(url):
    """Split *url* into (basename, fragment) for distribution discovery."""
    parts = urlparse(url)
    server, path, fragment = parts[1], parts[2], parts[5]
    segments = path.split('/')
    base = unquote(segments[-1])
    if server=='sourceforge.net' and base=='download': # XXX Yuck
        base = unquote(segments[-2])
    if '#' in base:
        base, fragment = base.split('#',1)
    return base, fragment
def distros_for_url(url, metadata=None):
    """Yield egg or source distribution objects that might be found at a URL"""
    base, fragment = egg_info_for_url(url)
    for dist in distros_for_location(url, base, metadata):
        yield dist
    if not fragment:
        return
    match = EGG_FRAGMENT.match(fragment)
    if not match:
        return
    # "#egg=Name" fragments name a checkout of the project at this URL.
    for dist in interpret_distro_name(
        url, match.group(1), metadata, precedence=CHECKOUT_DIST
    ):
        yield dist
def distros_for_location(location, basename, metadata=None):
    """Yield egg or source distribution objects based on basename"""
    if basename.endswith('.egg.zip'):
        # A zipped egg: treat it as the egg it contains.
        basename = basename[:-4]
    if basename.endswith('.egg') and '-' in basename:
        # An egg filename encodes its metadata unambiguously.
        return [Distribution.from_location(location, basename, metadata)]
    if basename.endswith('.exe'):
        win_base, py_ver, platform = parse_bdist_wininst(basename)
        if win_base is not None:
            return interpret_distro_name(
                location, win_base, metadata, py_ver, BINARY_DIST, platform
            )
    # Fall back to source-archive extensions (.zip, .tgz, etc.);
    # EXTENSIONS lists ".tar.gz" before ".tar" so longest match wins.
    for ext in EXTENSIONS:
        if basename.endswith(ext):
            stem = basename[:-len(ext)]
            return interpret_distro_name(location, stem, metadata)
    return []  # no extension matched
def distros_for_filename(filename, metadata=None):
    """Yield possible egg or source distribution objects based on a filename"""
    location = normalize_path(filename)
    basename = os.path.basename(filename)
    return distros_for_location(location, basename, metadata)
def interpret_distro_name(
    location, basename, metadata, py_version=None, precedence=SOURCE_DIST,
    platform=None
):
    """Generate alternative interpretations of a source distro name

    Note: if `location` is a filesystem filename, you should call
    ``pkg_resources.normalize_path()`` on it before passing it to this
    routine!
    """
    # Generate alternative interpretations of a source distro name
    # Because some packages are ambiguous as to name/versions split
    # e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc.
    # So, we generate each possible interepretation (e.g. "adns, python-1.1.0"
    # "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). In practice,
    # the spurious interpretations should be ignored, because in the event
    # there's also an "adns" package, the spurious "python-1.1.0" version will
    # compare lower than any numeric version number, and is therefore unlikely
    # to match a request for it. It's still a potential problem, though, and
    # in the long run PyPI and the distutils should go for "safe" names and
    # versions in distribution archive names (sdist and bdist).

    parts = basename.split('-')
    if not py_version:
        for i, p in enumerate(parts[2:]):
            if len(p) == 5 and p.startswith('py2.'):
                # A "py2.x" tag means it's a bdist_dumb, not an sdist -- bail out
                return

    # Yield one Distribution per possible name/version split point.
    for p in range(1, len(parts) + 1):
        yield Distribution(
            location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]),
            py_version=py_version, precedence=precedence,
            platform=platform
        )
# From Python 2.7 docs
def unique_everseen(iterable, key=None):
    "List unique elements, preserving order. Remember all elements ever seen."
    # unique_everseen('AAAABBBCCDAABBB') --> A B C D
    # unique_everseen('ABBCcAD', str.lower) --> A B C D
    seen = set()
    if key is None:
        for element in filterfalse(seen.__contains__, iterable):
            seen.add(element)
            yield element
    else:
        for element in iterable:
            marker = key(element)
            if marker not in seen:
                seen.add(marker)
                yield element
def unique_values(func):
    """Decorator: deduplicate the iterable returned by *func*, keeping order."""
    @wraps(func)
    def deduplicated(*args, **kwargs):
        return unique_everseen(func(*args, **kwargs))
    return deduplicated
# Captures a whole tag carrying a rel="..." attribute, plus the rel value.
REL = re.compile("""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
# this line is here to fix emacs' cruddy broken syntax highlighting
@unique_values
def find_external_links(url, page):
    """Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
    for match in REL.finditer(page):
        tag, rel = match.groups()
        # rel may hold a comma-separated list of link relations.
        rels = set(map(str.strip, rel.lower().split(',')))
        if 'homepage' in rels or 'download' in rels:
            for match in HREF.finditer(tag):
                yield urljoin(url, htmldecode(match.group(1)))

    # Legacy PyPI pages label these links with table headers instead.
    for tag in ("<th>Home Page", "<th>Download URL"):
        pos = page.find(tag)
        if pos != -1:
            match = HREF.search(page, pos)
            if match:
                yield urljoin(url, htmldecode(match.group(1)))
# User-Agent sent with index requests, e.g. "Python-urllib/2.7 setuptools/3.4".
user_agent = "Python-urllib/%s setuptools/%s" % (
    sys.version[:3], require('setuptools')[0].version
)
class ContentChecker(object):
    """
    A null content checker that defines the interface for checking content
    """

    def feed(self, block):
        """
        Feed a block of data to the hash.
        """

    def is_valid(self):
        """
        Check the hash. Return False if validation fails.
        """
        return True

    def report(self, reporter, template):
        """
        Call reporter with information about the checker (hash name)
        substituted into the template.
        """
class HashChecker(ContentChecker):
    # Recognizes "<algorithm>=<hexdigest>" inside a URL fragment.
    pattern = re.compile(
        r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)='
        r'(?P<expected>[a-f0-9]+)'
    )

    def __init__(self, hash_name, expected):
        self.hash_name = hash_name
        self.hash = hashlib.new(hash_name)
        self.expected = expected

    @classmethod
    def from_url(cls, url):
        "Construct a (possibly null) ContentChecker from a URL"
        fragment = urlparse(url)[-1]
        match = fragment and cls.pattern.search(fragment)
        if match:
            return cls(**match.groupdict())
        # No usable hash fragment: fall back to the no-op checker.
        return ContentChecker()

    def feed(self, block):
        self.hash.update(block)

    def is_valid(self):
        return self.hash.hexdigest() == self.expected

    def report(self, reporter, template):
        return reporter(template % self.hash_name)
class PackageIndex(Environment):
    """A distribution index that scans web pages for download URLs"""

    def __init__(
        self, index_url="https://pypi.python.org/simple", hosts=('*',),
        ca_bundle=None, verify_ssl=True, *args, **kw
    ):
        Environment.__init__(self, *args, **kw)
        # Ensure the index URL ends with exactly one trailing slash.
        self.index_url = index_url + "/"[:not index_url.endswith('/')]
        self.scanned_urls = {}   # urls already examined
        self.fetched_urls = {}   # urls whose pages were downloaded
        self.package_pages = {}  # project key -> {page url: True}
        # Predicate: is this host allowed? (fnmatch patterns from `hosts`)
        self.allows = re.compile('|'.join(map(translate, hosts))).match
        self.to_scan = []
        # Prefer a verifying HTTPS opener when SSL support and CA certs exist.
        if verify_ssl and ssl_support.is_available and (ca_bundle or ssl_support.find_ca_bundle()):
            self.opener = ssl_support.opener_for(ca_bundle)
        else:
            self.opener = urllib2.urlopen
    def process_url(self, url, retrieve=False):
        """Evaluate a URL as a possible download, and maybe retrieve it"""
        if url in self.scanned_urls and not retrieve:
            return
        self.scanned_urls[url] = True
        if not URL_SCHEME(url):
            # No scheme: treat it as a local file or directory.
            self.process_filename(url)
            return
        else:
            dists = list(distros_for_url(url))
            if dists:
                if not self.url_ok(url):
                    return
                self.debug("Found link: %s", url)

        if dists or not retrieve or url in self.fetched_urls:
            list(map(self.add, dists))
            return  # don't need the actual page
        if not self.url_ok(url):
            self.fetched_urls[url] = True
            return

        self.info("Reading %s", url)
        self.fetched_urls[url] = True  # prevent multiple fetch attempts
        f = self.open_url(url, "Download error on %s: %%s -- Some packages may not be found!" % url)
        if f is None:
            return
        self.fetched_urls[f.url] = True
        if 'html' not in f.headers.get('content-type', '').lower():
            f.close()  # not html, we can't process it
            return

        base = f.url  # handle redirects
        page = f.read()
        if not isinstance(page, str):  # We are in Python 3 and got bytes. We want str.
            if isinstance(f, HTTPError):
                # Errors have no charset, assume latin1:
                charset = 'latin-1'
            else:
                charset = f.headers.get_param('charset') or 'latin-1'
            page = page.decode(charset, "ignore")
        f.close()
        # Recursively scan every link found on the page.
        for match in HREF.finditer(page):
            link = urljoin(base, htmldecode(match.group(1)))
            self.process_url(link)
        # Only index pages that came from the index itself get post-processed.
        if url.startswith(self.index_url) and getattr(f, 'code', None) != 404:
            page = self.process_index(url, page)
def process_filename(self, fn, nested=False):
# process filenames or directories
if not os.path.exists(fn):
self.warn("Not found: %s", fn)
return
if os.path.isdir(fn) and not nested:
path = os.path.realpath(fn)
for item in os.listdir(path):
self.process_filename(os.path.join(path,item), True)
dists = distros_for_filename(fn)
if dists:
self.debug("Found: %s", fn)
list(map(self.add, dists))
    def url_ok(self, url, fatal=False):
        """Return True if `url` is local or from an allowed host.

        When disallowed: raises DistutilsError if `fatal`, otherwise warns
        and falls through (implicitly returning None, which is falsy).
        """
        s = URL_SCHEME(url)
        if (s and s.group(1).lower() == 'file') or self.allows(urlparse(url)[1]):
            return True
        msg = ("\nNote: Bypassing %s (disallowed host; see "
               "http://bit.ly/1dg9ijs for details).\n")
        if fatal:
            raise DistutilsError(msg % url)
        else:
            self.warn(msg, url)
def scan_egg_links(self, search_path):
for item in search_path:
if os.path.isdir(item):
for entry in os.listdir(item):
if entry.endswith('.egg-link'):
self.scan_egg_link(item, entry)
def scan_egg_link(self, path, entry):
lines = [_f for _f in map(str.strip,
open(os.path.join(path, entry))) if _f]
if len(lines)==2:
for dist in find_distributions(os.path.join(path, lines[0])):
dist.location = os.path.join(path, *lines)
dist.precedence = SOURCE_DIST
self.add(dist)
    def process_index(self, url, page):
        """Process the contents of a PyPI page"""
        def scan(link):
            # Process a URL to see if it's for a package page
            if link.startswith(self.index_url):
                parts = list(map(
                    unquote, link[len(self.index_url):].split('/')
                ))
                if len(parts) == 2 and '#' not in parts[1]:
                    # it's a package page, sanitize and index it
                    pkg = safe_name(parts[0])
                    ver = safe_version(parts[1])
                    self.package_pages.setdefault(pkg.lower(), {})[link] = True
                    return to_filename(pkg), to_filename(ver)
            return None, None

        # process an index page into the package-page index
        for match in HREF.finditer(page):
            try:
                scan(urljoin(url, htmldecode(match.group(1))))
            except ValueError:
                pass

        pkg, ver = scan(url)  # ensure this page is in the page index
        if pkg:
            # process individual package page
            for new_url in find_external_links(url, page):
                # Process the found URL
                base, frag = egg_info_for_url(new_url)
                if base.endswith('.py') and not frag:
                    if ver:
                        # Tag bare .py links with the version from this page.
                        new_url += '#egg=%s-%s' % (pkg, ver)
                    else:
                        self.need_version_info(url)
                self.scan_url(new_url)

            # Rewrite legacy md5 markup into "#md5=" fragment links.
            return PYPI_MD5.sub(
                lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1, 3, 2), page
            )
        else:
            return ""  # no sense double-scanning non-package pages
    def need_version_info(self, url):
        """A .py link lacked "#egg=" version info; force a full index scan."""
        self.scan_all(
            "Page at %s links to .py file(s) without version info; an index "
            "scan is required.", url
        )
    def scan_all(self, msg=None, *args):
        """Scan the top-level index page (at most once per instance)."""
        if self.index_url not in self.fetched_urls:
            if msg:
                self.warn(msg, *args)
            self.info(
                "Scanning index of all packages (this may take a while)"
            )
            self.scan_url(self.index_url)
    def find_packages(self, requirement):
        """Locate index pages for `requirement`, falling back progressively."""
        self.scan_url(self.index_url + requirement.unsafe_name + '/')

        if not self.package_pages.get(requirement.key):
            # Fall back to safe version of the name
            self.scan_url(self.index_url + requirement.project_name + '/')

        if not self.package_pages.get(requirement.key):
            # We couldn't find the target package, so search the index page too
            self.not_found_in_index(requirement)

        for url in list(self.package_pages.get(requirement.key, ())):
            # scan each page that might be related to the desired package
            self.scan_url(url)
    def obtain(self, requirement, installer=None):
        """Find a distribution matching `requirement` (Environment API hook)."""
        self.prescan()
        self.find_packages(requirement)
        for dist in self[requirement.key]:
            if dist in requirement:
                return dist
            self.debug("%s does not match %s", requirement, dist)
        # Nothing suitable found here; defer to the base implementation.
        return super(PackageIndex, self).obtain(requirement, installer)
    def check_hash(self, checker, filename, tfp):
        """
        checker is a ContentChecker

        Validates the downloaded file; on mismatch the temp file is closed,
        the download is removed, and DistutilsError is raised.
        """
        checker.report(self.debug,
            "Validating %%s checksum for %s" % filename)
        if not checker.is_valid():
            tfp.close()
            # Remove the corrupt download before raising.
            os.unlink(filename)
            raise DistutilsError(
                "%s validation failed for %s; "
                "possible download problem?" % (
                    checker.hash.name, os.path.basename(filename))
            )
    def add_find_links(self, urls):
        """Add `urls` to the list that will be prescanned for searches"""
        for url in urls:
            if (
                self.to_scan is None  # if we have already "gone online"
                or not URL_SCHEME(url)  # or it's a local file/directory
                or url.startswith('file:')
                or list(distros_for_url(url))  # or a direct package link
            ):
                # then go ahead and process it now
                self.scan_url(url)
            else:
                # otherwise, defer retrieval till later
                self.to_scan.append(url)
def prescan(self):
"""Scan urls scheduled for prescanning (e.g. --find-links)"""
if self.to_scan:
list(map(self.scan_url, self.to_scan))
self.to_scan = None # from now on, go ahead and process immediately
    def not_found_in_index(self, requirement):
        """Report a missing package page and fall back to a full index scan."""
        if self[requirement.key]:  # we've seen at least one distro
            meth, msg = self.info, "Couldn't retrieve index page for %r"
        else:  # no distros seen for this name, might be misspelled
            meth, msg = (self.warn,
                "Couldn't find index page for %r (maybe misspelled?)")
        meth(msg, requirement.unsafe_name)
        self.scan_all()
    def download(self, spec, tmpdir):
        """Locate and/or download `spec` to `tmpdir`, returning a local path

        `spec` may be a ``Requirement`` object, or a string containing a URL,
        an existing local filename, or a project/version requirement spec
        (i.e. the string form of a ``Requirement`` object).  If it is the URL
        of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
        that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
        automatically created alongside the downloaded file.

        If `spec` is a ``Requirement`` object or a string containing a
        project/version requirement spec, this method returns the location of
        a matching distribution (possibly after downloading it to `tmpdir`).
        If `spec` is a locally existing file or directory name, it is simply
        returned unchanged.  If `spec` is a URL, it is downloaded to a subpath
        of `tmpdir`, and the local filename is returned.  Various errors may be
        raised if a problem occurs during downloading.
        """
        if not isinstance(spec, Requirement):
            scheme = URL_SCHEME(spec)
            if scheme:
                # It's a url, download it to tmpdir
                found = self._download_url(scheme.group(1), spec, tmpdir)
                base, fragment = egg_info_for_url(spec)
                if base.endswith('.py'):
                    found = self.gen_setup(found, fragment, tmpdir)
                return found
            elif os.path.exists(spec):
                # Existing file or directory, just return it
                return spec
            else:
                try:
                    spec = Requirement.parse(spec)
                except ValueError:
                    raise DistutilsError(
                        "Not a URL, existing file, or requirement spec: %r" %
                        (spec,)
                    )
        return getattr(self.fetch_distribution(spec, tmpdir), 'location', None)
    def fetch_distribution(
        self, requirement, tmpdir, force_scan=False, source=False,
        develop_ok=False, local_index=None
    ):
        """Obtain a distribution suitable for fulfilling `requirement`

        `requirement` must be a ``pkg_resources.Requirement`` instance.
        If necessary, or if the `force_scan` flag is set, the requirement is
        searched for in the (online) package index as well as the locally
        installed packages.  If a distribution matching `requirement` is found,
        the returned distribution's ``location`` is the value you would have
        gotten from calling the ``download()`` method with the matching
        distribution's URL or filename.  If no matching distribution is found,
        ``None`` is returned.

        If the `source` flag is set, only source distributions and source
        checkout links will be considered.  Unless the `develop_ok` flag is
        set, development and system eggs (i.e., those using the ``.egg-info``
        format) will be ignored.
        """
        # process a Requirement
        self.info("Searching for %s", requirement)
        skipped = {}
        dist = None

        def find(req, env=None):
            if env is None:
                env = self
            # Find a matching distribution; may be called more than once
            for dist in env[req.key]:
                if dist.precedence == DEVELOP_DIST and not develop_ok:
                    if dist not in skipped:
                        self.warn("Skipping development or system egg: %s", dist)
                        skipped[dist] = 1
                    continue
                if dist in req and (dist.precedence <= SOURCE_DIST or not source):
                    return dist

        if force_scan:
            self.prescan()
            self.find_packages(requirement)
            dist = find(requirement)

        if local_index is not None:
            dist = dist or find(requirement, local_index)

        if dist is None:
            if self.to_scan is not None:
                # First try any deferred --find-links pages.
                self.prescan()
            dist = find(requirement)

        if dist is None and not force_scan:
            self.find_packages(requirement)
            dist = find(requirement)

        if dist is None:
            self.warn(
                "No local packages or download links found for %s%s",
                (source and "a source distribution of " or ""),
                requirement,
            )
        else:
            self.info("Best match: %s", dist)
            # Return a clone pointing at the locally-downloaded location.
            return dist.clone(location=self.download(dist.location, tmpdir))
def fetch(self, requirement, tmpdir, force_scan=False, source=False):
"""Obtain a file suitable for fulfilling `requirement`
DEPRECATED; use the ``fetch_distribution()`` method now instead. For
backward compatibility, this routine is identical but returns the
``location`` of the downloaded distribution instead of a distribution
object.
"""
dist = self.fetch_distribution(requirement,tmpdir,force_scan,source)
if dist is not None:
return dist.location
return None
    def gen_setup(self, filename, fragment, tmpdir):
        """Synthesize a setup.py beside a downloaded single-module .py file.

        Requires an unambiguous "#egg=name-version" fragment; raises
        DistutilsError otherwise.
        """
        match = EGG_FRAGMENT.match(fragment)
        # Keep only interpretations that actually carry a version.
        dists = match and [
            d for d in
            interpret_distro_name(filename, match.group(1), None) if d.version
        ] or []

        if len(dists) == 1:  # unambiguous ``#egg`` fragment
            basename = os.path.basename(filename)

            # Make sure the file has been downloaded to the temp dir.
            if os.path.dirname(filename) != tmpdir:
                dst = os.path.join(tmpdir, basename)
                from setuptools.command.easy_install import samefile
                if not samefile(filename, dst):
                    shutil.copy2(filename, dst)
                    filename = dst

            file = open(os.path.join(tmpdir, 'setup.py'), 'w')
            file.write(
                "from setuptools import setup\n"
                "setup(name=%r, version=%r, py_modules=[%r])\n"
                % (
                    dists[0].project_name, dists[0].version,
                    os.path.splitext(basename)[0]
                )
            )
            file.close()
            return filename

        elif match:
            raise DistutilsError(
                "Can't unambiguously interpret project/version identifier %r; "
                "any dashes in the name or version should be escaped using "
                "underscores. %r" % (fragment, dists)
            )
        else:
            raise DistutilsError(
                "Can't process plain .py files without an '#egg=name-version'"
                " suffix to enable automatic setup script generation."
            )
    dl_blocksize = 8192  # bytes read per chunk while downloading

    def _download_to(self, url, filename):
        """Stream `url` into `filename`, verifying any hash fragment.

        Returns the response headers; raises DistutilsError on HTTP errors
        or hash mismatch.
        """
        self.info("Downloading %s", url)
        # Download the file
        fp, tfp, info = None, None, None
        try:
            checker = HashChecker.from_url(url)
            fp = self.open_url(strip_fragment(url))
            if isinstance(fp, HTTPError):
                raise DistutilsError(
                    "Can't download %s: %s %s" % (url, fp.code, fp.msg)
                )
            headers = fp.info()
            blocknum = 0
            bs = self.dl_blocksize
            size = -1
            if "content-length" in headers:
                # Some servers return multiple Content-Length headers :(
                sizes = get_all_headers(headers, 'Content-Length')
                size = max(map(int, sizes))
                self.reporthook(url, filename, blocknum, bs, size)
            tfp = open(filename, 'wb')
            while True:
                block = fp.read(bs)
                if block:
                    checker.feed(block)
                    tfp.write(block)
                    blocknum += 1
                    self.reporthook(url, filename, blocknum, bs, size)
                else:
                    break
            self.check_hash(checker, filename, tfp)
            return headers
        finally:
            if fp:
                fp.close()
            if tfp:
                tfp.close()
    def reporthook(self, url, filename, blocknum, blksize, size):
        """Download-progress callback; subclasses may override to report."""
        pass  # no-op
    def open_url(self, url, warning=None):
        """Open `url`, mapping failures to warnings or DistutilsError.

        When `warning` is given, recoverable errors are warned about (and
        None is returned implicitly) instead of raising.  HTTPError
        instances are returned to the caller for inspection.
        """
        if url.startswith('file:'):
            return local_open(url)
        try:
            return open_with_auth(url, self.opener)
        except (ValueError, httplib.InvalidURL):
            v = sys.exc_info()[1]
            msg = ' '.join([str(arg) for arg in v.args])
            if warning:
                self.warn(warning, msg)
            else:
                raise DistutilsError('%s %s' % (url, msg))
        except urllib2.HTTPError:
            v = sys.exc_info()[1]
            # The error object doubles as a response; hand it back.
            return v
        except urllib2.URLError:
            v = sys.exc_info()[1]
            if warning:
                self.warn(warning, v.reason)
            else:
                raise DistutilsError("Download error for %s: %s"
                    % (url, v.reason))
        except httplib.BadStatusLine:
            v = sys.exc_info()[1]
            if warning:
                self.warn(warning, v.line)
            else:
                raise DistutilsError(
                    '%s returned a bad status line. The server might be '
                    'down, %s' %
                    (url, v.line)
                )
        except httplib.HTTPException:
            v = sys.exc_info()[1]
            if warning:
                self.warn(warning, v)
            else:
                raise DistutilsError("Download error for %s: %s"
                    % (url, v))
    def _download_url(self, scheme, url, tmpdir):
        """Dispatch the download of `url` by scheme; return the local path."""
        # Determine download filename
        #
        name, fragment = egg_info_for_url(url)
        if name:
            while '..' in name:
                # Defend against path traversal via the URL's basename.
                name = name.replace('..', '.').replace('\\', '_')
        else:
            name = "__downloaded__"  # default if URL has no path contents

        if name.endswith('.egg.zip'):
            name = name[:-4]  # strip the extra .zip before download

        filename = os.path.join(tmpdir, name)

        # Download the file
        #
        if scheme == 'svn' or scheme.startswith('svn+'):
            return self._download_svn(url, filename)
        elif scheme == 'git' or scheme.startswith('git+'):
            return self._download_git(url, filename)
        elif scheme.startswith('hg+'):
            return self._download_hg(url, filename)
        elif scheme == 'file':
            return url2pathname(urlparse(url)[2])
        else:
            self.url_ok(url, True)  # raises error if not allowed
            return self._attempt_download(url, filename)
    def scan_url(self, url):
        """Scan `url` immediately, retrieving its page if necessary."""
        self.process_url(url, True)
def _attempt_download(self, url, filename):
headers = self._download_to(url, filename)
if 'html' in headers.get('content-type','').lower():
return self._download_html(url, headers, filename)
else:
return filename
    def _download_html(self, url, headers, filename):
        """Handle an unexpected HTML download: svn index pages get checked
        out; anything else is an error."""
        file = open(filename)
        for line in file:
            if line.strip():
                # Check for a subversion index page
                if re.search(r'<title>([^- ]+ - )?Revision \d+:', line):
                    # it's a subversion index page:
                    file.close()
                    os.unlink(filename)
                    return self._download_svn(url, filename)
                break  # not an index page
        file.close()
        os.unlink(filename)
        raise DistutilsError("Unexpected HTML page found at " + url)
def _download_svn(self, url, filename):
url = url.split('#',1)[0] # remove any fragment for svn's sake
creds = ''
if url.lower().startswith('svn:') and '@' in url:
scheme, netloc, path, p, q, f = urlparse(url)
if not netloc and path.startswith('//') and '/' in path[2:]:
netloc, path = path[2:].split('/',1)
auth, host = splituser(netloc)
if auth:
if ':' in auth:
user, pw = auth.split(':',1)
creds = " --username=%s --password=%s" % (user, pw)
else:
creds = " --username="+auth
netloc = host
url = urlunparse((scheme, netloc, url, p, q, f))
self.info("Doing subversion checkout from %s to %s", url, filename)
os.system("svn checkout%s -q %s %s" % (creds, url, filename))
return filename
@staticmethod
def _vcs_split_rev_from_url(url, pop_prefix=False):
scheme, netloc, path, query, frag = urlsplit(url)
scheme = scheme.split('+', 1)[-1]
# Some fragment identification fails
path = path.split('#',1)[0]
rev = None
if '@' in path:
path, rev = path.rsplit('@', 1)
# Also, discard fragment
url = urlunsplit((scheme, netloc, path, query, ''))
return url, rev
    def _download_git(self, url, filename):
        """Clone `url` with git, then check out any "@rev" revision."""
        filename = filename.split('#', 1)[0]
        url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)

        self.info("Doing git clone from %s to %s", url, filename)
        os.system("git clone --quiet %s %s" % (url, filename))

        if rev is not None:
            self.info("Checking out %s", rev)
            os.system("(cd %s && git checkout --quiet %s)" % (
                filename,
                rev,
            ))

        return filename
    def _download_hg(self, url, filename):
        """Clone `url` with mercurial, then update to any "@rev" revision."""
        filename = filename.split('#', 1)[0]
        url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)

        self.info("Doing hg clone from %s to %s", url, filename)
        os.system("hg clone --quiet %s %s" % (url, filename))

        if rev is not None:
            self.info("Updating to %s", rev)
            os.system("(cd %s && hg up -C -r %s >&-)" % (
                filename,
                rev,
            ))

        return filename
    # Thin logging wrappers so subclasses can redirect reporting.
    def debug(self, msg, *args):
        log.debug(msg, *args)

    def info(self, msg, *args):
        log.info(msg, *args)

    def warn(self, msg, *args):
        log.warn(msg, *args)
# This pattern matches a character entity reference (a decimal numeric
# references, a hexadecimal numeric reference, or a named reference).
entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub
def uchr(c):
    """Return the character for code point `c`; pass non-ints through."""
    if not isinstance(c, int):
        return c
    # unichr handles wide code points on Python 2; chr suffices below 256.
    return unichr(c) if c > 255 else chr(c)
def decode_entity(match):
    """Resolve one matched HTML entity to its character.

    Unknown named entities are returned verbatim (the full match text).
    """
    what = match.group(1)
    if what.startswith('#x'):
        code = int(what[2:], 16)
    elif what.startswith('#'):
        code = int(what[1:])
    else:
        code = name2codepoint.get(what, match.group(0))
    return uchr(code)
def htmldecode(text):
    """Decode HTML entities in the given text."""
    # Per-entity conversion is delegated to decode_entity via the
    # pre-compiled entity_sub pattern.
    return entity_sub(decode_entity, text)
def socket_timeout(timeout=15):
    """Decorator factory: run the wrapped callable under a default socket
    timeout of `timeout` seconds, restoring the previous value afterwards."""
    def decorator(func):
        def timed_call(*args, **kwargs):
            previous = socket.getdefaulttimeout()
            socket.setdefaulttimeout(timeout)
            try:
                return func(*args, **kwargs)
            finally:
                socket.setdefaulttimeout(previous)
        return timed_call
    return decorator
def _encode_auth(auth):
"""
A function compatible with Python 2.3-3.3 that will encode
auth from a URL suitable for an HTTP header.
>>> str(_encode_auth('username%3Apassword'))
'dXNlcm5hbWU6cGFzc3dvcmQ='
Long auth strings should not cause a newline to be inserted.
>>> long_auth = 'username:' + 'password'*10
>>> chr(10) in str(_encode_auth(long_auth))
False
"""
auth_s = unquote(auth)
# convert to bytes
auth_bytes = auth_s.encode()
# use the legacy interface for Python 2.3 support
encoded_bytes = base64.encodestring(auth_bytes)
# convert back to a string
encoded = encoded_bytes.decode()
# strip the trailing carriage return
return encoded.replace('\n','')
class Credential(object):
    """
    A username/password pair. Use like a namedtuple.
    """

    def __init__(self, username, password):
        self.username = username
        self.password = password

    def __iter__(self):
        # Support tuple-style unpacking: user, pw = cred
        return iter((self.username, self.password))

    def __str__(self):
        return '%(username)s:%(password)s' % vars(self)
class PyPIConfig(ConfigParser.ConfigParser):
    def __init__(self):
        """
        Load from ~/.pypirc
        """
        # Every section gets these keys even when absent from the file.
        defaults = dict.fromkeys(['username', 'password', 'repository'], '')
        ConfigParser.ConfigParser.__init__(self, defaults)

        rc = os.path.join(os.path.expanduser('~'), '.pypirc')
        if os.path.exists(rc):
            self.read(rc)

    @property
    def creds_by_repository(self):
        # Only sections that declare a repository URL can hold credentials.
        sections_with_repositories = [
            section for section in self.sections()
            if self.get(section, 'repository').strip()
        ]
        return dict(map(self._get_repo_cred, sections_with_repositories))

    def _get_repo_cred(self, section):
        # Returns (repository-url, Credential) for a single config section.
        repo = self.get(section, 'repository').strip()
        return repo, Credential(
            self.get(section, 'username').strip(),
            self.get(section, 'password').strip(),
        )

    def find_credential(self, url):
        """
        If the URL indicated appears to be a repository defined in this
        config, return the credential for that repository.
        """
        for repository, cred in self.creds_by_repository.items():
            if url.startswith(repository):
                return cred
def open_with_auth(url, opener=urllib2.urlopen):
    """Open a urllib2 request, handling HTTP authentication"""

    scheme, netloc, path, params, query, frag = urlparse(url)

    # Double scheme does not raise on Mac OS X as revealed by a
    # failing test. We would expect "nonnumeric port". Refs #20.
    if netloc.endswith(':'):
        raise httplib.InvalidURL("nonnumeric port: ''")

    if scheme in ('http', 'https'):
        auth, host = splituser(netloc)
    else:
        # NOTE(review): `host` is left unbound on this branch; if a .pypirc
        # credential matches a non-http(s) URL below, the urlunparse call
        # would raise NameError -- confirm whether that path is reachable.
        auth = None

    if not auth:
        # Fall back to credentials configured in ~/.pypirc, if any.
        cred = PyPIConfig().find_credential(url)
        if cred:
            auth = str(cred)
            info = cred.username, url
            log.info('Authenticating as %s for %s (from .pypirc)' % info)

    if auth:
        auth = "Basic " + _encode_auth(auth)
        # Rebuild the URL without the embedded user:password part.
        new_url = urlunparse((scheme, host, path, params, query, frag))
        request = urllib2.Request(new_url)
        request.add_header("Authorization", auth)
    else:
        request = urllib2.Request(url)

    request.add_header('User-Agent', user_agent)
    fp = opener(request)

    if auth:
        # Put authentication info back into request URL if same host,
        # so that links found on the page will work
        s2, h2, path2, param2, query2, frag2 = urlparse(fp.url)
        if s2 == scheme and h2 == host:
            fp.url = urlunparse((s2, netloc, path2, param2, query2, frag2))

    return fp
# adding a timeout to avoid freezing package_index
# (rebinds the module-level name to the timeout-wrapped version)
open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth)
def fix_sf_url(url):
    """Deprecated no-op, retained for backward compatibility."""
    return url
def local_open(url):
    """Read a local path, with special support for directories"""
    scheme, server, path, param, query, frag = urlparse(url)
    filename = url2pathname(path)
    if os.path.isfile(filename):
        return urllib2.urlopen(url)
    elif path.endswith('/') and os.path.isdir(filename):
        files = []
        for f in os.listdir(filename):
            if f == 'index.html':
                # A real index page wins; serve its contents.
                # NOTE(review): this break path never assigns `status` /
                # `message`, so the final return would raise NameError --
                # confirm against upstream before relying on it.
                fp = open(os.path.join(filename, f), 'r')
                body = fp.read()
                fp.close()
                break
            elif os.path.isdir(os.path.join(filename, f)):
                f += '/'
            files.append("<a href=%r>%s</a>" % (f, f))
        else:
            # No index.html: synthesize a simple directory listing.
            body = ("<html><head><title>%s</title>" % url) + \
                "</head><body>%s</body></html>" % '\n'.join(files)
            status, message = 200, "OK"
    else:
        status, message, body = 404, "Path not found", "Not found"

    headers = {'content-type': 'text/html'}
    return HTTPError(url, status, message, headers, StringIO(body))
| mit |
drulang/nnw | nnw.py | 1 | 1854 | import sys
import os
# Leet-speak substitution table: lowercase letter -> possible replacements.
# Values may be ints or strings; they are str()-converted when substituted.
# Commented-out entries are multi-character forms deemed too noisy.
mappings = {
    "a": [4, "@"],
    "b": ["13"],
    "c": ["("],
    "d": ["[)"],
    "e": [3],
    #"f": ["|="],
    "g": [6],
    #"h": ["|-|"],
    "i": [1, '!', "|"],
    #"j": [".]"],
    "k": ["|<"],
    "l": [1],
    "m": ['|Y|'],
    #"n": ["/\\/"],
    "o": [0],
    #"p": ["|>"],
    "q": ['0,'],
    #"r": ['|2'],
    "s": [5, "$"],
    "t": [7],
    "u": ['[_]', '|_|'],
    "v": ['\\/'],
    "w": ['\\v/'],
    #"x": ['}{'],
    "y": ['`/'],
    "z": ['2'],
}
def parse_words(word_list):
    """Return a list of every leet-speak variant of each word in `word_list`."""
    generated = []
    for word in word_list:
        parse_word(word, generated)
    return generated
def parse_word(word, generated_word_list):
    """Recursively append `word` and all its single-substitution leet variants.

    Already-seen words are skipped, which both deduplicates the output and
    terminates the recursion.
    """
    if word in generated_word_list:
        return
    generated_word_list.append(word)
    chars = list(word)  # work on a list so single chars are easy to swap
    for index, letter in enumerate(word):
        if letter.lower() not in mappings:
            continue
        for alt_char in mappings[letter.lower()]:
            # Bug fix: substitute at this occurrence's own position. The
            # original used word.index(letter), which always targeted the
            # first occurrence of a repeated letter; it also never restored
            # the character, so later letters built on mutated prefixes.
            chars[index] = str(alt_char)
            parse_word("".join(chars), generated_word_list)
        chars[index] = letter  # restore before moving to the next position
if __name__ == "__main__":
    # Usage: python nnw.py <wordlist-file>; writes all variants to ./nonowords
    if len(sys.argv) != 2:
        print "Please pass in file location"
        exit(1)

    file_location = sys.argv[1]
    if not os.path.isfile(file_location):
        print "File does not exist or is a directory"
        exit(1)

    with open("nonowords", "w+") as bad_word_file:
        with open(file_location) as file_handle:
            for word in file_handle:
                # NOTE(review): `word` keeps its trailing newline, so this
                # test is always true and variants inherit the newline.
                if len(word) > 0:
                    bad_words = []
                    parse_word(word, bad_words)
                    for bad_word in bad_words:
                        bad_word_file.write(bad_word)
| gpl-2.0 |
Pikecillo/genna | external/4Suite-XML-1.0.2/test/Xml/XPath/Core/test_predicate_list.py | 1 | 1939 | from Ft.Xml import Domlette
from xml.dom import Node
# Test document: two <element> entries whose <y> values differ ('a' vs 'z').
source_1 = """<?xml version="1.0"?>
<elementList>
<element>
<x>
<y>a</y>
</x>
</element>
<element>
<x>
<y>z</y>
</x>
</element>
</elementList>"""
def Test(tester):
    """Exercise XPath predicate-list parsing/filtering via the 4Suite tester."""
    tester.startGroup('Predicate List')

    tester.startTest('Checking syntax')
    from Ft.Xml.XPath import ParsedPredicateList
    from Ft.Xml.XPath import ParsedExpr
    from Ft.Xml.XPath import Context
    from Ft.Xml.XPath import Evaluate
    import DummyExpr
    DomTree = tester.test_data['tree']
    tester.testDone()

    tester.startTest('Creating test environment')
    t = DummyExpr.DummyBooleanExpr(1)   # always-true predicate
    f = DummyExpr.DummyBooleanExpr(0)   # always-false predicate
    a = ParsedExpr.ParsedAndExpr(t, f)  # true AND false -> false
    o = ParsedExpr.ParsedOrExpr(t, f)   # true OR false -> true
    context = Context.Context(DomTree.ROOT, 1, 1)
    tester.testDone()

    # A false first predicate must filter the node set to empty.
    p = ParsedPredicateList.ParsedPredicateList([a, t])
    tester.startTest('Filter of "%s"' % repr(p))
    result = p.filter([context.node], context, 0)
    tester.compare(0, len(result))
    tester.testDone()

    # A true first predicate must keep the node.
    p = ParsedPredicateList.ParsedPredicateList([o, t])
    tester.startTest('Filter of "%s"' % repr(p))
    result = p.filter([context.node], context, 0)
    tester.compare([DomTree.ROOT], result)
    tester.testDone()

    dom = Domlette.NonvalidatingReader.parseString(source_1, '.')
    # Expected match: the last <element> child (the one whose <y> is 'z').
    expected = filter(lambda x: x.nodeType == Node.ELEMENT_NODE,
        dom.documentElement.childNodes)[-1]
    tests = [("//element[descendant::y[.='z']]", [expected]),
             ("//element[descendant::y[.='z']][1]", [expected]),
             ("//element[descendant::y[.='z']][2]", []),
             ]
    for (expr, expected) in tests:
        tester.startTest(expr)
        actual = Evaluate(expr, contextNode=dom)
        tester.compare(expected, actual)
        tester.testDone()

    return tester.groupDone()
| gpl-2.0 |
jondo/shogun | examples/undocumented/python_modular/structure_graphcuts.py | 14 | 8660 | #!/usr/bin/env python
import numpy as np
import itertools
from modshogun import Factor, TableFactorType, FactorGraph
from modshogun import FactorGraphObservation, FactorGraphLabels, FactorGraphFeatures
from modshogun import FactorGraphModel, GRAPH_CUT
from modshogun import GraphCut
from modshogun import StochasticSOSVM
def generate_data(num_train_samples, len_label, len_feat):
    """Generate a synthetic one-hot dataset following [1].

    Sample k turns on exactly one label, i = k % len_label, and sets
    4*(i+1) randomly chosen feature bits to 1.

    [1] Finley, Thomas, and Thorsten Joachims.
    "Training structural SVMs when exact inference is intractable."
    Proceedings of the 25th international conference on Machine learning. ACM, 2008.

    Args:
        num_train_samples: number of samples
        len_label: label length (10)
        len_feat: feature length (40)

    Returns:
        (labels, feats): int32 matrices of shapes
        (num_train_samples, len_label) and (num_train_samples, len_feat)
    """
    labels = np.zeros((num_train_samples, len_label), np.int32)
    feats = np.zeros((num_train_samples, len_feat), np.int32)
    for sample in range(num_train_samples):
        active = sample % len_label
        labels[sample, active] = 1
        # Pick 4*(active+1) distinct feature indices at random.
        chosen = np.random.permutation(range(len_feat))[:4 * (active + 1)]
        for feat_idx in chosen:
            feats[sample, feat_idx] = 1
    return (labels, feats)
def define_factor_types(num_vars, len_feat, edge_table):
    """Create one unary factor type per variable plus one pair-wise
    factor type per edge, all with zero-initialized weights.

    Args:
        num_vars: number of variables in the factor graph
        len_feat: length of the feature vector
        edge_table: matrix whose rows are pairs of node indices

    Returns:
        v_factor_types: dict mapping type id -> TableFactorType; unary
        ids are 0..num_vars-1, pair-wise ids are offset by num_vars
    """
    n_stats = 2  # binary variables
    v_factor_types = {}
    # unary types: single variable of cardinality 2, weight vector of
    # length n_stats * len_feat
    unary_cards = np.array([n_stats], np.int32)
    unary_weights = np.zeros(n_stats * len_feat)
    for var_id in range(num_vars):
        v_factor_types[var_id] = TableFactorType(var_id, unary_cards, unary_weights)
    # pair-wise types: two binary variables, weight vector of length 4
    pw_cards = np.array([n_stats, n_stats], np.int32)
    pw_weights = np.zeros(n_stats * n_stats)
    for edge_id in range(edge_table.shape[0]):
        type_id = edge_id + num_vars
        v_factor_types[type_id] = TableFactorType(type_id, pw_cards, pw_weights)
    return v_factor_types
def build_factor_graph_model(labels, feats, factor_types, edge_table, infer_alg = GRAPH_CUT):
    """Pack raw label/feature matrices into shogun factor-graph containers.

    Args:
        labels: label matrix [num_train_samples * len_label]
        feats: feature matrix [num_train_samples * len_feat]
        factor_types: dict of all factor types (unary ids first, pair-wise
                      ids offset by the number of variables)
        edge_table: matrix of pair-wise edges; each row is a node-index pair
        infer_alg: inference algorithm id (kept for API symmetry)

    Returns:
        (labels_fg, feats_fg): FactorGraphLabels and FactorGraphFeatures
    """
    labels = labels.astype(np.int32)
    n_samples, n_vars = labels.shape
    n_edges = edge_table.shape[0]
    n_stats = 2
    feats_fg = FactorGraphFeatures(n_samples)
    labels_fg = FactorGraphLabels(n_samples)
    for sample in range(n_samples):
        graph = FactorGraph(np.array([n_stats] * n_vars, np.int32))
        # one unary factor per variable; each shares the sample's full
        # feature vector as factor data
        for var in range(n_vars):
            graph.add_factor(Factor(factor_types[var],
                                    np.array([var], np.int32),
                                    np.array(feats[sample, :], np.float64)))
        # one pair-wise factor per edge with constant data [1.0]
        for edge in range(n_edges):
            graph.add_factor(Factor(factor_types[edge + n_vars],
                                    np.array(edge_table[edge, :], np.int32),
                                    np.array([1.0])))
        feats_fg.add_sample(graph)
        # ground-truth observation with uniform per-variable loss weights
        weights = np.array([1.0 / n_vars] * n_vars)
        labels_fg.add_label(FactorGraphObservation(labels[sample, :], weights))
    return (labels_fg, feats_fg)
def evaluation(labels_pr, labels_gt, model):
    """Compute the average delta loss of predictions vs. ground truth.

    Args:
        labels_pr: predicted labels container (get_num_labels/get_label)
        labels_gt: ground truth labels container (get_label)
        model: factor graph model providing ``delta_loss(truth, pred)``

    Returns:
        ave_loss: average loss over all samples; 0.0 for an empty set
    """
    num_samples = labels_pr.get_num_labels()
    if num_samples == 0:
        # original code also yielded 0.0 here (loop never ran)
        return 0.0
    acc_loss = 0.0
    for i in range(num_samples):
        y_pred = labels_pr.get_label(i)
        y_truth = labels_gt.get_label(i)
        acc_loss += model.delta_loss(y_truth, y_pred)
    # divide once after accumulating instead of on every iteration
    return acc_loss / num_samples
def graphcuts_sosvm(num_train_samples = 20, len_label = 10, len_feat = 40, num_test_samples = 10):
    """ Graph cuts as approximate inference in structured output SVM framework.

    Builds a fully-connected pairwise factor-graph model over the label
    variables, trains a StochasticSOSVM with GRAPH_CUT inference, then
    measures training and testing loss (prints are commented out).

    Args:
        num_train_samples: number of training samples
        len_label: number of classes, i.e., size of label space
        len_feat: the dimention of the feature vector
        num_test_samples: number of testing samples
    """
    import time
    # generate synthetic dataset
    (labels_train, feats_train) = generate_data(num_train_samples, len_label, len_feat)
    # compute full-connected edge table
    # (every unordered pair of label variables becomes one pair-wise factor)
    full = np.vstack([x for x in itertools.combinations(range(len_label), 2)])
    # define factor types
    factor_types = define_factor_types(len_label, len_feat, full)
    # create features and labels for factor graph mode
    (labels_fg, feats_fg) = build_factor_graph_model(labels_train, feats_train, factor_types, full, GRAPH_CUT)
    # create model and register factor types
    model = FactorGraphModel(feats_fg, labels_fg, GRAPH_CUT)
    for i in range(len(factor_types)):
        model.add_factor_type(factor_types[i])
    # Training
    # the 3rd parameter is do_weighted_averaging, by turning this on,
    # a possibly faster convergence rate may be achieved.
    # the 4th parameter controls outputs of verbose training information
    sgd = StochasticSOSVM(model, labels_fg, True, True)
    sgd.set_num_iter(150)
    sgd.set_lambda(0.0001)
    # train
    t0 = time.time()
    sgd.train()
    t1 = time.time()
    w_sgd = sgd.get_w()  # learned weight vector (unused below, kept for inspection)
    #print "SGD took", t1 - t0, "seconds."
    # training error
    labels_pr = sgd.apply()
    ave_loss = evaluation(labels_pr, labels_fg, model)
    #print('SGD: Average training error is %.4f' % ave_loss)
    # testing error
    # generate synthetic testing dataset
    (labels_test, feats_test) = generate_data(num_test_samples, len_label, len_feat)
    # create features and labels for factor graph mode
    (labels_fg_test, feats_fg_test) = build_factor_graph_model(labels_test, feats_test, factor_types, full, GRAPH_CUT)
    # set features and labels to sgd
    sgd.set_features(feats_fg_test)
    sgd.set_labels(labels_fg_test)
    # test
    labels_pr = sgd.apply()
    ave_loss = evaluation(labels_pr, labels_fg_test, model)
    #print('SGD: Average testing error is %.4f' % ave_loss)
def graphcuts_general():
    """ Graph cuts for general s-t graph optimization.

    Builds a small fixed 5-node / 6-edge s-t graph, runs max-flow, and
    leaves the flow value and node assignments available (prints are
    commented out).
    """
    num_nodes = 5
    num_edges = 6
    g = GraphCut(num_nodes, num_edges)
    # add termainal-connected edges
    # i.e., SOURCE->node_i and node_i->SINK
    # NOTE(review): arguments appear to be (node, cap_source, cap_sink) —
    # confirm against the GraphCut::add_tweights API.
    g.add_tweights(0, 4, 0)
    g.add_tweights(1, 2, 0)
    g.add_tweights(2, 8, 0)
    g.add_tweights(2, 0, 4)
    g.add_tweights(3, 0, 7)
    g.add_tweights(4, 0, 5)
    # add node to node edges
    g.add_edge(0, 2, 5, 0)
    g.add_edge(0, 3, 2, 0)
    g.add_edge(1, 2, 6, 0)
    g.add_edge(1, 4, 9, 0)
    g.add_edge(2, 3, 1, 0)
    g.add_edge(2, 4, 3, 0)
    # initialize max-flow algorithm
    g.init_maxflow()
    # compute max flow
    flow = g.compute_maxflow()
    #print("Flow = %f" % flow)
    # print assignment
    #for i in xrange(num_nodes):
    #    print("\nNode %d = %d" % (i, g.get_assignment(i)))
# Default switches for the example runner below.
test_general = True
test_sosvm = True
# Parameter sets consumed by the shogun example test harness / __main__.
parameter_list = [[test_general, test_sosvm]]
def structure_graphcuts(test_general=True, test_sosvm=True):
    """ Test graph cuts.

    Args:
        test_general: test graph cuts for general s-t graph optimization
        test_sosvm: test graph cuts for structured output svm
    """
    # run each enabled demo in a fixed order
    demos = ((test_general, graphcuts_general), (test_sosvm, graphcuts_sosvm))
    for enabled, demo in demos:
        if enabled:
            demo()
# Allow running this example directly from the command line.
if __name__ == '__main__':
    print("Graph cuts")
    structure_graphcuts(*parameter_list[0])
| gpl-3.0 |
lindamar/ecclesi | env/lib/python2.7/site-packages/flask/app.py | 47 | 83370 | # -*- coding: utf-8 -*-
"""
flask.app
~~~~~~~~~
This module implements the central WSGI application object.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from threading import Lock
from datetime import timedelta
from itertools import chain
from functools import update_wrapper
from collections import deque
from werkzeug.datastructures import ImmutableDict
from werkzeug.routing import Map, Rule, RequestRedirect, BuildError
from werkzeug.exceptions import HTTPException, InternalServerError, \
MethodNotAllowed, BadRequest, default_exceptions
from .helpers import _PackageBoundObject, url_for, get_flashed_messages, \
locked_cached_property, _endpoint_from_view_func, find_package, \
get_debug_flag
from . import json, cli
from .wrappers import Request, Response
from .config import ConfigAttribute, Config
from .ctx import RequestContext, AppContext, _AppCtxGlobals
from .globals import _request_ctx_stack, request, session, g
from .sessions import SecureCookieSessionInterface
from .templating import DispatchingJinjaLoader, Environment, \
_default_template_ctx_processor
from .signals import request_started, request_finished, got_request_exception, \
request_tearing_down, appcontext_tearing_down
from ._compat import reraise, string_types, text_type, integer_types
# a lock used for logger initialization
_logger_lock = Lock()
# a singleton sentinel value for parameter defaults
_sentinel = object()
def _make_timedelta(value):
if not isinstance(value, timedelta):
return timedelta(seconds=value)
return value
def setupmethod(f):
    """Wraps a method so that it performs a check in debug mode if the
    first request was already handled.
    """
    def wrapper_func(self, *args, **kwargs):
        # Fast path: setup is still allowed (not debugging, or no request
        # handled yet) — just delegate to the wrapped method.
        if not (self.debug and self._got_first_request):
            return f(self, *args, **kwargs)
        # Registering setup code after the first request is almost always
        # a latent import-order bug, so fail loudly while debugging.
        raise AssertionError('A setup function was called after the '
            'first request was handled. This usually indicates a bug '
            'in the application where a module was not imported '
            'and decorators or other functionality was called too late.\n'
            'To fix this make sure to import all your view modules, '
            'database models and everything related at a central place '
            'before the application starts serving requests.')
    return update_wrapper(wrapper_func, f)
class Flask(_PackageBoundObject):
"""The flask object implements a WSGI application and acts as the central
object. It is passed the name of the module or package of the
application. Once it is created it will act as a central registry for
the view functions, the URL rules, template configuration and much more.
The name of the package is used to resolve resources from inside the
package or the folder the module is contained in depending on if the
package parameter resolves to an actual python package (a folder with
an :file:`__init__.py` file inside) or a standard module (just a ``.py`` file).
For more information about resource loading, see :func:`open_resource`.
Usually you create a :class:`Flask` instance in your main module or
in the :file:`__init__.py` file of your package like this::
from flask import Flask
app = Flask(__name__)
.. admonition:: About the First Parameter
The idea of the first parameter is to give Flask an idea of what
belongs to your application. This name is used to find resources
on the filesystem, can be used by extensions to improve debugging
information and a lot more.
So it's important what you provide there. If you are using a single
module, `__name__` is always the correct value. If you however are
using a package, it's usually recommended to hardcode the name of
your package there.
For example if your application is defined in :file:`yourapplication/app.py`
you should create it with one of the two versions below::
app = Flask('yourapplication')
app = Flask(__name__.split('.')[0])
Why is that? The application will work even with `__name__`, thanks
to how resources are looked up. However it will make debugging more
painful. Certain extensions can make assumptions based on the
import name of your application. For example the Flask-SQLAlchemy
extension will look for the code in your application that triggered
an SQL query in debug mode. If the import name is not properly set
up, that debugging information is lost. (For example it would only
pick up SQL queries in `yourapplication.app` and not
`yourapplication.views.frontend`)
.. versionadded:: 0.7
The `static_url_path`, `static_folder`, and `template_folder`
parameters were added.
.. versionadded:: 0.8
The `instance_path` and `instance_relative_config` parameters were
added.
.. versionadded:: 0.11
The `root_path` parameter was added.
:param import_name: the name of the application package
:param static_url_path: can be used to specify a different path for the
static files on the web. Defaults to the name
of the `static_folder` folder.
:param static_folder: the folder with static files that should be served
at `static_url_path`. Defaults to the ``'static'``
folder in the root path of the application.
:param template_folder: the folder that contains the templates that should
be used by the application. Defaults to
``'templates'`` folder in the root path of the
application.
:param instance_path: An alternative instance path for the application.
By default the folder ``'instance'`` next to the
package or module is assumed to be the instance
path.
:param instance_relative_config: if set to ``True`` relative filenames
for loading the config are assumed to
be relative to the instance path instead
of the application root.
:param root_path: Flask by default will automatically calculate the path
to the root of the application. In certain situations
this cannot be achieved (for instance if the package
is a Python 3 namespace package) and needs to be
manually defined.
"""
#: The class that is used for request objects. See :class:`~flask.Request`
#: for more information.
request_class = Request
#: The class that is used for response objects. See
#: :class:`~flask.Response` for more information.
response_class = Response
#: The class that is used for the Jinja environment.
#:
#: .. versionadded:: 0.11
jinja_environment = Environment
#: The class that is used for the :data:`~flask.g` instance.
#:
#: Example use cases for a custom class:
#:
#: 1. Store arbitrary attributes on flask.g.
#: 2. Add a property for lazy per-request database connectors.
#: 3. Return None instead of AttributeError on unexpected attributes.
#: 4. Raise exception if an unexpected attr is set, a "controlled" flask.g.
#:
#: In Flask 0.9 this property was called `request_globals_class` but it
#: was changed in 0.10 to :attr:`app_ctx_globals_class` because the
#: flask.g object is now application context scoped.
#:
#: .. versionadded:: 0.10
app_ctx_globals_class = _AppCtxGlobals
# Backwards compatibility support
def _get_request_globals_class(self):
return self.app_ctx_globals_class
def _set_request_globals_class(self, value):
from warnings import warn
warn(DeprecationWarning('request_globals_class attribute is now '
'called app_ctx_globals_class'))
self.app_ctx_globals_class = value
request_globals_class = property(_get_request_globals_class,
_set_request_globals_class)
del _get_request_globals_class, _set_request_globals_class
#: The class that is used for the ``config`` attribute of this app.
#: Defaults to :class:`~flask.Config`.
#:
#: Example use cases for a custom class:
#:
#: 1. Default values for certain config options.
#: 2. Access to config values through attributes in addition to keys.
#:
#: .. versionadded:: 0.11
config_class = Config
#: The debug flag. Set this to ``True`` to enable debugging of the
#: application. In debug mode the debugger will kick in when an unhandled
#: exception occurs and the integrated server will automatically reload
#: the application if changes in the code are detected.
#:
#: This attribute can also be configured from the config with the ``DEBUG``
#: configuration key. Defaults to ``False``.
debug = ConfigAttribute('DEBUG')
#: The testing flag. Set this to ``True`` to enable the test mode of
#: Flask extensions (and in the future probably also Flask itself).
#: For example this might activate unittest helpers that have an
#: additional runtime cost which should not be enabled by default.
#:
#: If this is enabled and PROPAGATE_EXCEPTIONS is not changed from the
#: default it's implicitly enabled.
#:
#: This attribute can also be configured from the config with the
#: ``TESTING`` configuration key. Defaults to ``False``.
testing = ConfigAttribute('TESTING')
#: If a secret key is set, cryptographic components can use this to
#: sign cookies and other things. Set this to a complex random value
#: when you want to use the secure cookie for instance.
#:
#: This attribute can also be configured from the config with the
#: ``SECRET_KEY`` configuration key. Defaults to ``None``.
secret_key = ConfigAttribute('SECRET_KEY')
#: The secure cookie uses this for the name of the session cookie.
#:
#: This attribute can also be configured from the config with the
#: ``SESSION_COOKIE_NAME`` configuration key. Defaults to ``'session'``
session_cookie_name = ConfigAttribute('SESSION_COOKIE_NAME')
#: A :class:`~datetime.timedelta` which is used to set the expiration
#: date of a permanent session. The default is 31 days which makes a
#: permanent session survive for roughly one month.
#:
#: This attribute can also be configured from the config with the
#: ``PERMANENT_SESSION_LIFETIME`` configuration key. Defaults to
#: ``timedelta(days=31)``
permanent_session_lifetime = ConfigAttribute('PERMANENT_SESSION_LIFETIME',
get_converter=_make_timedelta)
#: A :class:`~datetime.timedelta` which is used as default cache_timeout
#: for the :func:`send_file` functions. The default is 12 hours.
#:
#: This attribute can also be configured from the config with the
#: ``SEND_FILE_MAX_AGE_DEFAULT`` configuration key. This configuration
#: variable can also be set with an integer value used as seconds.
#: Defaults to ``timedelta(hours=12)``
send_file_max_age_default = ConfigAttribute('SEND_FILE_MAX_AGE_DEFAULT',
get_converter=_make_timedelta)
#: Enable this if you want to use the X-Sendfile feature. Keep in
#: mind that the server has to support this. This only affects files
#: sent with the :func:`send_file` method.
#:
#: .. versionadded:: 0.2
#:
#: This attribute can also be configured from the config with the
#: ``USE_X_SENDFILE`` configuration key. Defaults to ``False``.
use_x_sendfile = ConfigAttribute('USE_X_SENDFILE')
#: The name of the logger to use. By default the logger name is the
#: package name passed to the constructor.
#:
#: .. versionadded:: 0.4
logger_name = ConfigAttribute('LOGGER_NAME')
#: The JSON encoder class to use. Defaults to :class:`~flask.json.JSONEncoder`.
#:
#: .. versionadded:: 0.10
json_encoder = json.JSONEncoder
#: The JSON decoder class to use. Defaults to :class:`~flask.json.JSONDecoder`.
#:
#: .. versionadded:: 0.10
json_decoder = json.JSONDecoder
#: Options that are passed directly to the Jinja2 environment.
jinja_options = ImmutableDict(
extensions=['jinja2.ext.autoescape', 'jinja2.ext.with_']
)
#: Default configuration parameters.
default_config = ImmutableDict({
'DEBUG': get_debug_flag(default=False),
'TESTING': False,
'PROPAGATE_EXCEPTIONS': None,
'PRESERVE_CONTEXT_ON_EXCEPTION': None,
'SECRET_KEY': None,
'PERMANENT_SESSION_LIFETIME': timedelta(days=31),
'USE_X_SENDFILE': False,
'LOGGER_NAME': None,
'LOGGER_HANDLER_POLICY': 'always',
'SERVER_NAME': None,
'APPLICATION_ROOT': None,
'SESSION_COOKIE_NAME': 'session',
'SESSION_COOKIE_DOMAIN': None,
'SESSION_COOKIE_PATH': None,
'SESSION_COOKIE_HTTPONLY': True,
'SESSION_COOKIE_SECURE': False,
'SESSION_REFRESH_EACH_REQUEST': True,
'MAX_CONTENT_LENGTH': None,
'SEND_FILE_MAX_AGE_DEFAULT': timedelta(hours=12),
'TRAP_BAD_REQUEST_ERRORS': False,
'TRAP_HTTP_EXCEPTIONS': False,
'EXPLAIN_TEMPLATE_LOADING': False,
'PREFERRED_URL_SCHEME': 'http',
'JSON_AS_ASCII': True,
'JSON_SORT_KEYS': True,
'JSONIFY_PRETTYPRINT_REGULAR': True,
'JSONIFY_MIMETYPE': 'application/json',
'TEMPLATES_AUTO_RELOAD': None,
})
#: The rule object to use for URL rules created. This is used by
#: :meth:`add_url_rule`. Defaults to :class:`werkzeug.routing.Rule`.
#:
#: .. versionadded:: 0.7
url_rule_class = Rule
#: the test client that is used with when `test_client` is used.
#:
#: .. versionadded:: 0.7
test_client_class = None
#: the session interface to use. By default an instance of
#: :class:`~flask.sessions.SecureCookieSessionInterface` is used here.
#:
#: .. versionadded:: 0.8
session_interface = SecureCookieSessionInterface()
def __init__(self, import_name, static_path=None, static_url_path=None,
             static_folder='static', template_folder='templates',
             instance_path=None, instance_relative_config=False,
             root_path=None):
    """Initialize the application object.

    Resolves the static and instance folders, builds the config, and
    creates every per-application registry (view functions, error
    handlers, request hooks, blueprints, URL map, CLI group).  See the
    class docstring for the meaning of each parameter.
    """
    _PackageBoundObject.__init__(self, import_name,
                                 template_folder=template_folder,
                                 root_path=root_path)
    # `static_path` was renamed; keep accepting it with a warning.
    if static_path is not None:
        from warnings import warn
        warn(DeprecationWarning('static_path is now called '
                                'static_url_path'), stacklevel=2)
        static_url_path = static_path
    if static_url_path is not None:
        self.static_url_path = static_url_path
    if static_folder is not None:
        self.static_folder = static_folder
    if instance_path is None:
        instance_path = self.auto_find_instance_path()
    elif not os.path.isabs(instance_path):
        raise ValueError('If an instance path is provided it must be '
                         'absolute. A relative path was given instead.')

    #: Holds the path to the instance folder.
    #:
    #: .. versionadded:: 0.8
    self.instance_path = instance_path

    #: The configuration dictionary as :class:`Config`. This behaves
    #: exactly like a regular dictionary but supports additional methods
    #: to load a config from files.
    self.config = self.make_config(instance_relative_config)

    # Prepare the deferred setup of the logger (created lazily by the
    # `logger` property).
    self._logger = None
    self.logger_name = self.import_name

    #: A dictionary of all view functions registered. The keys will
    #: be function names which are also used to generate URLs and
    #: the values are the function objects themselves.
    #: To register a view function, use the :meth:`route` decorator.
    self.view_functions = {}

    # support for the now deprecated `error_handlers` attribute. The
    # :attr:`error_handler_spec` shall be used now.
    self._error_handlers = {}

    #: A dictionary of all registered error handlers. The key is ``None``
    #: for error handlers active on the application, otherwise the key is
    #: the name of the blueprint. Each key points to another dictionary
    #: where the key is the status code of the http exception. The
    #: special key ``None`` points to a list of tuples where the first item
    #: is the class for the instance check and the second the error handler
    #: function.
    #:
    #: To register a error handler, use the :meth:`errorhandler`
    #: decorator.
    self.error_handler_spec = {None: self._error_handlers}

    #: A list of functions that are called when :meth:`url_for` raises a
    #: :exc:`~werkzeug.routing.BuildError`. Each function registered here
    #: is called with `error`, `endpoint` and `values`. If a function
    #: returns ``None`` or raises a :exc:`BuildError` the next function is
    #: tried.
    #:
    #: .. versionadded:: 0.9
    self.url_build_error_handlers = []

    #: A dictionary with lists of functions that should be called at the
    #: beginning of the request. The key of the dictionary is the name of
    #: the blueprint this function is active for, ``None`` for all requests.
    #: This can for example be used to open database connections or
    #: getting hold of the currently logged in user. To register a
    #: function here, use the :meth:`before_request` decorator.
    self.before_request_funcs = {}

    #: A lists of functions that should be called at the beginning of the
    #: first request to this instance. To register a function here, use
    #: the :meth:`before_first_request` decorator.
    #:
    #: .. versionadded:: 0.8
    self.before_first_request_funcs = []

    #: A dictionary with lists of functions that should be called after
    #: each request. The key of the dictionary is the name of the blueprint
    #: this function is active for, ``None`` for all requests. This can for
    #: example be used to close database connections. To register a function
    #: here, use the :meth:`after_request` decorator.
    self.after_request_funcs = {}

    #: A dictionary with lists of functions that are called after
    #: each request, even if an exception has occurred. The key of the
    #: dictionary is the name of the blueprint this function is active for,
    #: ``None`` for all requests. These functions are not allowed to modify
    #: the request, and their return values are ignored. If an exception
    #: occurred while processing the request, it gets passed to each
    #: teardown_request function. To register a function here, use the
    #: :meth:`teardown_request` decorator.
    #:
    #: .. versionadded:: 0.7
    self.teardown_request_funcs = {}

    #: A list of functions that are called when the application context
    #: is destroyed. Since the application context is also torn down
    #: if the request ends this is the place to store code that disconnects
    #: from databases.
    #:
    #: .. versionadded:: 0.9
    self.teardown_appcontext_funcs = []

    #: A dictionary with lists of functions that can be used as URL
    #: value processor functions. Whenever a URL is built these functions
    #: are called to modify the dictionary of values in place. The key
    #: ``None`` here is used for application wide
    #: callbacks, otherwise the key is the name of the blueprint.
    #: Each of these functions has the chance to modify the dictionary
    #:
    #: .. versionadded:: 0.7
    self.url_value_preprocessors = {}

    #: A dictionary with lists of functions that can be used as URL value
    #: preprocessors. The key ``None`` here is used for application wide
    #: callbacks, otherwise the key is the name of the blueprint.
    #: Each of these functions has the chance to modify the dictionary
    #: of URL values before they are used as the keyword arguments of the
    #: view function. For each function registered this one should also
    #: provide a :meth:`url_defaults` function that adds the parameters
    #: automatically again that were removed that way.
    #:
    #: .. versionadded:: 0.7
    self.url_default_functions = {}

    #: A dictionary with list of functions that are called without argument
    #: to populate the template context. The key of the dictionary is the
    #: name of the blueprint this function is active for, ``None`` for all
    #: requests. Each returns a dictionary that the template context is
    #: updated with. To register a function here, use the
    #: :meth:`context_processor` decorator.
    self.template_context_processors = {
        None: [_default_template_ctx_processor]
    }

    #: A list of shell context processor functions that should be run
    #: when a shell context is created.
    #:
    #: .. versionadded:: 0.11
    self.shell_context_processors = []

    #: all the attached blueprints in a dictionary by name. Blueprints
    #: can be attached multiple times so this dictionary does not tell
    #: you how often they got attached.
    #:
    #: .. versionadded:: 0.7
    self.blueprints = {}
    self._blueprint_order = []

    #: a place where extensions can store application specific state. For
    #: example this is where an extension could store database engines and
    #: similar things. For backwards compatibility extensions should register
    #: themselves like this::
    #:
    #:      if not hasattr(app, 'extensions'):
    #:          app.extensions = {}
    #:      app.extensions['extensionname'] = SomeObject()
    #:
    #: The key must match the name of the extension module. For example in
    #: case of a "Flask-Foo" extension in `flask_foo`, the key would be
    #: ``'foo'``.
    #:
    #: .. versionadded:: 0.7
    self.extensions = {}

    #: The :class:`~werkzeug.routing.Map` for this instance. You can use
    #: this to change the routing converters after the class was created
    #: but before any routes are connected. Example::
    #:
    #:    from werkzeug.routing import BaseConverter
    #:
    #:    class ListConverter(BaseConverter):
    #:        def to_python(self, value):
    #:            return value.split(',')
    #:        def to_url(self, values):
    #:            return ','.join(BaseConverter.to_url(value)
    #:                            for value in values)
    #:
    #:    app = Flask(__name__)
    #:    app.url_map.converters['list'] = ListConverter
    self.url_map = Map()

    # tracks internally if the application already handled at least one
    # request.
    self._got_first_request = False
    self._before_request_lock = Lock()

    # register the static folder for the application. Do that even
    # if the folder does not exist. First of all it might be created
    # while the server is running (usually happens during development)
    # but also because google appengine stores static files somewhere
    # else when mapped with the .yml file.
    if self.has_static_folder:
        self.add_url_rule(self.static_url_path + '/<path:filename>',
                          endpoint='static',
                          view_func=self.send_static_file)

    #: The click command line context for this application. Commands
    #: registered here show up in the :command:`flask` command once the
    #: application has been discovered. The default commands are
    #: provided by Flask itself and can be overridden.
    #:
    #: This is an instance of a :class:`click.Group` object.
    self.cli = cli.AppGroup(self.name)
# Backwards compatibility: the deprecated `error_handlers` attribute is an
# alias for the application-level (key ``None``) slot of error_handler_spec.
def _get_error_handlers(self):
    from warnings import warn
    warn(DeprecationWarning('error_handlers is deprecated, use the '
        'new error_handler_spec attribute instead.'), stacklevel=1)
    return self._error_handlers
def _set_error_handlers(self, value):
    # keep both the private dict and the spec's app-level slot in sync
    self._error_handlers = value
    self.error_handler_spec[None] = value
error_handlers = property(_get_error_handlers, _set_error_handlers)
# drop the helper names from the class namespace; only the property remains
del _get_error_handlers, _set_error_handlers
@locked_cached_property
def name(self):
    """The name of the application. This is usually the import name
    with the difference that it's guessed from the run file if the
    import name is main. This name is used as a display name when
    Flask needs the name of the application. It can be set and overridden
    to change the value.

    .. versionadded:: 0.8
    """
    if self.import_name == '__main__':
        # running as a script: derive the name from the script's filename
        fn = getattr(sys.modules['__main__'], '__file__', None)
        if fn is None:
            # e.g. an interactive session with no file — nothing better
            return '__main__'
        return os.path.splitext(os.path.basename(fn))[0]
    return self.import_name
@property
def propagate_exceptions(self):
    """Returns the value of the ``PROPAGATE_EXCEPTIONS`` configuration
    value in case it's set, otherwise a sensible default is returned.

    .. versionadded:: 0.7
    """
    configured = self.config['PROPAGATE_EXCEPTIONS']
    if configured is None:
        # unset: propagate whenever the app is in testing or debug mode
        return self.testing or self.debug
    return configured
@property
def preserve_context_on_exception(self):
    """Returns the value of the ``PRESERVE_CONTEXT_ON_EXCEPTION``
    configuration value in case it's set, otherwise a sensible default
    is returned.

    .. versionadded:: 0.7
    """
    configured = self.config['PRESERVE_CONTEXT_ON_EXCEPTION']
    # fall back to the debug flag when the option is unset
    return self.debug if configured is None else configured
@property
def logger(self):
    """A :class:`logging.Logger` object for this application. The
    default configuration is to log to stderr if the application is
    in debug mode. This logger can be used to (surprise) log messages.
    Here some examples::

        app.logger.debug('A value for debugging')
        app.logger.warning('A warning occurred (%d apples)', 42)
        app.logger.error('An error occurred')

    .. versionadded:: 0.3
    """
    # Double-checked locking: cheap unlocked fast path first, then a
    # re-check under _logger_lock so concurrent first accesses create the
    # logger only once.  The name comparison also invalidates the cached
    # logger whenever `logger_name` has changed since it was created.
    if self._logger and self._logger.name == self.logger_name:
        return self._logger
    with _logger_lock:
        if self._logger and self._logger.name == self.logger_name:
            return self._logger
        from flask.logging import create_logger
        self._logger = rv = create_logger(self)
        return rv
@locked_cached_property
def jinja_env(self):
    """The Jinja2 environment used to load templates."""
    # Constructed on first access via create_jinja_environment(), which
    # subclasses may override to customize the environment.
    return self.create_jinja_environment()
@property
def got_first_request(self):
    """This attribute is set to ``True`` if the application started
    handling the first request.

    .. versionadded:: 0.8
    """
    # read-only view of the internal flag set during request dispatch
    return self._got_first_request
def make_config(self, instance_relative=False):
    """Used to create the config attribute by the Flask constructor.
    The `instance_relative` parameter is passed in from the constructor
    of Flask (there named `instance_relative_config`) and indicates if
    the config should be relative to the instance path or the root path
    of the application.

    .. versionadded:: 0.8
    """
    base_path = self.instance_path if instance_relative else self.root_path
    return self.config_class(base_path, self.default_config)
def auto_find_instance_path(self):
    """Tries to locate the instance path if it was not provided to the
    constructor of the application class. It will basically calculate
    the path to a folder named ``instance`` next to your main file or
    the package.

    .. versionadded:: 0.8
    """
    prefix, package_path = find_package(self.import_name)
    if prefix is None:
        # NOTE(review): a None prefix appears to mean the package is not
        # installed under a site prefix — confirm against find_package;
        # in that case the folder sits next to the package/module.
        return os.path.join(package_path, 'instance')
    # installed case: use the <prefix>/var/<app name>-instance layout
    return os.path.join(prefix, 'var', self.name + '-instance')
def open_instance_resource(self, resource, mode='rb'):
    """Opens a resource from the application's instance folder
    (:attr:`instance_path`). Otherwise works like
    :meth:`open_resource`. Instance resources can also be opened for
    writing.

    :param resource: the name of the resource. To access resources within
                     subfolders use forward slashes as separator.
    :param mode: resource file opening mode, default is 'rb'.
    """
    full_path = os.path.join(self.instance_path, resource)
    return open(full_path, mode)
def create_jinja_environment(self):
    """Creates the Jinja2 environment based on :attr:`jinja_options`
    and :meth:`select_jinja_autoescape`. Since 0.7 this also adds
    the Jinja2 globals and filters after initialization. Override
    this function to customize the behavior.

    .. versionadded:: 0.5
    .. versionchanged:: 0.11
       ``Environment.auto_reload`` set in accordance with
       ``TEMPLATES_AUTO_RELOAD`` configuration option.
    """
    # copy so per-call additions never mutate the shared class options
    options = dict(self.jinja_options)
    if 'autoescape' not in options:
        options['autoescape'] = self.select_jinja_autoescape
    if 'auto_reload' not in options:
        # explicit config wins; otherwise reload templates in debug mode
        if self.config['TEMPLATES_AUTO_RELOAD'] is not None:
            options['auto_reload'] = self.config['TEMPLATES_AUTO_RELOAD']
        else:
            options['auto_reload'] = self.debug
    rv = self.jinja_environment(self, **options)
    rv.globals.update(
        url_for=url_for,
        get_flashed_messages=get_flashed_messages,
        config=self.config,
        # request, session and g are normally added with the
        # context processor for efficiency reasons but for imported
        # templates we also want the proxies in there.
        request=request,
        session=session,
        g=g
    )
    rv.filters['tojson'] = json.tojson_filter
    return rv
def create_global_jinja_loader(self):
"""Creates the loader for the Jinja2 environment. Can be used to
override just the loader and keeping the rest unchanged. It's
discouraged to override this function. Instead one should override
the :meth:`jinja_loader` function instead.
The global loader dispatches between the loaders of the application
and the individual blueprints.
.. versionadded:: 0.7
"""
return DispatchingJinjaLoader(self)
def init_jinja_globals(self):
"""Deprecated. Used to initialize the Jinja2 globals.
.. versionadded:: 0.5
.. versionchanged:: 0.7
This method is deprecated with 0.7. Override
:meth:`create_jinja_environment` instead.
"""
def select_jinja_autoescape(self, filename):
"""Returns ``True`` if autoescaping should be active for the given
template name. If no template name is given, returns `True`.
.. versionadded:: 0.5
"""
if filename is None:
return True
return filename.endswith(('.html', '.htm', '.xml', '.xhtml'))
    def update_template_context(self, context):
        """Update the template context with some commonly used variables.
        This injects request, session, config and g into the template
        context as well as everything template context processors want
        to inject.  Note that as of Flask 0.6, the original values
        in the context will not be overridden if a context processor
        decides to return a value with the same key.

        :param context: the context as a dictionary that is updated in place
                        to add extra variables.
        """
        # Application-wide context processors always run.
        funcs = self.template_context_processors[None]
        reqctx = _request_ctx_stack.top
        if reqctx is not None:
            # Inside a request, also run the processors registered on the
            # blueprint handling the current request (if any).
            bp = reqctx.request.blueprint
            if bp is not None and bp in self.template_context_processors:
                funcs = chain(funcs, self.template_context_processors[bp])
        # Snapshot the caller-provided values before processors run ...
        orig_ctx = context.copy()
        for func in funcs:
            context.update(func())
        # make sure the original values win.  This makes it possible to
        # easier add new variables in context processors without breaking
        # existing views.
        context.update(orig_ctx)
def make_shell_context(self):
"""Returns the shell context for an interactive shell for this
application. This runs all the registered shell context
processors.
.. versionadded:: 0.11
"""
rv = {'app': self, 'g': g}
for processor in self.shell_context_processors:
rv.update(processor())
return rv
    def run(self, host=None, port=None, debug=None, **options):
        """Runs the application on a local development server.
        Do not use ``run()`` in a production setting. It is not intended to
        meet security and performance requirements for a production server.
        Instead, see :ref:`deployment` for WSGI server recommendations.
        If the :attr:`debug` flag is set the server will automatically reload
        for code changes and show a debugger in case an exception happened.
        If you want to run the application in debug mode, but disable the
        code execution on the interactive debugger, you can pass
        ``use_evalex=False`` as parameter.  This will keep the debugger's
        traceback screen active, but disable code execution.
        It is not recommended to use this function for development with
        automatic reloading as this is badly supported.  Instead you should
        be using the :command:`flask` command line script's ``run`` support.
        .. admonition:: Keep in Mind
           Flask will suppress any server error with a generic error page
           unless it is in debug mode.  As such to enable just the
           interactive debugger without the code reloading, you have to
           invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.
           Setting ``use_debugger`` to ``True`` without being in debug mode
           won't catch any exceptions because there won't be any to
           catch.
        .. versionchanged:: 0.10
           The default port is now picked from the ``SERVER_NAME`` variable.
        :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to
            have the server available externally as well. Defaults to
            ``'127.0.0.1'``.
        :param port: the port of the webserver. Defaults to ``5000`` or the
            port defined in the ``SERVER_NAME`` config variable if
            present.
        :param debug: if given, enable or disable debug mode.
            See :attr:`debug`.
        :param options: the options to be forwarded to the underlying
            Werkzeug server.  See
            :func:`werkzeug.serving.run_simple` for more
            information.
        """
        # imported lazily so the server machinery is only pulled in when run
        from werkzeug.serving import run_simple
        if host is None:
            host = '127.0.0.1'
        if port is None:
            # SERVER_NAME may carry an explicit "host:port"; prefer that
            # port over the 5000 default
            server_name = self.config['SERVER_NAME']
            if server_name and ':' in server_name:
                port = int(server_name.rsplit(':', 1)[1])
            else:
                port = 5000
        if debug is not None:
            self.debug = bool(debug)
        # reloader/debugger default to the debug flag but remain overridable
        options.setdefault('use_reloader', self.debug)
        options.setdefault('use_debugger', self.debug)
        options.setdefault('passthrough_errors', True)
        try:
            run_simple(host, port, self, **options)
        finally:
            # reset the first request information if the development server
            # resetted normally.  This makes it possible to restart the server
            # without reloader and that stuff from an interactive shell.
            self._got_first_request = False
def test_client(self, use_cookies=True, **kwargs):
"""Creates a test client for this application. For information
about unit testing head over to :ref:`testing`.
Note that if you are testing for assertions or exceptions in your
application code, you must set ``app.testing = True`` in order for the
exceptions to propagate to the test client. Otherwise, the exception
will be handled by the application (not visible to the test client) and
the only indication of an AssertionError or other exception will be a
500 status code response to the test client. See the :attr:`testing`
attribute. For example::
app.testing = True
client = app.test_client()
The test client can be used in a ``with`` block to defer the closing down
of the context until the end of the ``with`` block. This is useful if
you want to access the context locals for testing::
with app.test_client() as c:
rv = c.get('/?vodka=42')
assert request.args['vodka'] == '42'
Additionally, you may pass optional keyword arguments that will then
be passed to the application's :attr:`test_client_class` constructor.
For example::
from flask.testing import FlaskClient
class CustomClient(FlaskClient):
def __init__(self, authentication=None, *args, **kwargs):
FlaskClient.__init__(*args, **kwargs)
self._authentication = authentication
app.test_client_class = CustomClient
client = app.test_client(authentication='Basic ....')
See :class:`~flask.testing.FlaskClient` for more information.
.. versionchanged:: 0.4
added support for ``with`` block usage for the client.
.. versionadded:: 0.7
The `use_cookies` parameter was added as well as the ability
to override the client to be used by setting the
:attr:`test_client_class` attribute.
.. versionchanged:: 0.11
Added `**kwargs` to support passing additional keyword arguments to
the constructor of :attr:`test_client_class`.
"""
cls = self.test_client_class
if cls is None:
from flask.testing import FlaskClient as cls
return cls(self, self.response_class, use_cookies=use_cookies, **kwargs)
def open_session(self, request):
"""Creates or opens a new session. Default implementation stores all
session data in a signed cookie. This requires that the
:attr:`secret_key` is set. Instead of overriding this method
we recommend replacing the :class:`session_interface`.
:param request: an instance of :attr:`request_class`.
"""
return self.session_interface.open_session(self, request)
def save_session(self, session, response):
"""Saves the session if it needs updates. For the default
implementation, check :meth:`open_session`. Instead of overriding this
method we recommend replacing the :class:`session_interface`.
:param session: the session to be saved (a
:class:`~werkzeug.contrib.securecookie.SecureCookie`
object)
:param response: an instance of :attr:`response_class`
"""
return self.session_interface.save_session(self, session, response)
def make_null_session(self):
"""Creates a new instance of a missing session. Instead of overriding
this method we recommend replacing the :class:`session_interface`.
.. versionadded:: 0.7
"""
return self.session_interface.make_null_session(self)
    @setupmethod
    def register_blueprint(self, blueprint, **options):
        """Register a blueprint on the application. For information about
        blueprints head over to :ref:`blueprints`.

        The blueprint name is passed in as the first argument.
        Options are passed as additional keyword arguments and forwarded to
        `blueprints` in an "options" dictionary.

        :param subdomain: set a subdomain for the blueprint
        :param url_prefix: set the prefix for all URLs defined on the blueprint.
                           ``(url_prefix='/<lang code>')``
        :param url_defaults: a dictionary with URL defaults that is added to
                             each and every URL defined with this blueprint
        :param static_folder: add a static folder to urls in this blueprint
        :param static_url_path: add a static url path to urls in this blueprint
        :param template_folder: set an alternate template folder
        :param root_path: set an alternate root path for this blueprint

        .. versionadded:: 0.7
        """
        first_registration = False
        if blueprint.name in self.blueprints:
            # Registering the same blueprint object a second time is fine
            # (e.g. mounted under two URL prefixes); two *different*
            # blueprints sharing one name is a programming error.
            assert self.blueprints[blueprint.name] is blueprint, \
                'A blueprint\'s name collision occurred between %r and ' \
                '%r. Both share the same name "%s". Blueprints that ' \
                'are created on the fly need unique names.' % \
                (blueprint, self.blueprints[blueprint.name], blueprint.name)
        else:
            self.blueprints[blueprint.name] = blueprint
            self._blueprint_order.append(blueprint)
            first_registration = True
        # the blueprint itself performs the actual registration work
        blueprint.register(self, options, first_registration)
def iter_blueprints(self):
"""Iterates over all blueprints by the order they were registered.
.. versionadded:: 0.11
"""
return iter(self._blueprint_order)
    @setupmethod
    def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
        """Connects a URL rule.  Works exactly like the :meth:`route`
        decorator.  If a view_func is provided it will be registered with the
        endpoint.

        Basically this example::

            @app.route('/')
            def index():
                pass

        Is equivalent to the following::

            def index():
                pass
            app.add_url_rule('/', 'index', index)

        If the view_func is not provided you will need to connect the endpoint
        to a view function like so::

            app.view_functions['index'] = index

        Internally :meth:`route` invokes :meth:`add_url_rule` so if you want
        to customize the behavior via subclassing you only need to change
        this method.

        For more information refer to :ref:`url-route-registrations`.

        .. versionchanged:: 0.2
           `view_func` parameter added.

        .. versionchanged:: 0.6
           ``OPTIONS`` is added automatically as method.

        :param rule: the URL rule as string
        :param endpoint: the endpoint for the registered URL rule.  Flask
                         itself assumes the name of the view function as
                         endpoint
        :param view_func: the function to call when serving a request to the
                          provided endpoint
        :param options: the options to be forwarded to the underlying
                        :class:`~werkzeug.routing.Rule` object.  A change
                        to Werkzeug is handling of method options.  methods
                        is a list of methods this rule should be limited
                        to (``GET``, ``POST`` etc.).  By default a rule
                        just listens for ``GET`` (and implicitly ``HEAD``).
                        Starting with Flask 0.6, ``OPTIONS`` is implicitly
                        added and handled by the standard request handling.
        """
        # default the endpoint to the view function's name
        if endpoint is None:
            endpoint = _endpoint_from_view_func(view_func)
        options['endpoint'] = endpoint
        methods = options.pop('methods', None)
        # if the methods are not given and the view_func object knows its
        # methods we can use that instead.  If neither exists, we go with
        # a tuple of only ``GET`` as default.
        if methods is None:
            methods = getattr(view_func, 'methods', None) or ('GET',)
        if isinstance(methods, string_types):
            raise TypeError('Allowed methods have to be iterables of strings, '
                            'for example: @app.route(..., methods=["POST"])')
        # normalize to an upper-cased set for the comparisons below
        methods = set(item.upper() for item in methods)
        # Methods that should always be added
        required_methods = set(getattr(view_func, 'required_methods', ()))
        # starting with Flask 0.8 the view_func object can disable and
        # force-enable the automatic options handling.
        provide_automatic_options = getattr(view_func,
            'provide_automatic_options', None)
        if provide_automatic_options is None:
            if 'OPTIONS' not in methods:
                provide_automatic_options = True
                required_methods.add('OPTIONS')
            else:
                # an explicit OPTIONS method means the view handles it itself
                provide_automatic_options = False
        # Add the required methods now.
        methods |= required_methods
        rule = self.url_rule_class(rule, methods=methods, **options)
        rule.provide_automatic_options = provide_automatic_options
        self.url_map.add(rule)
        if view_func is not None:
            # guard against accidentally rebinding an endpoint to a
            # different function
            old_func = self.view_functions.get(endpoint)
            if old_func is not None and old_func != view_func:
                raise AssertionError('View function mapping is overwriting an '
                                     'existing endpoint function: %s' % endpoint)
            self.view_functions[endpoint] = view_func
def route(self, rule, **options):
"""A decorator that is used to register a view function for a
given URL rule. This does the same thing as :meth:`add_url_rule`
but is intended for decorator usage::
@app.route('/')
def index():
return 'Hello World'
For more information refer to :ref:`url-route-registrations`.
:param rule: the URL rule as string
:param endpoint: the endpoint for the registered URL rule. Flask
itself assumes the name of the view function as
endpoint
:param options: the options to be forwarded to the underlying
:class:`~werkzeug.routing.Rule` object. A change
to Werkzeug is handling of method options. methods
is a list of methods this rule should be limited
to (``GET``, ``POST`` etc.). By default a rule
just listens for ``GET`` (and implicitly ``HEAD``).
Starting with Flask 0.6, ``OPTIONS`` is implicitly
added and handled by the standard request handling.
"""
def decorator(f):
endpoint = options.pop('endpoint', None)
self.add_url_rule(rule, endpoint, f, **options)
return f
return decorator
@setupmethod
def endpoint(self, endpoint):
"""A decorator to register a function as an endpoint.
Example::
@app.endpoint('example.endpoint')
def example():
return "example"
:param endpoint: the name of the endpoint
"""
def decorator(f):
self.view_functions[endpoint] = f
return f
return decorator
@staticmethod
def _get_exc_class_and_code(exc_class_or_code):
"""Ensure that we register only exceptions as handler keys"""
if isinstance(exc_class_or_code, integer_types):
exc_class = default_exceptions[exc_class_or_code]
else:
exc_class = exc_class_or_code
assert issubclass(exc_class, Exception)
if issubclass(exc_class, HTTPException):
return exc_class, exc_class.code
else:
return exc_class, None
    @setupmethod
    def errorhandler(self, code_or_exception):
        """A decorator that is used to register a function give a given
        error code.  Example::

            @app.errorhandler(404)
            def page_not_found(error):
                return 'This page does not exist', 404

        You can also register handlers for arbitrary exceptions::

            @app.errorhandler(DatabaseError)
            def special_exception_handler(error):
                return 'Database connection failed', 500

        You can also register a function as error handler without using
        the :meth:`errorhandler` decorator.  The following example is
        equivalent to the one above::

            def page_not_found(error):
                return 'This page does not exist', 404
            app.error_handler_spec[None][404] = page_not_found

        Setting error handlers via assignments to :attr:`error_handler_spec`
        however is discouraged as it requires fiddling with nested
        dictionaries and the special case for arbitrary exception types.

        The first ``None`` refers to the active blueprint.  If the error
        handler should be application wide ``None`` shall be used.

        .. versionadded:: 0.7
           Use :meth:`register_error_handler` instead of modifying
           :attr:`error_handler_spec` directly, for application wide error
           handlers.

        .. versionadded:: 0.7
           One can now additionally also register custom exception types
           that do not necessarily have to be a subclass of the
           :class:`~werkzeug.exceptions.HTTPException` class.

        :param code_or_exception: the code as integer for the handler, or
                                  an arbitrary exception class
        """
        def decorator(f):
            # key ``None`` registers the handler application-wide
            self._register_error_handler(None, code_or_exception, f)
            return f
        return decorator
def register_error_handler(self, code_or_exception, f):
"""Alternative error attach function to the :meth:`errorhandler`
decorator that is more straightforward to use for non decorator
usage.
.. versionadded:: 0.7
"""
self._register_error_handler(None, code_or_exception, f)
@setupmethod
def _register_error_handler(self, key, code_or_exception, f):
"""
:type key: None|str
:type code_or_exception: int|T<=Exception
:type f: callable
"""
if isinstance(code_or_exception, HTTPException): # old broken behavior
raise ValueError(
'Tried to register a handler for an exception instance {0!r}. '
'Handlers can only be registered for exception classes or HTTP error codes.'
.format(code_or_exception))
exc_class, code = self._get_exc_class_and_code(code_or_exception)
handlers = self.error_handler_spec.setdefault(key, {}).setdefault(code, {})
handlers[exc_class] = f
@setupmethod
def template_filter(self, name=None):
"""A decorator that is used to register custom template filter.
You can specify a name for the filter, otherwise the function
name will be used. Example::
@app.template_filter()
def reverse(s):
return s[::-1]
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
def decorator(f):
self.add_template_filter(f, name=name)
return f
return decorator
@setupmethod
def add_template_filter(self, f, name=None):
"""Register a custom template filter. Works exactly like the
:meth:`template_filter` decorator.
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
self.jinja_env.filters[name or f.__name__] = f
@setupmethod
def template_test(self, name=None):
"""A decorator that is used to register custom template test.
You can specify a name for the test, otherwise the function
name will be used. Example::
@app.template_test()
def is_prime(n):
if n == 2:
return True
for i in range(2, int(math.ceil(math.sqrt(n))) + 1):
if n % i == 0:
return False
return True
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
def decorator(f):
self.add_template_test(f, name=name)
return f
return decorator
@setupmethod
def add_template_test(self, f, name=None):
"""Register a custom template test. Works exactly like the
:meth:`template_test` decorator.
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
self.jinja_env.tests[name or f.__name__] = f
@setupmethod
def template_global(self, name=None):
"""A decorator that is used to register a custom template global function.
You can specify a name for the global function, otherwise the function
name will be used. Example::
@app.template_global()
def double(n):
return 2 * n
.. versionadded:: 0.10
:param name: the optional name of the global function, otherwise the
function name will be used.
"""
def decorator(f):
self.add_template_global(f, name=name)
return f
return decorator
@setupmethod
def add_template_global(self, f, name=None):
"""Register a custom template global function. Works exactly like the
:meth:`template_global` decorator.
.. versionadded:: 0.10
:param name: the optional name of the global function, otherwise the
function name will be used.
"""
self.jinja_env.globals[name or f.__name__] = f
@setupmethod
def before_request(self, f):
"""Registers a function to run before each request.
The function will be called without any arguments.
If the function returns a non-None value, it's handled as
if it was the return value from the view and further
request handling is stopped.
"""
self.before_request_funcs.setdefault(None, []).append(f)
return f
@setupmethod
def before_first_request(self, f):
"""Registers a function to be run before the first request to this
instance of the application.
The function will be called without any arguments and its return
value is ignored.
.. versionadded:: 0.8
"""
self.before_first_request_funcs.append(f)
return f
@setupmethod
def after_request(self, f):
"""Register a function to be run after each request.
Your function must take one parameter, an instance of
:attr:`response_class` and return a new response object or the
same (see :meth:`process_response`).
As of Flask 0.7 this function might not be executed at the end of the
request in case an unhandled exception occurred.
"""
self.after_request_funcs.setdefault(None, []).append(f)
return f
@setupmethod
def teardown_request(self, f):
"""Register a function to be run at the end of each request,
regardless of whether there was an exception or not. These functions
are executed when the request context is popped, even if not an
actual request was performed.
Example::
ctx = app.test_request_context()
ctx.push()
...
ctx.pop()
When ``ctx.pop()`` is executed in the above example, the teardown
functions are called just before the request context moves from the
stack of active contexts. This becomes relevant if you are using
such constructs in tests.
Generally teardown functions must take every necessary step to avoid
that they will fail. If they do execute code that might fail they
will have to surround the execution of these code by try/except
statements and log occurring errors.
When a teardown function was called because of a exception it will
be passed an error object.
The return values of teardown functions are ignored.
.. admonition:: Debug Note
In debug mode Flask will not tear down a request on an exception
immediately. Instead it will keep it alive so that the interactive
debugger can still access it. This behavior can be controlled
by the ``PRESERVE_CONTEXT_ON_EXCEPTION`` configuration variable.
"""
self.teardown_request_funcs.setdefault(None, []).append(f)
return f
@setupmethod
def teardown_appcontext(self, f):
"""Registers a function to be called when the application context
ends. These functions are typically also called when the request
context is popped.
Example::
ctx = app.app_context()
ctx.push()
...
ctx.pop()
When ``ctx.pop()`` is executed in the above example, the teardown
functions are called just before the app context moves from the
stack of active contexts. This becomes relevant if you are using
such constructs in tests.
Since a request context typically also manages an application
context it would also be called when you pop a request context.
When a teardown function was called because of an exception it will
be passed an error object.
The return values of teardown functions are ignored.
.. versionadded:: 0.9
"""
self.teardown_appcontext_funcs.append(f)
return f
@setupmethod
def context_processor(self, f):
"""Registers a template context processor function."""
self.template_context_processors[None].append(f)
return f
@setupmethod
def shell_context_processor(self, f):
"""Registers a shell context processor function.
.. versionadded:: 0.11
"""
self.shell_context_processors.append(f)
return f
@setupmethod
def url_value_preprocessor(self, f):
"""Registers a function as URL value preprocessor for all view
functions of the application. It's called before the view functions
are called and can modify the url values provided.
"""
self.url_value_preprocessors.setdefault(None, []).append(f)
return f
@setupmethod
def url_defaults(self, f):
"""Callback function for URL defaults for all view functions of the
application. It's called with the endpoint and values and should
update the values passed in place.
"""
self.url_default_functions.setdefault(None, []).append(f)
return f
    def _find_error_handler(self, e):
        """Finds a registered error handler for the request's blueprint.
        Otherwise falls back to the app, returns None if not a suitable
        handler is found.
        """
        exc_class, code = self._get_exc_class_and_code(type(e))
        def find_handler(handler_map):
            # Walk the exception's MRO breadth-first and return the handler
            # registered for the closest matching class, if any.
            if not handler_map:
                return
            queue = deque(exc_class.__mro__)
            # Protect from geniuses who might create circular references in
            # __mro__
            done = set()
            while queue:
                cls = queue.popleft()
                if cls in done:
                    continue
                done.add(cls)
                handler = handler_map.get(cls)
                if handler is not None:
                    # cache for next time exc_class is raised
                    handler_map[exc_class] = handler
                    return handler
                queue.extend(cls.__mro__)
        # try blueprint handlers
        handler = find_handler(self.error_handler_spec
                               .get(request.blueprint, {})
                               .get(code))
        if handler is not None:
            return handler
        # fall back to app handlers
        return find_handler(self.error_handler_spec[None].get(code))
def handle_http_exception(self, e):
"""Handles an HTTP exception. By default this will invoke the
registered error handlers and fall back to returning the
exception as response.
.. versionadded:: 0.3
"""
# Proxy exceptions don't have error codes. We want to always return
# those unchanged as errors
if e.code is None:
return e
handler = self._find_error_handler(e)
if handler is None:
return e
return handler(e)
def trap_http_exception(self, e):
"""Checks if an HTTP exception should be trapped or not. By default
this will return ``False`` for all exceptions except for a bad request
key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It
also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.
This is called for all HTTP exceptions raised by a view function.
If it returns ``True`` for any exception the error handler for this
exception is not called and it shows up as regular exception in the
traceback. This is helpful for debugging implicitly raised HTTP
exceptions.
.. versionadded:: 0.8
"""
if self.config['TRAP_HTTP_EXCEPTIONS']:
return True
if self.config['TRAP_BAD_REQUEST_ERRORS']:
return isinstance(e, BadRequest)
return False
    def handle_user_exception(self, e):
        """This method is called whenever an exception occurs that should be
        handled.  A special case are
        :class:`~werkzeug.exception.HTTPException`\s which are forwarded by
        this function to the :meth:`handle_http_exception` method.  This
        function will either return a response value or reraise the
        exception with the same traceback.

        .. versionadded:: 0.7
        """
        exc_type, exc_value, tb = sys.exc_info()
        # must be called from within the ``except`` block handling ``e``
        assert exc_value is e
        # ensure not to trash sys.exc_info() at that point in case someone
        # wants the traceback preserved in handle_http_exception.  Of course
        # we cannot prevent users from trashing it themselves in a custom
        # trap_http_exception method so that's their fault then.
        if isinstance(e, HTTPException) and not self.trap_http_exception(e):
            return self.handle_http_exception(e)
        # otherwise look for a handler registered for this exception type
        handler = self._find_error_handler(e)
        if handler is None:
            # no handler: re-raise with the original traceback preserved
            reraise(exc_type, exc_value, tb)
        return handler(e)
    def handle_exception(self, e):
        """Default exception handling that kicks in when an exception
        occurs that is not caught.  In debug mode the exception will
        be re-raised immediately, otherwise it is logged and the handler
        for a 500 internal server error is used.  If no such handler
        exists, a default 500 internal server error message is displayed.

        .. versionadded:: 0.3
        """
        exc_type, exc_value, tb = sys.exc_info()
        # notify subscribers before any handling happens
        got_request_exception.send(self, exception=e)
        # look up a handler registered for 500 internal server errors
        handler = self._find_error_handler(InternalServerError())
        if self.propagate_exceptions:
            # if we want to repropagate the exception, we can attempt to
            # raise it with the whole traceback in case we can do that
            # (the function was actually called from the except part)
            # otherwise, we just raise the error again
            if exc_value is e:
                reraise(exc_type, exc_value, tb)
            else:
                raise e
        self.log_exception((exc_type, exc_value, tb))
        if handler is None:
            return InternalServerError()
        return handler(e)
def log_exception(self, exc_info):
"""Logs an exception. This is called by :meth:`handle_exception`
if debugging is disabled and right before the handler is called.
The default implementation logs the exception as error on the
:attr:`logger`.
.. versionadded:: 0.8
"""
self.logger.error('Exception on %s [%s]' % (
request.path,
request.method
), exc_info=exc_info)
def raise_routing_exception(self, request):
"""Exceptions that are recording during routing are reraised with
this method. During debug we are not reraising redirect requests
for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising
a different error instead to help debug situations.
:internal:
"""
if not self.debug \
or not isinstance(request.routing_exception, RequestRedirect) \
or request.method in ('GET', 'HEAD', 'OPTIONS'):
raise request.routing_exception
from .debughelpers import FormDataRoutingRedirect
raise FormDataRoutingRedirect(request)
    def dispatch_request(self):
        """Does the request dispatching.  Matches the URL and returns the
        return value of the view or error handler.  This does not have to
        be a response object.  In order to convert the return value to a
        proper response object, call :func:`make_response`.

        .. versionchanged:: 0.7
           This no longer does the exception handling, this code was
           moved to the new :meth:`full_dispatch_request`.
        """
        req = _request_ctx_stack.top.request
        if req.routing_exception is not None:
            # URL matching failed earlier; re-raise (possibly transformed)
            self.raise_routing_exception(req)
        rule = req.url_rule
        # if we provide automatic options for this URL and the
        # request came with the OPTIONS method, reply automatically
        if getattr(rule, 'provide_automatic_options', False) \
                and req.method == 'OPTIONS':
            return self.make_default_options_response()
        # otherwise dispatch to the handler for that endpoint
        return self.view_functions[rule.endpoint](**req.view_args)
    def full_dispatch_request(self):
        """Dispatches the request and on top of that performs request
        pre and postprocessing as well as HTTP exception catching and
        error handling.

        .. versionadded:: 0.7
        """
        self.try_trigger_before_first_request_functions()
        try:
            request_started.send(self)
            # a before_request function may short-circuit dispatching by
            # returning a non-None value
            rv = self.preprocess_request()
            if rv is None:
                rv = self.dispatch_request()
        except Exception as e:
            rv = self.handle_user_exception(e)
        response = self.make_response(rv)
        response = self.process_response(response)
        request_finished.send(self, response=response)
        return response
def try_trigger_before_first_request_functions(self):
"""Called before each request and will ensure that it triggers
the :attr:`before_first_request_funcs` and only exactly once per
application instance (which means process usually).
:internal:
"""
if self._got_first_request:
return
with self._before_request_lock:
if self._got_first_request:
return
for func in self.before_first_request_funcs:
func()
self._got_first_request = True
def make_default_options_response(self):
"""This method is called to create the default ``OPTIONS`` response.
This can be changed through subclassing to change the default
behavior of ``OPTIONS`` responses.
.. versionadded:: 0.7
"""
adapter = _request_ctx_stack.top.url_adapter
if hasattr(adapter, 'allowed_methods'):
methods = adapter.allowed_methods()
else:
# fallback for Werkzeug < 0.7
methods = []
try:
adapter.match(method='--')
except MethodNotAllowed as e:
methods = e.valid_methods
except HTTPException as e:
pass
rv = self.response_class()
rv.allow.update(methods)
return rv
def should_ignore_error(self, error):
"""This is called to figure out if an error should be ignored
or not as far as the teardown system is concerned. If this
function returns ``True`` then the teardown handlers will not be
passed the error.
.. versionadded:: 0.10
"""
return False
    def make_response(self, rv):
        """Converts the return value from a view function to a real
        response object that is an instance of :attr:`response_class`.

        The following types are allowed for `rv`:

        .. tabularcolumns:: |p{3.5cm}|p{9.5cm}|

        ======================= ===========================================
        :attr:`response_class`  the object is returned unchanged
        :class:`str`            a response object is created with the
                                string as body
        :class:`unicode`        a response object is created with the
                                string encoded to utf-8 as body
        a WSGI function         the function is called as WSGI application
                                and buffered as response object
        :class:`tuple`          A tuple in the form ``(response, status,
                                headers)`` or ``(response, headers)``
                                where `response` is any of the
                                types defined here, `status` is a string
                                or an integer and `headers` is a list or
                                a dictionary with header values.
        ======================= ===========================================

        :param rv: the return value from the view function

        .. versionchanged:: 0.9
           Previously a tuple was interpreted as the arguments for the
           response object.
        """
        status_or_headers = headers = None
        if isinstance(rv, tuple):
            # pad short tuples with None so unpacking always yields
            # (body, status_or_headers, headers)
            rv, status_or_headers, headers = rv + (None,) * (3 - len(rv))
        if rv is None:
            raise ValueError('View function did not return a response')
        if isinstance(status_or_headers, (dict, list)):
            # a (response, headers) tuple: the second item was headers
            headers, status_or_headers = status_or_headers, None
        if not isinstance(rv, self.response_class):
            # When we create a response object directly, we let the constructor
            # set the headers and status.  We do this because there can be
            # some extra logic involved when creating these objects with
            # specific values (like default content type selection).
            if isinstance(rv, (text_type, bytes, bytearray)):
                rv = self.response_class(rv, headers=headers,
                                         status=status_or_headers)
                headers = status_or_headers = None
            else:
                # anything else (e.g. a WSGI callable) is coerced by the
                # response class
                rv = self.response_class.force_type(rv, request.environ)
        if status_or_headers is not None:
            # strings become the full status line, integers the status code
            if isinstance(status_or_headers, string_types):
                rv.status = status_or_headers
            else:
                rv.status_code = status_or_headers
        if headers:
            rv.headers.extend(headers)
        return rv
def create_url_adapter(self, request):
"""Creates a URL adapter for the given request. The URL adapter
is created at a point where the request context is not yet set up
so the request is passed explicitly.
.. versionadded:: 0.6
.. versionchanged:: 0.9
This can now also be called without a request object when the
URL adapter is created for the application context.
"""
if request is not None:
return self.url_map.bind_to_environ(request.environ,
server_name=self.config['SERVER_NAME'])
# We need at the very least the server name to be set for this
# to work.
if self.config['SERVER_NAME'] is not None:
return self.url_map.bind(
self.config['SERVER_NAME'],
script_name=self.config['APPLICATION_ROOT'] or '/',
url_scheme=self.config['PREFERRED_URL_SCHEME'])
def inject_url_defaults(self, endpoint, values):
"""Injects the URL defaults for the given endpoint directly into
the values dictionary passed. This is used internally and
automatically called on URL building.
.. versionadded:: 0.7
"""
funcs = self.url_default_functions.get(None, ())
if '.' in endpoint:
bp = endpoint.rsplit('.', 1)[0]
funcs = chain(funcs, self.url_default_functions.get(bp, ()))
for func in funcs:
func(endpoint, values)
    def handle_url_build_error(self, error, endpoint, values):
        """Handle :class:`~werkzeug.routing.BuildError` on :meth:`url_for`.

        Each registered URL build error handler is tried in turn; the first
        non-``None`` return value is used as the result.  If no handler
        resolves the error, the exception is re-raised.

        :param error: the active :class:`~werkzeug.routing.BuildError`.
        :param endpoint: the endpoint that failed to build.
        :param values: the values passed to ``url_for``.
        """
        # Capture the active exception info up front so the original
        # traceback can be preserved if we re-raise below.
        exc_type, exc_value, tb = sys.exc_info()
        for handler in self.url_build_error_handlers:
            try:
                rv = handler(error, endpoint, values)
                if rv is not None:
                    return rv
            except BuildError as e:
                # make error available outside except block (py3)
                error = e
        # At this point we want to reraise the exception.  If the error is
        # still the same one we can reraise it with the original traceback,
        # otherwise we raise it from here.
        if error is exc_value:
            reraise(exc_type, exc_value, tb)
        raise error
    def preprocess_request(self):
        """Called before the actual request dispatching and will
        call each :meth:`before_request` decorated function, passing no
        arguments.

        If any of these functions returns a value, it's handled as
        if it was the return value from the view and further
        request handling is stopped.

        This also triggers the :meth:`url_value_preprocessor` functions before
        the actual :meth:`before_request` functions are called.

        :return: the first non-``None`` value returned by a
            ``before_request`` function, or ``None`` to continue normal
            dispatching.
        """
        # Blueprint of the current request, if the URL matched one.
        bp = _request_ctx_stack.top.request.blueprint

        # URL value preprocessors: application-wide ones first, then the
        # matched blueprint's.
        funcs = self.url_value_preprocessors.get(None, ())
        if bp is not None and bp in self.url_value_preprocessors:
            funcs = chain(funcs, self.url_value_preprocessors[bp])
        for func in funcs:
            func(request.endpoint, request.view_args)

        # before_request functions; a non-None return value short-circuits
        # view dispatching and is treated as the response.
        funcs = self.before_request_funcs.get(None, ())
        if bp is not None and bp in self.before_request_funcs:
            funcs = chain(funcs, self.before_request_funcs[bp])
        for func in funcs:
            rv = func()
            if rv is not None:
                return rv
    def process_response(self, response):
        """Can be overridden in order to modify the response object
        before it's sent to the WSGI server.  By default this will
        call all the :meth:`after_request` decorated functions.

        .. versionchanged:: 0.5
           As of Flask 0.5 the functions registered for after request
           execution are called in reverse order of registration.

        :param response: a :attr:`response_class` object.
        :return: a new response object or the same, has to be an
                 instance of :attr:`response_class`.
        """
        ctx = _request_ctx_stack.top
        bp = ctx.request.blueprint
        # Deferred functions registered on the request context run first,
        # then the blueprint's after_request functions, then the
        # application-wide ones -- the registered groups in reverse
        # registration order.
        funcs = ctx._after_request_functions
        if bp is not None and bp in self.after_request_funcs:
            funcs = chain(funcs, reversed(self.after_request_funcs[bp]))
        if None in self.after_request_funcs:
            funcs = chain(funcs, reversed(self.after_request_funcs[None]))
        for handler in funcs:
            response = handler(response)
        # Persist the session unless the session interface considers it a
        # null session.
        if not self.session_interface.is_null_session(ctx.session):
            self.save_session(ctx.session, response)
        return response
def do_teardown_request(self, exc=_sentinel):
"""Called after the actual request dispatching and will
call every as :meth:`teardown_request` decorated function. This is
not actually called by the :class:`Flask` object itself but is always
triggered when the request context is popped. That way we have a
tighter control over certain resources under testing environments.
.. versionchanged:: 0.9
Added the `exc` argument. Previously this was always using the
current exception information.
"""
if exc is _sentinel:
exc = sys.exc_info()[1]
funcs = reversed(self.teardown_request_funcs.get(None, ()))
bp = _request_ctx_stack.top.request.blueprint
if bp is not None and bp in self.teardown_request_funcs:
funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))
for func in funcs:
func(exc)
request_tearing_down.send(self, exc=exc)
def do_teardown_appcontext(self, exc=_sentinel):
"""Called when an application context is popped. This works pretty
much the same as :meth:`do_teardown_request` but for the application
context.
.. versionadded:: 0.9
"""
if exc is _sentinel:
exc = sys.exc_info()[1]
for func in reversed(self.teardown_appcontext_funcs):
func(exc)
appcontext_tearing_down.send(self, exc=exc)
def app_context(self):
"""Binds the application only. For as long as the application is bound
to the current context the :data:`flask.current_app` points to that
application. An application context is automatically created when a
request context is pushed if necessary.
Example usage::
with app.app_context():
...
.. versionadded:: 0.9
"""
return AppContext(self)
def request_context(self, environ):
"""Creates a :class:`~flask.ctx.RequestContext` from the given
environment and binds it to the current context. This must be used in
combination with the ``with`` statement because the request is only bound
to the current context for the duration of the ``with`` block.
Example usage::
with app.request_context(environ):
do_something_with(request)
The object returned can also be used without the ``with`` statement
which is useful for working in the shell. The example above is
doing exactly the same as this code::
ctx = app.request_context(environ)
ctx.push()
try:
do_something_with(request)
finally:
ctx.pop()
.. versionchanged:: 0.3
Added support for non-with statement usage and ``with`` statement
is now passed the ctx object.
:param environ: a WSGI environment
"""
return RequestContext(self, environ)
def test_request_context(self, *args, **kwargs):
"""Creates a WSGI environment from the given values (see
:class:`werkzeug.test.EnvironBuilder` for more information, this
function accepts the same arguments).
"""
from flask.testing import make_test_environ_builder
builder = make_test_environ_builder(self, *args, **kwargs)
try:
return self.request_context(builder.get_environ())
finally:
builder.close()
    def wsgi_app(self, environ, start_response):
        """The actual WSGI application.  This is not implemented in
        `__call__` so that middlewares can be applied without losing a
        reference to the class.  So instead of doing this::

            app = MyMiddleware(app)

        It's a better idea to do this instead::

            app.wsgi_app = MyMiddleware(app.wsgi_app)

        Then you still have the original application object around and
        can continue to call methods on it.

        .. versionchanged:: 0.7
           The behavior of the before and after request callbacks was changed
           under error conditions and a new callback was added that will
           always execute at the end of the request, independent on if an
           error occurred or not. See :ref:`callbacks-and-errors`.

        :param environ: a WSGI environment
        :param start_response: a callable accepting a status code,
                               a list of headers and an optional
                               exception context to start the response
        """
        ctx = self.request_context(environ)
        ctx.push()
        error = None
        try:
            try:
                response = self.full_dispatch_request()
            except Exception as e:
                # Remember the error so the teardown handlers receive it,
                # and convert the exception into an error response.
                error = e
                response = self.make_response(self.handle_exception(e))
            return response(environ, start_response)
        finally:
            if self.should_ignore_error(error):
                error = None
            # Pop the request context, passing the error (if any) on to the
            # teardown handlers.
            ctx.auto_pop(error)
def __call__(self, environ, start_response):
"""Shortcut for :attr:`wsgi_app`."""
return self.wsgi_app(environ, start_response)
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.name,
)
| mit |
apache/airflow | tests/models/test_pool.py | 2 | 4794 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from airflow import settings
from airflow.models import DAG
from airflow.models.pool import Pool
from airflow.models.taskinstance import TaskInstance as TI
from airflow.operators.dummy import DummyOperator
from airflow.utils import timezone
from airflow.utils.state import State
from tests.test_utils.db import clear_db_pools, clear_db_runs, set_default_pool_slots
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
class TestPool(unittest.TestCase):
    """Tests for :class:`airflow.models.pool.Pool` slot accounting.

    Each test persists one RUNNING and one QUEUED task instance (plus,
    where applicable, a dedicated pool) and then checks the
    open/running/queued/occupied slot counts and the aggregated
    ``slots_stats`` mapping.  The previously triplicated fixture code is
    factored into the ``_make_tis`` / ``_persist`` helpers.
    """

    def setUp(self):
        clear_db_runs()
        clear_db_pools()

    def tearDown(self):
        clear_db_runs()
        clear_db_pools()

    @staticmethod
    def _make_tis(dag_id, op1_kwargs, op2_kwargs):
        """Build a DAG with two dummy tasks and return a RUNNING and a
        QUEUED task instance for them (not yet persisted)."""
        dag = DAG(
            dag_id=dag_id,
            start_date=DEFAULT_DATE,
        )
        op1 = DummyOperator(task_id='dummy1', dag=dag, **op1_kwargs)
        op2 = DummyOperator(task_id='dummy2', dag=dag, **op2_kwargs)
        ti1 = TI(task=op1, execution_date=DEFAULT_DATE)
        ti2 = TI(task=op2, execution_date=DEFAULT_DATE)
        ti1.state = State.RUNNING
        ti2.state = State.QUEUED
        return ti1, ti2

    @staticmethod
    def _persist(*objects):
        """Add *objects* to a session, commit, and close the session."""
        session = settings.Session
        for obj in objects:
            session.add(obj)
        session.commit()
        session.close()

    def test_open_slots(self):
        pool = Pool(pool='test_pool', slots=5)
        ti1, ti2 = self._make_tis(
            'test_open_slots',
            {'pool': 'test_pool'},
            {'pool': 'test_pool'},
        )
        self._persist(pool, ti1, ti2)

        assert 3 == pool.open_slots()
        assert 1 == pool.running_slots()
        assert 1 == pool.queued_slots()
        assert 2 == pool.occupied_slots()
        assert {
            "default_pool": {
                "open": 128,
                "queued": 0,
                "total": 128,
                "running": 0,
            },
            "test_pool": {
                "open": 3,
                "queued": 1,
                "running": 1,
                "total": 5,
            },
        } == pool.slots_stats()

    def test_infinite_slots(self):
        # Negative slot count means an unbounded pool.
        pool = Pool(pool='test_pool', slots=-1)
        ti1, ti2 = self._make_tis(
            'test_infinite_slots',
            {'pool': 'test_pool'},
            {'pool': 'test_pool'},
        )
        self._persist(pool, ti1, ti2)

        assert float('inf') == pool.open_slots()
        assert 1 == pool.running_slots()
        assert 1 == pool.queued_slots()
        assert 2 == pool.occupied_slots()
        assert {
            "default_pool": {
                "open": 128,
                "queued": 0,
                "total": 128,
                "running": 0,
            },
            "test_pool": {
                "open": float('inf'),
                "queued": 1,
                "running": 1,
                "total": float('inf'),
            },
        } == pool.slots_stats()

    def test_default_pool_open_slots(self):
        set_default_pool_slots(5)
        assert 5 == Pool.get_default_pool().open_slots()

        # The second task occupies two slots of the default pool.
        ti1, ti2 = self._make_tis(
            'test_default_pool_open_slots',
            {},
            {'pool_slots': 2},
        )
        self._persist(ti1, ti2)

        assert 2 == Pool.get_default_pool().open_slots()
        assert {
            "default_pool": {
                "open": 2,
                "queued": 2,
                "total": 5,
                "running": 1,
            }
        } == Pool.slots_stats()
| apache-2.0 |
BlueBrain/NeuroM | examples/soma_radius_fit.py | 2 | 3139 | #!/usr/bin/env python
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Extract a distribution for the soma radii of the population (list) of neurons.
for the soma radii of the population (list) of neurons.
"""
import argparse
import neurom as nm
from neurom import stats as st
def parse_args():
    """Parse and return the command line arguments."""
    arg_parser = argparse.ArgumentParser(
        description='Morphology fit distribution extractor',
        epilog='Note: Prints the optimal distribution and corresponding parameters.')
    arg_parser.add_argument('datapath',
                            help='Path to morphology data file or directory')
    return arg_parser.parse_args()
def test_multiple_distr(filepath):
    """Fit several candidate distributions to the soma radii of the
    morphologies found under *filepath* and return the optimal
    distribution along with the corresponding parameters.
    """
    # Load the morphologies (a single file or a whole directory).
    neurons = nm.load_neurons(filepath)
    # Basic candidate distributions to evaluate.
    candidates = ('norm', 'expon', 'uniform')
    # Soma radii across the whole population.
    radii = nm.get('soma_radii', neurons)
    # Pick the best-fitting distribution.
    return st.optimal_distribution(radii, candidates)
if __name__ == '__main__':
    # Fit the candidate distributions to the data given on the command
    # line and report the winner.
    args = parse_args()
    result = test_multiple_distr(args.datapath)
    print("Optimal distribution fit for soma radius is: %s with parameters %s" %
          (result.type, result.params))
| bsd-3-clause |
chand3040/cloud_that | common/djangoapps/lang_pref/middleware.py | 80 | 1121 | """
Middleware for Language Preferences
"""
from openedx.core.djangoapps.user_api.preferences.api import get_user_preference
from lang_pref import LANGUAGE_KEY
# TODO PLAT-671 Import from Django 1.8
# from django.utils.translation import LANGUAGE_SESSION_KEY
from django_locale.trans_real import LANGUAGE_SESSION_KEY
class LanguagePreferenceMiddleware(object):
    """
    Middleware for user preferences.

    Ensures that, once set, a user's preferences are reflected in the page
    whenever they are logged in.
    """

    def process_request(self, request):
        """
        Apply the user's stored language preference, if one exists.
        """
        # Anonymous users have no stored preferences; nothing to do.
        if not request.user.is_authenticated():
            return
        preferred_language = get_user_preference(request.user, LANGUAGE_KEY)
        if preferred_language:
            # LANGUAGE_SESSION_KEY is the Django-specific session setting
            # governing language preference.
            request.session[LANGUAGE_SESSION_KEY] = preferred_language
| agpl-3.0 |
kevalds51/sympy | sympy/physics/vector/frame.py | 47 | 27251 | from sympy import (diff, trigsimp, expand, sin, cos, solve, Symbol, sympify,
eye, ImmutableMatrix as Matrix)
from sympy.core.compatibility import string_types, u, range
from sympy.physics.vector.vector import Vector, _check_vector
__all__ = ['CoordinateSym', 'ReferenceFrame']
class CoordinateSym(Symbol):
    """
    A coordinate symbol/base scalar associated wrt a Reference Frame.

    Ideally, users should not instantiate this class. Instances of
    this class must only be accessed through the corresponding frame
    as 'frame[index]'.

    CoordinateSyms having the same frame and index parameters are equal
    (even though they may be instantiated separately).

    Parameters
    ==========

    name : string
        The display name of the CoordinateSym

    frame : ReferenceFrame
        The reference frame this base scalar belongs to

    index : 0, 1 or 2
        The index of the dimension denoted by this coordinate variable

    Examples
    ========

    >>> from sympy.physics.vector import ReferenceFrame, CoordinateSym
    >>> A = ReferenceFrame('A')
    >>> A[1]
    A_y
    >>> type(A[0])
    <class 'sympy.physics.vector.frame.CoordinateSym'>
    >>> a_y = CoordinateSym('a_y', A, 1)
    >>> a_y == A[1]
    True

    """

    def __new__(cls, name, frame, index):
        # Create the underlying Symbol first, then attach the (frame,
        # index) identity used by __eq__ and __hash__ below.
        obj = super(CoordinateSym, cls).__new__(cls, name)
        _check_frame(frame)
        # Only the three spatial dimensions are valid indices.
        if index not in range(0, 3):
            raise ValueError("Invalid index specified")
        obj._id = (frame, index)
        return obj

    @property
    def frame(self):
        # The ReferenceFrame this base scalar belongs to.
        return self._id[0]

    def __eq__(self, other):
        #Check if the other object is a CoordinateSym of the same frame
        #and same index
        if isinstance(other, CoordinateSym):
            if other._id == self._id:
                return True
        return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Hash on (frame, index) so that CoordinateSyms created separately
        # but comparing equal also hash identically, consistent with __eq__.
        return tuple((self._id[0].__hash__(), self._id[1])).__hash__()
class ReferenceFrame(object):
"""A reference frame in classical mechanics.
ReferenceFrame is a class used to represent a reference frame in classical
mechanics. It has a standard basis of three unit vectors in the frame's
x, y, and z directions.
It also can have a rotation relative to a parent frame; this rotation is
defined by a direction cosine matrix relating this frame's basis vectors to
the parent frame's basis vectors. It can also have an angular velocity
vector, defined in another frame.
"""
    def __init__(self, name, indices=None, latexs=None, variables=None):
        """ReferenceFrame initialization method.

        A ReferenceFrame has a set of orthonormal basis vectors, along with
        orientations relative to other ReferenceFrames and angular velocities
        relative to other ReferenceFrames.

        Parameters
        ==========

        name : str
            The name of the frame; also the prefix for the printed names of
            its basis vectors and coordinate variables.
        indices : list (of strings)
            If custom indices are desired for console, pretty, and LaTeX
            printing, supply three as a list. The basis vectors can then be
            accessed with the get_item method.
        latexs : list (of strings)
            If custom names are desired for LaTeX printing of each basis
            vector, supply the names here in a list.
        variables : list (of strings)
            If custom names are desired for the three coordinate variables
            associated with this frame, supply them here; defaults to
            ``name_x``, ``name_y``, ``name_z``.

        Examples
        ========

        >>> from sympy.physics.vector import ReferenceFrame, vlatex
        >>> N = ReferenceFrame('N')
        >>> N.x
        N.x
        >>> O = ReferenceFrame('O', indices=('1', '2', '3'))
        >>> O.x
        O['1']
        >>> O['1']
        O['1']
        >>> P = ReferenceFrame('P', latexs=('A1', 'A2', 'A3'))
        >>> vlatex(P.x)
        'A1'

        """
        if not isinstance(name, string_types):
            raise TypeError('Need to supply a valid name')
        # The if statements below are for custom printing of basis-vectors for
        # each frame.
        # First case, when custom indices are supplied
        if indices is not None:
            if not isinstance(indices, (tuple, list)):
                raise TypeError('Supply the indices as a list')
            if len(indices) != 3:
                raise ValueError('Supply 3 indices')
            for i in indices:
                if not isinstance(i, string_types):
                    raise TypeError('Indices must be strings')
            self.str_vecs = [(name + '[\'' + indices[0] + '\']'),
                             (name + '[\'' + indices[1] + '\']'),
                             (name + '[\'' + indices[2] + '\']')]
            self.pretty_vecs = [(name.lower() + u("_") + indices[0]),
                                (name.lower() + u("_") + indices[1]),
                                (name.lower() + u("_") + indices[2])]
            self.latex_vecs = [(r"\mathbf{\hat{%s}_{%s}}" % (name.lower(),
                               indices[0])), (r"\mathbf{\hat{%s}_{%s}}" %
                               (name.lower(), indices[1])),
                               (r"\mathbf{\hat{%s}_{%s}}" % (name.lower(),
                               indices[2]))]
            self.indices = indices
        # Second case, when no custom indices are supplied
        else:
            self.str_vecs = [(name + '.x'), (name + '.y'), (name + '.z')]
            self.pretty_vecs = [name.lower() + u("_x"),
                                name.lower() + u("_y"),
                                name.lower() + u("_z")]
            self.latex_vecs = [(r"\mathbf{\hat{%s}_x}" % name.lower()),
                               (r"\mathbf{\hat{%s}_y}" % name.lower()),
                               (r"\mathbf{\hat{%s}_z}" % name.lower())]
            self.indices = ['x', 'y', 'z']
        # Different step, for custom latex basis vectors
        if latexs is not None:
            if not isinstance(latexs, (tuple, list)):
                raise TypeError('Supply the indices as a list')
            if len(latexs) != 3:
                raise ValueError('Supply 3 indices')
            for i in latexs:
                if not isinstance(i, string_types):
                    raise TypeError('Latex entries must be strings')
            self.latex_vecs = latexs
        self.name = name
        self._var_dict = {}
        #The _dcm_dict dictionary will only store the dcms of parent-child
        #relationships. The _dcm_cache dictionary will work as the dcm
        #cache.
        self._dcm_dict = {}
        self._dcm_cache = {}
        # Angular velocity / acceleration of this frame relative to others,
        # keyed by the other frame.
        self._ang_vel_dict = {}
        self._ang_acc_dict = {}
        # _dlist indexes the three relationship dicts; _dict_list() selects
        # one of them by number when searching for a connecting path.
        self._dlist = [self._dcm_dict, self._ang_vel_dict, self._ang_acc_dict]
        self._cur = 0
        # The orthonormal basis vectors of this frame.
        self._x = Vector([(Matrix([1, 0, 0]), self)])
        self._y = Vector([(Matrix([0, 1, 0]), self)])
        self._z = Vector([(Matrix([0, 0, 1]), self)])
        #Associate coordinate symbols wrt this frame
        if variables is not None:
            if not isinstance(variables, (tuple, list)):
                raise TypeError('Supply the variable names as a list/tuple')
            if len(variables) != 3:
                raise ValueError('Supply 3 variable names')
            for i in variables:
                if not isinstance(i, string_types):
                    raise TypeError('Variable names must be strings')
        else:
            variables = [name + '_x', name + '_y', name + '_z']
        self.varlist = (CoordinateSym(variables[0], self, 0), \
                        CoordinateSym(variables[1], self, 1), \
                        CoordinateSym(variables[2], self, 2))
def __getitem__(self, ind):
"""
Returns basis vector for the provided index, if the index is a string.
If the index is a number, returns the coordinate variable correspon-
-ding to that index.
"""
if not isinstance(ind, str):
if ind < 3:
return self.varlist[ind]
else:
raise ValueError("Invalid index provided")
if self.indices[0] == ind:
return self.x
if self.indices[1] == ind:
return self.y
if self.indices[2] == ind:
return self.z
else:
raise ValueError('Not a defined index')
def __iter__(self):
return iter([self.x, self.y, self.z])
def __str__(self):
"""Returns the name of the frame. """
return self.name
__repr__ = __str__
    def _dict_list(self, other, num):
        """Creates a list from self to other using _dcm_dict.

        ``num`` selects which relationship dict to traverse:
        0 -> _dcm_dict, 1 -> _ang_vel_dict, 2 -> _ang_acc_dict.
        Returns the shortest chain of frames connecting ``self`` to
        ``other`` through that relationship, or raises ValueError if the
        two frames are not connected.
        """
        # Repeatedly extend every known path by each neighbouring frame not
        # already on it, until no new paths appear.
        outlist = [[self]]
        oldlist = [[]]
        while outlist != oldlist:
            oldlist = outlist[:]
            for i, v in enumerate(outlist):
                templist = v[-1]._dlist[num].keys()
                for i2, v2 in enumerate(templist):
                    if not v.__contains__(v2):
                        littletemplist = v + [v2]
                        if not outlist.__contains__(littletemplist):
                            outlist.append(littletemplist)
        # Discard paths that do not end at the target frame.
        for i, v in enumerate(oldlist):
            if v[-1] != other:
                outlist.remove(v)
        # The shortest surviving path is the connection we want.
        outlist.sort(key=len)
        if len(outlist) != 0:
            return outlist[0]
        raise ValueError('No Connecting Path found between ' + self.name +
                         ' and ' + other.name)
    def _w_diff_dcm(self, otherframe):
        """Angular velocity from time differentiating the DCM. """
        from sympy.physics.vector.functions import dynamicsymbols
        dcm2diff = self.dcm(otherframe)
        diffed = dcm2diff.diff(dynamicsymbols._t)
        # Entries of d/dt(C) * C.T hold the angular velocity measure
        # numbers; pick the three independent off-diagonal components.
        angvelmat = diffed * dcm2diff.T
        w1 = trigsimp(expand(angvelmat[7]), recursive=True)
        w2 = trigsimp(expand(angvelmat[2]), recursive=True)
        w3 = trigsimp(expand(angvelmat[3]), recursive=True)
        return -Vector([(Matrix([w1, w2, w3]), self)])
    def variable_map(self, otherframe):
        """
        Returns a dictionary which expresses the coordinate variables
        of this frame in terms of the variables of otherframe.

        If Vector.simp is True, returns a simplified version of the mapped
        values. Else, returns them without simplification.

        Simplification of the expressions may take time.

        Parameters
        ==========

        otherframe : ReferenceFrame
            The other frame to map the variables to

        Examples
        ========

        >>> from sympy.physics.vector import ReferenceFrame, dynamicsymbols
        >>> A = ReferenceFrame('A')
        >>> q = dynamicsymbols('q')
        >>> B = A.orientnew('B', 'Axis', [q, A.z])
        >>> A.variable_map(B)
        {A_x: B_x*cos(q(t)) - B_y*sin(q(t)), A_y: B_x*sin(q(t)) + B_y*cos(q(t)), A_z: B_z}

        """
        _check_frame(otherframe)
        # Results are cached per (frame, simplification flag) because
        # Vector.simp is global state that changes the returned expressions.
        if (otherframe, Vector.simp) in self._var_dict:
            return self._var_dict[(otherframe, Vector.simp)]
        else:
            # Rotate the other frame's coordinate variables into this frame.
            vars_matrix = self.dcm(otherframe) * Matrix(otherframe.varlist)
            mapping = {}
            for i, x in enumerate(self):
                if Vector.simp:
                    mapping[self.varlist[i]] = trigsimp(vars_matrix[i], method='fu')
                else:
                    mapping[self.varlist[i]] = vars_matrix[i]
            self._var_dict[(otherframe, Vector.simp)] = mapping
            return mapping
def ang_acc_in(self, otherframe):
"""Returns the angular acceleration Vector of the ReferenceFrame.
Effectively returns the Vector:
^N alpha ^B
which represent the angular acceleration of B in N, where B is self, and
N is otherframe.
Parameters
==========
otherframe : ReferenceFrame
The ReferenceFrame which the angular acceleration is returned in.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Vector
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> V = 10 * N.x
>>> A.set_ang_acc(N, V)
>>> A.ang_acc_in(N)
10*N.x
"""
_check_frame(otherframe)
if otherframe in self._ang_acc_dict:
return self._ang_acc_dict[otherframe]
else:
return self.ang_vel_in(otherframe).dt(otherframe)
def ang_vel_in(self, otherframe):
"""Returns the angular velocity Vector of the ReferenceFrame.
Effectively returns the Vector:
^N omega ^B
which represent the angular velocity of B in N, where B is self, and
N is otherframe.
Parameters
==========
otherframe : ReferenceFrame
The ReferenceFrame which the angular velocity is returned in.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Vector
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> V = 10 * N.x
>>> A.set_ang_vel(N, V)
>>> A.ang_vel_in(N)
10*N.x
"""
_check_frame(otherframe)
flist = self._dict_list(otherframe, 1)
outvec = Vector(0)
for i in range(len(flist) - 1):
outvec += flist[i]._ang_vel_dict[flist[i + 1]]
return outvec
def dcm(self, otherframe):
"""The direction cosine matrix between frames.
This gives the DCM between this frame and the otherframe.
The format is N.xyz = N.dcm(B) * B.xyz
A SymPy Matrix is returned.
Parameters
==========
otherframe : ReferenceFrame
The otherframe which the DCM is generated to.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Vector
>>> from sympy import symbols
>>> q1 = symbols('q1')
>>> N = ReferenceFrame('N')
>>> A = N.orientnew('A', 'Axis', [q1, N.x])
>>> N.dcm(A)
Matrix([
[1, 0, 0],
[0, cos(q1), -sin(q1)],
[0, sin(q1), cos(q1)]])
"""
_check_frame(otherframe)
#Check if the dcm wrt that frame has already been calculated
if otherframe in self._dcm_cache:
return self._dcm_cache[otherframe]
flist = self._dict_list(otherframe, 0)
outdcm = eye(3)
for i in range(len(flist) - 1):
outdcm = outdcm * flist[i]._dcm_dict[flist[i + 1]]
#After calculation, store the dcm in dcm cache for faster
#future retrieval
self._dcm_cache[otherframe] = outdcm
otherframe._dcm_cache[self] = outdcm.T
return outdcm
def orient(self, parent, rot_type, amounts, rot_order=''):
"""Defines the orientation of this frame relative to a parent frame.
Parameters
==========
parent : ReferenceFrame
The frame that this ReferenceFrame will have its orientation matrix
defined in relation to.
rot_type : str
The type of orientation matrix that is being created. Supported
types are 'Body', 'Space', 'Quaternion', and 'Axis'. See examples
for correct usage.
amounts : list OR value
The quantities that the orientation matrix will be defined by.
rot_order : str
If applicable, the order of a series of rotations.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Vector
>>> from sympy import symbols
>>> q0, q1, q2, q3, q4 = symbols('q0 q1 q2 q3 q4')
>>> N = ReferenceFrame('N')
>>> B = ReferenceFrame('B')
Now we have a choice of how to implement the orientation. First is
Body. Body orientation takes this reference frame through three
successive simple rotations. Acceptable rotation orders are of length
3, expressed in XYZ or 123, and cannot have a rotation about about an
axis twice in a row.
>>> B.orient(N, 'Body', [q1, q2, q3], '123')
>>> B.orient(N, 'Body', [q1, q2, 0], 'ZXZ')
>>> B.orient(N, 'Body', [0, 0, 0], 'XYX')
Next is Space. Space is like Body, but the rotations are applied in the
opposite order.
>>> B.orient(N, 'Space', [q1, q2, q3], '312')
Next is Quaternion. This orients the new ReferenceFrame with
Quaternions, defined as a finite rotation about lambda, a unit vector,
by some amount theta.
This orientation is described by four parameters:
q0 = cos(theta/2)
q1 = lambda_x sin(theta/2)
q2 = lambda_y sin(theta/2)
q3 = lambda_z sin(theta/2)
Quaternion does not take in a rotation order.
>>> B.orient(N, 'Quaternion', [q0, q1, q2, q3])
Last is Axis. This is a rotation about an arbitrary, non-time-varying
axis by some angle. The axis is supplied as a Vector. This is how
simple rotations are defined.
>>> B.orient(N, 'Axis', [q1, N.x + 2 * N.y])
"""
from sympy.physics.vector.functions import dynamicsymbols
_check_frame(parent)
amounts = list(amounts)
for i, v in enumerate(amounts):
if not isinstance(v, Vector):
amounts[i] = sympify(v)
def _rot(axis, angle):
"""DCM for simple axis 1,2,or 3 rotations. """
if axis == 1:
return Matrix([[1, 0, 0],
[0, cos(angle), -sin(angle)],
[0, sin(angle), cos(angle)]])
elif axis == 2:
return Matrix([[cos(angle), 0, sin(angle)],
[0, 1, 0],
[-sin(angle), 0, cos(angle)]])
elif axis == 3:
return Matrix([[cos(angle), -sin(angle), 0],
[sin(angle), cos(angle), 0],
[0, 0, 1]])
approved_orders = ('123', '231', '312', '132', '213', '321', '121',
'131', '212', '232', '313', '323', '')
rot_order = str(
rot_order).upper() # Now we need to make sure XYZ = 123
rot_type = rot_type.upper()
rot_order = [i.replace('X', '1') for i in rot_order]
rot_order = [i.replace('Y', '2') for i in rot_order]
rot_order = [i.replace('Z', '3') for i in rot_order]
rot_order = ''.join(rot_order)
if not rot_order in approved_orders:
raise TypeError('The supplied order is not an approved type')
parent_orient = []
if rot_type == 'AXIS':
if not rot_order == '':
raise TypeError('Axis orientation takes no rotation order')
if not (isinstance(amounts, (list, tuple)) & (len(amounts) == 2)):
raise TypeError('Amounts are a list or tuple of length 2')
theta = amounts[0]
axis = amounts[1]
axis = _check_vector(axis)
if not axis.dt(parent) == 0:
raise ValueError('Axis cannot be time-varying')
axis = axis.express(parent).normalize()
axis = axis.args[0][0]
parent_orient = ((eye(3) - axis * axis.T) * cos(theta) +
Matrix([[0, -axis[2], axis[1]], [axis[2], 0, -axis[0]],
[-axis[1], axis[0], 0]]) * sin(theta) + axis * axis.T)
elif rot_type == 'QUATERNION':
if not rot_order == '':
raise TypeError(
'Quaternion orientation takes no rotation order')
if not (isinstance(amounts, (list, tuple)) & (len(amounts) == 4)):
raise TypeError('Amounts are a list or tuple of length 4')
q0, q1, q2, q3 = amounts
parent_orient = (Matrix([[q0 ** 2 + q1 ** 2 - q2 ** 2 - q3 **
2, 2 * (q1 * q2 - q0 * q3), 2 * (q0 * q2 + q1 * q3)],
[2 * (q1 * q2 + q0 * q3), q0 ** 2 - q1 ** 2 + q2 ** 2 - q3 ** 2,
2 * (q2 * q3 - q0 * q1)], [2 * (q1 * q3 - q0 * q2), 2 * (q0 *
q1 + q2 * q3), q0 ** 2 - q1 ** 2 - q2 ** 2 + q3 ** 2]]))
elif rot_type == 'BODY':
if not (len(amounts) == 3 & len(rot_order) == 3):
raise TypeError('Body orientation takes 3 values & 3 orders')
a1 = int(rot_order[0])
a2 = int(rot_order[1])
a3 = int(rot_order[2])
parent_orient = (_rot(a1, amounts[0]) * _rot(a2, amounts[1])
* _rot(a3, amounts[2]))
elif rot_type == 'SPACE':
if not (len(amounts) == 3 & len(rot_order) == 3):
raise TypeError('Space orientation takes 3 values & 3 orders')
a1 = int(rot_order[0])
a2 = int(rot_order[1])
a3 = int(rot_order[2])
parent_orient = (_rot(a3, amounts[2]) * _rot(a2, amounts[1])
* _rot(a1, amounts[0]))
else:
raise NotImplementedError('That is not an implemented rotation')
#Reset the _dcm_cache of this frame, and remove it from the _dcm_caches
#of the frames it is linked to. Also remove it from the _dcm_dict of
#its parent
frames = self._dcm_cache.keys()
for frame in frames:
if frame in self._dcm_dict:
del frame._dcm_dict[self]
del frame._dcm_cache[self]
#Add the dcm relationship to _dcm_dict
self._dcm_dict = self._dlist[0] = {}
self._dcm_dict.update({parent: parent_orient.T})
parent._dcm_dict.update({self: parent_orient})
#Also update the dcm cache after resetting it
self._dcm_cache = {}
self._dcm_cache.update({parent: parent_orient.T})
parent._dcm_cache.update({self: parent_orient})
if rot_type == 'QUATERNION':
t = dynamicsymbols._t
q0, q1, q2, q3 = amounts
q0d = diff(q0, t)
q1d = diff(q1, t)
q2d = diff(q2, t)
q3d = diff(q3, t)
w1 = 2 * (q1d * q0 + q2d * q3 - q3d * q2 - q0d * q1)
w2 = 2 * (q2d * q0 + q3d * q1 - q1d * q3 - q0d * q2)
w3 = 2 * (q3d * q0 + q1d * q2 - q2d * q1 - q0d * q3)
wvec = Vector([(Matrix([w1, w2, w3]), self)])
elif rot_type == 'AXIS':
thetad = (amounts[0]).diff(dynamicsymbols._t)
wvec = thetad * amounts[1].express(parent).normalize()
else:
try:
from sympy.polys.polyerrors import CoercionFailed
from sympy.physics.vector.functions import kinematic_equations
q1, q2, q3 = amounts
u1, u2, u3 = dynamicsymbols('u1, u2, u3')
templist = kinematic_equations([u1, u2, u3], [q1, q2, q3],
rot_type, rot_order)
templist = [expand(i) for i in templist]
td = solve(templist, [u1, u2, u3])
u1 = expand(td[u1])
u2 = expand(td[u2])
u3 = expand(td[u3])
wvec = u1 * self.x + u2 * self.y + u3 * self.z
except (CoercionFailed, AssertionError):
wvec = self._w_diff_dcm(parent)
self._ang_vel_dict.update({parent: wvec})
parent._ang_vel_dict.update({self: -wvec})
self._var_dict = {}
def orientnew(self, newname, rot_type, amounts, rot_order='', variables=None,
indices=None, latexs=None):
"""Creates a new ReferenceFrame oriented with respect to this Frame.
See ReferenceFrame.orient() for acceptable rotation types, amounts,
and orders. Parent is going to be self.
Parameters
==========
newname : str
The name for the new ReferenceFrame
rot_type : str
The type of orientation matrix that is being created.
amounts : list OR value
The quantities that the orientation matrix will be defined by.
rot_order : str
If applicable, the order of a series of rotations.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Vector
>>> from sympy import symbols
>>> q1 = symbols('q1')
>>> N = ReferenceFrame('N')
>>> A = N.orientnew('A', 'Axis', [q1, N.x])
.orient() documentation:\n
========================
"""
newframe = self.__class__(newname, variables, indices, latexs)
newframe.orient(self, rot_type, amounts, rot_order)
return newframe
orientnew.__doc__ += orient.__doc__
def set_ang_acc(self, otherframe, value):
"""Define the angular acceleration Vector in a ReferenceFrame.
Defines the angular acceleration of this ReferenceFrame, in another.
Angular acceleration can be defined with respect to multiple different
ReferenceFrames. Care must be taken to not create loops which are
inconsistent.
Parameters
==========
otherframe : ReferenceFrame
A ReferenceFrame to define the angular acceleration in
value : Vector
The Vector representing angular acceleration
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Vector
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> V = 10 * N.x
>>> A.set_ang_acc(N, V)
>>> A.ang_acc_in(N)
10*N.x
"""
if value == 0:
value = Vector(0)
value = _check_vector(value)
_check_frame(otherframe)
self._ang_acc_dict.update({otherframe: value})
otherframe._ang_acc_dict.update({self: -value})
def set_ang_vel(self, otherframe, value):
"""Define the angular velocity vector in a ReferenceFrame.
Defines the angular velocity of this ReferenceFrame, in another.
Angular velocity can be defined with respect to multiple different
ReferenceFrames. Care must be taken to not create loops which are
inconsistent.
Parameters
==========
otherframe : ReferenceFrame
A ReferenceFrame to define the angular velocity in
value : Vector
The Vector representing angular velocity
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Vector
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> V = 10 * N.x
>>> A.set_ang_vel(N, V)
>>> A.ang_vel_in(N)
10*N.x
"""
if value == 0:
value = Vector(0)
value = _check_vector(value)
_check_frame(otherframe)
self._ang_vel_dict.update({otherframe: value})
otherframe._ang_vel_dict.update({self: -value})
    @property
    def x(self):
        """The basis Vector for the ReferenceFrame, in the x direction. """
        # Read-only accessor; the underlying Vector is stored on self._x.
        return self._x
    @property
    def y(self):
        """The basis Vector for the ReferenceFrame, in the y direction. """
        # Read-only accessor; the underlying Vector is stored on self._y.
        return self._y
    @property
    def z(self):
        """The basis Vector for the ReferenceFrame, in the z direction. """
        # Read-only accessor; the underlying Vector is stored on self._z.
        return self._z
def _check_frame(other):
    """Validate that *other* is a ReferenceFrame.

    Raises VectorTypeError (carrying a sample ReferenceFrame so the error
    message can show the expected type) for any other argument type.
    """
    from .vector import VectorTypeError
    if isinstance(other, ReferenceFrame):
        return
    raise VectorTypeError(other, ReferenceFrame('A'))
| bsd-3-clause |
Centre-Alt-Rendiment-Esportiu/att | old_project/Python/win_libs/scipy/weave/common_info.py | 100 | 4254 | """ Generic support code for:
error handling code found in every weave module
local/global dictionary access code for inline() modules
swig pointer (old style) conversion support
"""
from __future__ import absolute_import, print_function
from . import base_info
module_support_code = \
"""
// global None value for use in functions.
namespace py {
object None = object(Py_None);
}
const char* find_type(PyObject* py_obj)
{
if(py_obj == NULL) return "C NULL value";
if(PyCallable_Check(py_obj)) return "callable";
if(PyString_Check(py_obj)) return "string";
if(PyInt_Check(py_obj)) return "int";
if(PyFloat_Check(py_obj)) return "float";
if(PyDict_Check(py_obj)) return "dict";
if(PyList_Check(py_obj)) return "list";
if(PyTuple_Check(py_obj)) return "tuple";
if(PyFile_Check(py_obj)) return "file";
if(PyModule_Check(py_obj)) return "module";
//should probably do more intergation (and thinking) on these.
if(PyCallable_Check(py_obj) && PyInstance_Check(py_obj)) return "callable";
if(PyInstance_Check(py_obj)) return "instance";
if(PyCallable_Check(py_obj)) return "callable";
return "unknown type";
}
void throw_error(PyObject* exc, const char* msg)
{
//printf("setting python error: %s\\n",msg);
PyErr_SetString(exc, msg);
//printf("throwing error\\n");
throw 1;
}
void handle_bad_type(PyObject* py_obj, const char* good_type, const char* var_name)
{
char msg[500];
sprintf(msg,"received '%s' type instead of '%s' for variable '%s'",
find_type(py_obj),good_type,var_name);
throw_error(PyExc_TypeError,msg);
}
void handle_conversion_error(PyObject* py_obj, const char* good_type, const char* var_name)
{
char msg[500];
sprintf(msg,"Conversion Error:, received '%s' type instead of '%s' for variable '%s'",
find_type(py_obj),good_type,var_name);
throw_error(PyExc_TypeError,msg);
}
"""
#include "compile.h" /* Scary dangerous stuff */
#include "frameobject.h" /* Scary dangerous stuff */
class basic_module_info(base_info.base_info):
    # Headers plus the error-handling support code (find_type/throw_error/
    # handle_bad_type above) injected into every generated weave module.
    # compile.h / frameobject.h expose CPython internals needed by inline().
    _headers = ['"Python.h"','"compile.h"','"frameobject.h"']
    _support_code = [module_support_code]
#----------------------------------------------------------------------------
# inline() generated support code
#
# The following two function declarations handle access to variables in the
# global and local dictionaries for inline functions.
#----------------------------------------------------------------------------
get_variable_support_code = \
"""
void handle_variable_not_found(const char* var_name)
{
char msg[500];
sprintf(msg,"Conversion Error: variable '%s' not found in local or global scope.",var_name);
throw_error(PyExc_NameError,msg);
}
PyObject* get_variable(const char* name,PyObject* locals, PyObject* globals)
{
// no checking done for error -- locals and globals should
// already be validated as dictionaries. If var is NULL, the
// function calling this should handle it.
PyObject* var = NULL;
var = PyDict_GetItemString(locals,name);
if (!var)
{
var = PyDict_GetItemString(globals,name);
}
if (!var)
handle_variable_not_found(name);
return var;
}
"""
py_to_raw_dict_support_code = \
"""
PyObject* py_to_raw_dict(PyObject* py_obj, const char* name)
{
// simply check that the value is a valid dictionary pointer.
if(!py_obj || !PyDict_Check(py_obj))
handle_bad_type(py_obj, "dictionary", name);
return py_obj;
}
"""
class inline_info(base_info.base_info):
    # Support code for inline()-generated modules: helpers for reading
    # variables out of the local/global dictionaries and validating dicts.
    _support_code = [get_variable_support_code, py_to_raw_dict_support_code]
#----------------------------------------------------------------------------
# swig pointer support code
#
# The support code for swig is just slirped in from the swigptr.c file
# from the *old* swig distribution. The code from swigptr.c is now a string
# in swigptr.py to ease the process of incorporating it into py2exe
# installations. New style swig pointers are not yet supported.
#----------------------------------------------------------------------------
from . import swigptr
swig_support_code = swigptr.swigptr_code
class swig_info(base_info.base_info):
    # Old-style SWIG pointer conversion support, slurped in from swigptr.py.
    _support_code = [swig_support_code]
| gpl-3.0 |
mateor/pants | tests/python/pants_test/subsystem/subsystem_util.py | 3 | 5795 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from contextlib import contextmanager
from pants.base.deprecated import deprecated
from pants.option.optionable import Optionable
from pants.option.scope import ScopeInfo
from pants.subsystem.subsystem import Subsystem
from pants_test.option.util.fakes import (create_option_values_for_optionable,
create_options_for_optionables)
_deprecation_msg = ("Use the for_subsystems and options arguments to BaseTest.context(), or use "
"the methods init_subsystem(), global_subsystem_instance() in this module.")
@deprecated('1.4.0', _deprecation_msg)
def create_subsystem(subsystem_type, scope='test-scope', **options):
  """Build a Subsystem instance for use in tests.

  :API: public

  :param type subsystem_type: The subclass of :class:`pants.subsystem.subsystem.Subsystem`
                              to create.
  :param string scope: The scope to create the subsystem in.
  :param **options: Keyword args representing option values explicitly set via the command line.
  """
  if not issubclass(subsystem_type, Subsystem):
    msg = ('The given `subsystem_type` was not a subclass of `Subsystem`: {}'
           .format(subsystem_type))
    raise TypeError(msg)
  # Fake option values as if the given options came from the command line.
  values = create_option_values_for_optionable(subsystem_type, **options)
  return subsystem_type(scope, values)
@contextmanager
@deprecated('1.4.0', _deprecation_msg)
def subsystem_instance(subsystem_type, scope=None, **options):
  """Creates a Subsystem instance for test.

  :API: public

  :param type subsystem_type: The subclass of :class:`pants.subsystem.subsystem.Subsystem`
                              to create.
  :param string scope: An optional scope to create the subsystem in; defaults to global.
  :param **options: Keyword args representing option values explicitly set via the command line.
  """
  if not issubclass(subsystem_type, Subsystem):
    raise TypeError('The given `subsystem_type` was not a subclass of `Subsystem`: {}'
                    .format(subsystem_type))
  # All subsystems the requested one transitively depends on must get options too.
  optionables = Subsystem.closure([subsystem_type])
  # Merge any options already registered globally with this test's overrides.
  updated_options = dict(Subsystem._options.items()) if Subsystem._options else {}
  if options:
    updated_options.update(options)
  Subsystem._options = create_options_for_optionables(optionables, options=updated_options)
  try:
    if scope is None:
      yield subsystem_type.global_instance()
    else:
      # Synthesize a throwaway Optionable so we can ask for a scoped instance.
      class ScopedOptionable(Optionable):
        options_scope = scope
        options_scope_category = ScopeInfo.SUBSYSTEM
      yield subsystem_type.scoped_instance(ScopedOptionable)
  finally:
    # Always restore global Subsystem state, even if the test body raises.
    Subsystem.reset()
def global_subsystem_instance(subsystem_type, options=None):
  """Returns the global instance of a subsystem, for use in tests.

  :API: public

  :param type subsystem_type: The subclass of :class:`pants.subsystem.subsystem.Subsystem`
                              to create.
  :param options: dict of scope -> (dict of option name -> value).
                  The scopes may be that of the global instance of the subsystem (i.e.,
                  subsystem_type.options_scope) and/or the scopes of instances of the
                  subsystems it transitively depends on.
  """
  # Initializing first guarantees the global instance sees the given options.
  init_subsystem(subsystem_type, options)
  return subsystem_type.global_instance()
def init_subsystems(subsystem_types, options=None):
  """Initialize subsystems for use in tests.

  Does not create an instance.  This function is for setting up subsystems that
  the code under test creates.

  Note that there is some redundancy between this function and
  BaseTest.context(for_subsystems=...).  TODO: Fix that.

  :API: public

  :param list subsystem_types: The subclasses of :class:`pants.subsystem.subsystem.Subsystem`
                               to create.
  :param options: dict of scope -> (dict of option name -> value).
                  The scopes may be those of the global instances of the subsystems (i.e.,
                  subsystem_type.options_scope) and/or the scopes of instances of the
                  subsystems they transitively depend on.
  """
  # Reject the first argument that is not actually a Subsystem subclass.
  non_subsystem = next(
      (t for t in subsystem_types if not Subsystem.is_subsystem_type(t)), None)
  if non_subsystem is not None:
    raise TypeError('{} is not a subclass of `Subsystem`'.format(non_subsystem))
  optionables = Subsystem.closure(subsystem_types)
  if options:
    # Only scopes owned by these subsystems (or their transitive deps) are legal.
    allowed_scopes = {optionable.options_scope for optionable in optionables}
    for scope in options:
      if scope not in allowed_scopes:
        raise ValueError('`{}` is not the scope of any of these subsystems: {}'.format(
          scope, optionables))
  # Don't trample existing subsystem options, in case a test has set up some
  # other subsystems in some other way.
  merged_options = dict(Subsystem._options.items()) if Subsystem._options else {}
  if options:
    merged_options.update(options)
  Subsystem.set_options(create_options_for_optionables(optionables, options=merged_options))
def init_subsystem(subsystem_type, options=None):
  """
  Singular form of :func:`pants_test.subsystem.subsystem_util.init_subsystems`

  :API: public

  :param subsystem_type: The subclass of :class:`pants.subsystem.subsystem.Subsystem`
                         to create.
  :param options: dict of scope -> (dict of option name -> value).
                  The scopes may be those of the global instance of the subsystem (i.e.,
                  subsystem_type.options_scope) and/or the scopes of instances of the
                  subsystems it transitively depends on.
  """
  # Thin convenience wrapper: delegate to the plural form with a one-item list.
  init_subsystems([subsystem_type], options)
| apache-2.0 |
CalvinNeo/PyGeo | countpca_segmentation_2.py | 1 | 7423 | #coding:utf8
import numpy as np, scipy
import pylab as pl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
from matplotlib import cm
from matplotlib import mlab
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from itertools import *
import collections
from multiprocessing import Pool
import random
from scipy.optimize import leastsq
from adasurf import AdaSurfConfig, adasurf, paint_surfs, identifysurf, point_normalize, Surface
ELAPSE_SEG = 0
class SurfSegConfig:
    """Parameter bundle for surf_segmentation(); every knob gets a default."""
    def __init__(self):
        # (name, default) pairs, applied in the documented order.
        defaults = (
            ('slice_count', 4),
            ('origin_points', 5),
            ('most_combination_points', 20),
            # the smaller, the more accurate when judging two surfaces are
            # identical, more surfaces can be generated
            ('same_threshold', 0.1),
            ('pointsame_threshold', 1.0),
            ('filter_rate', 0.08),
            ('filter_count', 50),
            ('ori_adarate', 2.0),
            ('step_adarate', 1.0),
            ('max_adarate', 2.0),
            ('split_by_count', True),
            ('weak_abort', 45),
        )
        for attr, default in defaults:
            setattr(self, attr, default)
def paint_points(points, show = True, title = '', xlim = None, ylim = None, zlim = None):
    """Scatter-plot an (N, 3) point cloud on a 3D axis and return the figure.

    Axis limits default to the data's extent on each coordinate.  Pass
    ``show=False`` to build the figure without blocking on ``pl.show()``.
    """
    def span(col):
        # (min, max) extent of a single coordinate column.
        return (np.min(col), np.max(col))

    fig = pl.figure()
    ax = fig.add_subplot(111, projection='3d')
    if xlim is None:
        xlim = span(points[:, 0])
    if ylim is None:
        ylim = span(points[:, 1])
    if zlim is None:
        zlim = span(points[:, 2])
    ax.scatter(points[:, 0], points[:, 1], points[:, 2], c='r')
    ax.set_zlim(zlim[0], zlim[1])
    ax.set_ylim(ylim[0], ylim[1])
    ax.set_xlim(xlim[0], xlim[1])
    for axis_name in ("x", "y", "z"):
        getattr(ax, "set_%slabel" % axis_name)(axis_name)
    ax.zaxis.set_major_locator(LinearLocator(10))
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    pl.title(title)
    if show:
        pl.show()
    return fig
def surf_segmentation(points, config, paint_when_end = False):
    """Slice the cloud along one axis and fit planar surfaces slice by slice.

    Points left unexplained by one slice ("fail") are carried into the next
    slice's fitting pass.  Returns (surfs, normalized_points, (slice_figs,)).

    NOTE(review): this function reads the module global `time`, which is only
    imported inside the __main__ block below -- confirm before reusing it as
    a library function.
    """
    global ELAPSE_SEG
    # Never use fewer points per slice than the surface fitter's seed size.
    config.slice_count = min(int(len(points) / config.origin_points), config.slice_count)
    assert len(points) / config.slice_count >= config.origin_points
    # Forward all tuning knobs to the adaptive surface fitter.
    adasurconfig = AdaSurfConfig({'origin_points': config.origin_points
        , 'most_combination_points': config.most_combination_points
        , 'same_threshold': config.same_threshold
        , 'filter_rate': config.filter_rate
        , 'ori_adarate': config.ori_adarate
        , 'step_adarate': config.step_adarate
        , 'max_adarate': config.max_adarate
        , 'pointsame_threshold': config.pointsame_threshold
        , 'filter_count' : config.filter_count
        , 'weak_abort' : config.weak_abort
        })
    surfs = []
    slice_fig = []
    npoints = point_normalize(points)
    starttime = time.clock()  # NOTE(review): unused here; timing is printed by the caller
    xlim = (np.min(npoints[:, 0]), np.max(npoints[:, 0]))
    ylim = (np.min(npoints[:, 1]), np.max(npoints[:, 1]))
    zlim = (np.min(npoints[:, 2]), np.max(npoints[:, 2]))
    pca_md = mlab.PCA(np.copy(npoints))
    # Slicing direction: currently the raw x axis; the PCA-based direction
    # is kept commented out below for experimentation.
    projection0_direction = None
    # projection0_direction = pca_md.Y[0]
    # projection0 = np.inner(projection0_direction, npoints)
    projection0 = npoints[:, 0]
    if config.split_by_count:
        # Equal-population slices: sort by projection and deal points out
        # in runs of step_count.
        step_count = len(projection0) / config.slice_count
        pointsets = [np.array([]).reshape(0,3)] * config.slice_count
        sorted_projection0_index = np.argsort(projection0)
        current_slot_count, ptsetid = 0, 0
        for index in sorted_projection0_index:
            pointsets[ptsetid] = np.vstack((pointsets[ptsetid], npoints[index, :]))
            current_slot_count += 1
            if current_slot_count > step_count:
                current_slot_count = 0
                ptsetid += 1
    else:
        # Equal-width slices: bucket each point by its projection interval.
        projection0min, projection0max = np.min(projection0), np.max(projection0)
        step_len = (projection0max - projection0min) / config.slice_count
        pointsets = [np.array([]).reshape(0,3)] * config.slice_count
        for i in xrange(len(projection0)):
            if projection0[i] == projection0max:
                ptsetid = config.slice_count - 1
            else:
                ptsetid = int((projection0[i] - projection0min) / step_len)
            pointsets[ptsetid] = np.vstack((pointsets[ptsetid], npoints[i]))
    # random.shuffle(pointsets)
    # `fail` accumulates points not absorbed by any surface so far.
    partial_surfs, fail = [], np.array([]).reshape(0,3)
    # for (ptset, ptsetindex) in zip(pointsets, range(len(pointsets))):
    #     print "slice", len(ptset), xlim, ylim, zlim
    #     paint_points(ptset, xlim = xlim, ylim = ylim, zlim = zlim)
    for (ptset, ptsetindex) in zip(pointsets, range(len(pointsets))):
        print "--------------------------------------"
        print "before segment", ptsetindex, '/', len(pointsets)
        print 'derived surfs:'
        # print '---000', ptset.shape, np.array(fail).shape, np.array(fail), fail
        # NOTE(review): `fail == None` on a numpy array is an elementwise
        # comparison in modern numpy; this code relies on legacy behavior.
        if fail == None:
            allptfortest = np.array(ptset)
        else:
            # Retry this slice together with the leftovers of earlier slices.
            allptfortest = np.vstack((ptset, np.array(fail).reshape(-1,3)))
        print "len of surf is: ", len(partial_surfs), ", len of points is: ", len(allptfortest)
        if allptfortest != None and len(allptfortest) > 0 :
            partial_surfs, _, fail, extradata = identifysurf(allptfortest, adasurconfig, donorm = False, surfs = partial_surfs, title = str(ptsetindex)
                , paint_when_end = paint_when_end, current_direction = projection0_direction)
            if paint_when_end:
                slice_fig.append(extradata[0])
        if fail == None:
            print "after segment", ptsetindex, "len of surf", len(partial_surfs), "fail is None", fail
        else:
            print "after segment", ptsetindex, "len of surf", len(partial_surfs), "len of fail", len(fail)
        for x in partial_surfs:
            x.printf()
    surfs.extend(partial_surfs)
    # fig = pl.figure()
    # ax = fig.add_subplot(111, projection='3d')
    # ax.scatter(npoints[:, 0], npoints[:, 1], npoints[:, 2], c='r')
    # x = np.linspace(0, pca_md.Wt[0, 0] * 100, 300)
    # y = np.linspace(0, pca_md.Wt[0, 1] * 100, 300)
    # z = np.linspace(0, pca_md.Wt[0, 2] * 100, 300)
    # ax.plot(x, y, z, c='k')
    # x = np.linspace(0, pca_md.Wt[1, 0] * 100, 300)
    # y = np.linspace(0, pca_md.Wt[1, 1] * 100, 300)
    # z = np.linspace(0, pca_md.Wt[1, 2] * 100, 300)
    # ax.plot(x, y, z, c='g')
    # pl.show()
    return surfs, npoints, (slice_fig, )
if __name__ == '__main__':
    # Driver: load a point cloud, segment it, and report/paint the surfaces.
    c = np.loadtxt('5.py', comments='#')
    config = SurfSegConfig()
    print 'config', config.__dict__
    import time
    starttime = time.clock()
    surfs, npoints, extradata = surf_segmentation(c, config, paint_when_end = True)
    print "----------BELOW ARE SURFACES---------- count:", len(surfs)
    print 'TOTAL: ', time.clock() - starttime
    print 'ELAPSE_SEG: ', ELAPSE_SEG
    # Tally how many input points were assigned to some surface.
    ALL_POINT = 0
    for s,i in zip(surfs, range(len(surfs))):
        print "SURFACE ", i
        print s.args # surface args
        print s.residuals # MSE
        print len(s.points)
        ALL_POINT += len(s.points)
        # print s[2] # npoints
        print '**************************************'
    print 'ALL_POINT: ', ALL_POINT
    print '----------BELOW ARE POINTS----------'
    # for s,i in zip(surfs, range(len(surfs))):
    #     print "SURFACE ", i
    #     print s.points
    paint_surfs(surfs, npoints, 'all')
    print extradata
    for slice_fig in extradata[0]:
        slice_fig.show()
| apache-2.0 |
Gabriel0402/zulip | zerver/lib/unminify.py | 115 | 1661 | from __future__ import absolute_import
import re
import os.path
import sourcemap
class SourceMap(object):
    '''Map (line, column) pairs from generated to source file.'''

    def __init__(self, sourcemap_dir):
        # Directory holding the `<name>.js.map` files.
        self._dir = sourcemap_dir
        # Cache of parsed sourcemap indices, keyed by minified filename.
        self._indices = {}

    def _index_for(self, minified_src):
        '''Return the source map index for minified_src, loading it if not
        already loaded.'''
        try:
            return self._indices[minified_src]
        except KeyError:
            with open(os.path.join(self._dir, minified_src + '.map')) as fp:
                index = sourcemap.load(fp)
            self._indices[minified_src] = index
            return index

    def annotate_stacktrace(self, stacktrace):
        '''Return the stacktrace with each minified-JS frame annotated with
        its original source position, where a source map is available.'''
        annotated = []
        for ln in stacktrace.splitlines():
            annotated.append(ln + '\n')
            match = re.search(r'/static/min/(.+)(\.[0-9a-f]+)\.js:(\d+):(\d+)', ln)
            if match:
                # Get the appropriate source map for the minified file.
                minified_src = match.groups()[0] + '.js'
                index = self._index_for(minified_src)
                gen_line, gen_col = map(int, match.groups()[2:4])
                # The sourcemap lib is 0-based, so subtract 1 from line and col.
                try:
                    result = index.lookup(line=gen_line-1, column=gen_col-1)
                    annotated.append('       = %s line %d column %d\n' %
                                     (result.src, result.src_line+1, result.src_col+1))
                except IndexError:
                    annotated.append('  [Unable to look up in source map]\n')
            if ln.startswith('    at'):
                annotated.append('\n')
        return ''.join(annotated)
| apache-2.0 |
thomasem/nova | nova/tests/unit/scheduler/ironic_fakes.py | 45 | 4222 | # Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fake nodes for Ironic host manager tests.
"""
from nova import objects
COMPUTE_NODES = [
objects.ComputeNode(
id=1, local_gb=10, memory_mb=1024, vcpus=1,
vcpus_used=0, local_gb_used=0, memory_mb_used=0,
updated_at=None, cpu_info='baremetal cpu',
host='host1',
hypervisor_hostname='node1uuid', host_ip='127.0.0.1',
hypervisor_version=1, hypervisor_type='ironic',
stats=dict(ironic_driver=
"nova.virt.ironic.driver.IronicDriver",
cpu_arch='i386'),
supported_hv_specs=[objects.HVSpec.from_list(
["i386", "baremetal", "baremetal"])],
free_disk_gb=10, free_ram_mb=1024),
objects.ComputeNode(
id=2, local_gb=20, memory_mb=2048, vcpus=1,
vcpus_used=0, local_gb_used=0, memory_mb_used=0,
updated_at=None, cpu_info='baremetal cpu',
host='host2',
hypervisor_hostname='node2uuid', host_ip='127.0.0.1',
hypervisor_version=1, hypervisor_type='ironic',
stats=dict(ironic_driver=
"nova.virt.ironic.driver.IronicDriver",
cpu_arch='i386'),
supported_hv_specs=[objects.HVSpec.from_list(
["i386", "baremetal", "baremetal"])],
free_disk_gb=20, free_ram_mb=2048),
objects.ComputeNode(
id=3, local_gb=30, memory_mb=3072, vcpus=1,
vcpus_used=0, local_gb_used=0, memory_mb_used=0,
updated_at=None, cpu_info='baremetal cpu',
host='host3',
hypervisor_hostname='node3uuid', host_ip='127.0.0.1',
hypervisor_version=1, hypervisor_type='ironic',
stats=dict(ironic_driver=
"nova.virt.ironic.driver.IronicDriver",
cpu_arch='i386'),
supported_hv_specs=[objects.HVSpec.from_list(
["i386", "baremetal", "baremetal"])],
free_disk_gb=30, free_ram_mb=3072),
objects.ComputeNode(
id=4, local_gb=40, memory_mb=4096, vcpus=1,
vcpus_used=0, local_gb_used=0, memory_mb_used=0,
updated_at=None, cpu_info='baremetal cpu',
host='host4',
hypervisor_hostname='node4uuid', host_ip='127.0.0.1',
hypervisor_version=1, hypervisor_type='ironic',
stats=dict(ironic_driver=
"nova.virt.ironic.driver.IronicDriver",
cpu_arch='i386'),
supported_hv_specs=[objects.HVSpec.from_list(
["i386", "baremetal", "baremetal"])],
free_disk_gb=40, free_ram_mb=4096),
# Broken entry
objects.ComputeNode(
id=5, local_gb=50, memory_mb=5120, vcpus=1,
host='fake', cpu_info='baremetal cpu',
stats=dict(ironic_driver=
"nova.virt.ironic.driver.IronicDriver",
cpu_arch='i386'),
supported_hv_specs=[objects.HVSpec.from_list(
["i386", "baremetal", "baremetal"])],
free_disk_gb=50, free_ram_mb=5120,
hypervisor_hostname='fake-hyp'),
]
# Fake Service records paired with the fake compute nodes above; only
# 'host2' is disabled.
SERVICES = [
    objects.Service(host=name, disabled=(name == 'host2'))
    for name in ('host1', 'host2', 'host3', 'host4')
]


def get_service_by_host(host):
    """Return the first fake Service registered for *host*.

    Raises IndexError when no fake service matches, same as indexing an
    empty list.
    """
    matching = [svc for svc in SERVICES if svc.host == host]
    return matching[0]
| apache-2.0 |
abhattad4/Digi-Menu | digimenu2/django/conf/locale/zh_CN/formats.py | 634 | 1810 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Display formats (Django date-format syntax).
DATE_FORMAT = 'Y年n月j日' # 2016年9月5日
TIME_FORMAT = 'H:i' # 20:45
DATETIME_FORMAT = 'Y年n月j日 H:i' # 2016年9月5日 20:45
YEAR_MONTH_FORMAT = 'Y年n月' # 2016年9月
MONTH_DAY_FORMAT = 'm月j日' # 9月5日
SHORT_DATE_FORMAT = 'Y年n月j日' # 2016年9月5日
SHORT_DATETIME_FORMAT = 'Y年n月j日 H:i' # 2016年9月5日 20:45
FIRST_DAY_OF_WEEK = 1 # 星期一 (Monday)
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%Y/%m/%d', # '2016/09/05'
    '%Y-%m-%d', # '2016-09-05'
    # NOTE(review): '%n' and '%j' are not standard strftime month/day codes;
    # this matches upstream Django but verify the CJK format actually parses.
    '%Y年%n月%j日', # '2016年9月5日'
)
TIME_INPUT_FORMATS = (
    '%H:%M', # '20:45'
    '%H:%M:%S', # '20:45:29'
    '%H:%M:%S.%f', # '20:45:29.000200'
)
# Accepted datetime input formats, most common first.  Minutes are always
# '%M' ('%n' is not a strftime minutes directive); the last entry previously
# used '%H:%n:%S.%f', which could never parse the documented example.
DATETIME_INPUT_FORMATS = (
    '%Y/%m/%d %H:%M', # '2016/09/05 20:45'
    '%Y-%m-%d %H:%M', # '2016-09-05 20:45'
    '%Y年%n月%j日 %H:%M', # '2016年9月5日 20:45'
    '%Y/%m/%d %H:%M:%S', # '2016/09/05 20:45:29'
    '%Y-%m-%d %H:%M:%S', # '2016-09-05 20:45:29'
    '%Y年%n月%j日 %H:%M:%S', # '2016年9月5日 20:45:29'
    '%Y/%m/%d %H:%M:%S.%f', # '2016/09/05 20:45:29.000200'
    '%Y-%m-%d %H:%M:%S.%f', # '2016-09-05 20:45:29.000200'
    '%Y年%n月%j日 %H:%M:%S.%f', # '2016年9月5日 20:45:29.000200'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ''
NUMBER_GROUPING = 4
| bsd-3-clause |
thaim/ansible | test/units/modules/network/nxos/test_nxos_bgp.py | 68 | 5391 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.nxos import nxos_bgp
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosBgpModule(TestNxosModule):
    """Unit tests for nxos_bgp against a 2-byte-ASN device fixture."""
    # Ansible module under test.
    module = nxos_bgp
    def setUp(self):
        super(TestNxosBgpModule, self).setUp()
        # Stub out device I/O: config pushes and config reads are mocked.
        self.mock_load_config = patch('ansible.modules.network.nxos.nxos_bgp.load_config')
        self.load_config = self.mock_load_config.start()
        self.mock_get_config = patch('ansible.modules.network.nxos.nxos_bgp.get_config')
        self.get_config = self.mock_get_config.start()
    def tearDown(self):
        super(TestNxosBgpModule, self).tearDown()
        self.mock_load_config.stop()
        self.mock_get_config.stop()
    def load_fixtures(self, commands=None, device=''):
        # Every test sees the same canned running-config (ASN 65535).
        self.get_config.return_value = load_fixture('nxos_bgp', 'config.cfg')
        self.load_config.return_value = []
    def test_nxos_bgp(self):
        set_module_args(dict(asn=65535, router_id='192.0.2.1'))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'], ['router bgp 65535', 'router-id 192.0.2.1'])
    def test_nxos_bgp_change_nothing(self):
        # router-id in the fixture already matches -> no change.
        set_module_args(dict(asn=65535, router_id='192.168.1.1'))
        self.execute_module(changed=False)
    def test_nxos_bgp_wrong_asn(self):
        set_module_args(dict(asn=10, router_id='192.168.1.1'))
        result = self.execute_module(failed=True)
        self.assertEqual(result['msg'], 'Another BGP ASN already exists.')
    def test_nxos_bgp_remove(self):
        set_module_args(dict(asn=65535, state='absent'))
        self.execute_module(changed=True, commands=['no router bgp 65535'])
    def test_nxos_bgp_remove_vrf(self):
        set_module_args(dict(asn=65535, vrf='test2', state='absent'))
        self.execute_module(changed=True, commands=['router bgp 65535', 'no vrf test2'])
    def test_nxos_bgp_remove_nonexistant_vrf(self):
        set_module_args(dict(asn=65535, vrf='foo', state='absent'))
        self.execute_module(changed=False)
    def test_nxos_bgp_remove_wrong_asn(self):
        set_module_args(dict(asn=10, state='absent'))
        self.execute_module(changed=False)
    def test_nxos_bgp_vrf(self):
        set_module_args(dict(asn=65535, vrf='test', router_id='192.0.2.1'))
        result = self.execute_module(changed=True, commands=['router bgp 65535', 'vrf test', 'router-id 192.0.2.1'])
        self.assertEqual(result['warnings'], ["VRF test doesn't exist."])
    def test_nxos_bgp_global_param(self):
        set_module_args(dict(asn=65535, shutdown=True))
        self.execute_module(changed=True, commands=['router bgp 65535', 'shutdown'])
    def test_nxos_bgp_global_param_outside_default(self):
        set_module_args(dict(asn=65535, vrf='test', shutdown=True))
        result = self.execute_module(failed=True)
        self.assertEqual(result['msg'], 'Global params can be modified only under "default" VRF.')
    def test_nxos_bgp_default_value(self):
        set_module_args(dict(asn=65535, graceful_restart_timers_restart='default'))
        self.execute_module(
            changed=True,
            commands=['router bgp 65535', 'graceful-restart restart-time 120']
        )
class TestNxosBgp32BitsAS(TestNxosModule):
    """Same scenarios as above but with a 4-byte (dotted) ASN fixture."""
    # Ansible module under test.
    module = nxos_bgp
    def setUp(self):
        super(TestNxosBgp32BitsAS, self).setUp()
        # Stub out device I/O: config pushes and config reads are mocked.
        self.mock_load_config = patch('ansible.modules.network.nxos.nxos_bgp.load_config')
        self.load_config = self.mock_load_config.start()
        self.mock_get_config = patch('ansible.modules.network.nxos.nxos_bgp.get_config')
        self.get_config = self.mock_get_config.start()
    def tearDown(self):
        super(TestNxosBgp32BitsAS, self).tearDown()
        self.mock_load_config.stop()
        self.mock_get_config.stop()
    def load_fixtures(self, commands=None, device=''):
        # Canned running-config with the dotted ASN 65535.65535.
        self.get_config.return_value = load_fixture('nxos_bgp', 'config_32_bits_as.cfg')
        self.load_config.return_value = []
    def test_nxos_bgp_change_nothing(self):
        set_module_args(dict(asn='65535.65535', router_id='192.168.1.1'))
        self.execute_module(changed=False)
    def test_nxos_bgp_wrong_asn(self):
        set_module_args(dict(asn='65535.10', router_id='192.168.1.1'))
        result = self.execute_module(failed=True)
        self.assertEqual(result['msg'], 'Another BGP ASN already exists.')
    def test_nxos_bgp_remove(self):
        set_module_args(dict(asn='65535.65535', state='absent'))
        self.execute_module(changed=True, commands=['no router bgp 65535.65535'])
| mit |
SimVascular/VTK | ThirdParty/Twisted/twisted/spread/banana.py | 60 | 11010 | # -*- test-case-name: twisted.test.test_banana -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Banana -- s-exp based protocol.
Future Plans: This module is almost entirely stable. The same caveat applies
to it as applies to L{twisted.spread.jelly}, however. Read its future plans
for more details.
@author: Glyph Lefkowitz
"""
import copy, cStringIO, struct
from twisted.internet import protocol
from twisted.persisted import styles
from twisted.python import log
class BananaError(Exception):
    """Raised on banana protocol violations: oversized prefixes, lists or
    strings during decoding, and unencodable/oversized objects during encoding."""
    pass
def int2b128(integer, stream):
    """Write *integer* to *stream* as base-128 characters, least significant
    group first; each 7-bit group is emitted as one chr() in 0x00-0x7f."""
    if not integer:
        stream(chr(0))
        return
    assert integer > 0, "can only encode positive integers"
    remaining = integer
    while remaining:
        stream(chr(remaining & 0x7f))
        remaining >>= 7
def b1282int(st):
    """
    Convert an integer represented as a base 128 string into an C{int} or
    C{long}.
    @param st: the digits of the number as characters, least significant
        (7-bit) group first.
    @type st: C{str}
    @return: the decoded integer value.
    @rtype: C{int} or C{long}
    """
    result = 0
    shift = 0
    for character in st:
        result += ord(character) << shift
        shift += 7
    return result
# delimiter characters.
# Each encoded element is "<base-128 prefix><type byte>[payload]"; all the
# type bytes have the high bit set, which is what terminates the prefix.
LIST = chr(0x80)
INT = chr(0x81)
STRING = chr(0x82)
NEG = chr(0x83)
FLOAT = chr(0x84)
# "optional" -- these might be refused by a low-level implementation.
LONGINT = chr(0x85)
LONGNEG = chr(0x86)
# really optional; this is part of the 'pb' vocabulary
VOCAB = chr(0x87)
# any byte >= this value ends the base-128 prefix during decoding
HIGH_BIT_SET = chr(0x80)
def setPrefixLimit(limit):
    """
    Set the limit on the prefix length for all Banana connections
    established after this call.
    The prefix length limit determines how many bytes of prefix a banana
    decoder will allow before rejecting a potential object as too large.
    @type limit: C{int}
    @param limit: The number of bytes of prefix for banana to allow when
        decoding.
    """
    global _PREFIX_LIMIT
    # module-level default; each Banana instance copies it in connectionMade()
    _PREFIX_LIMIT = limit
# establish the default prefix limit: up to 64 bytes of base-128 prefix
setPrefixLimit(64)
SIZE_LIMIT = 640 * 1024 # 640k is all you'll ever need :-)
class Banana(protocol.Protocol, styles.Ephemeral):
    """
    Stream protocol for banana s-expressions: nested lists of integers,
    strings and floats.  Peers first negotiate a dialect ("pb" or "none")
    and then exchange encoded expressions.
    """
    knownDialects = ["pb", "none"]
    prefixLimit = None
    sizeLimit = SIZE_LIMIT
    def setPrefixLimit(self, limit):
        """
        Set the prefix limit for decoding done by this protocol instance.
        @see: L{setPrefixLimit}
        """
        self.prefixLimit = limit
        # derive the int/long boundaries implied by the prefix length
        self._smallestLongInt = -2 ** (limit * 7) + 1
        self._smallestInt = -2 ** 31
        self._largestInt = 2 ** 31 - 1
        self._largestLongInt = 2 ** (limit * 7) - 1
    def connectionReady(self):
        """Surrogate for connectionMade
        Called after protocol negotiation.
        """
    def _selectDialect(self, dialect):
        # record the negotiated dialect and announce readiness
        self.currentDialect = dialect
        self.connectionReady()
    def callExpressionReceived(self, obj):
        # Until a dialect is chosen, incoming expressions are negotiation
        # messages rather than application data.
        if self.currentDialect:
            self.expressionReceived(obj)
        else:
            # this is the first message we've received
            if self.isClient:
                # if I'm a client I have to respond
                for serverVer in obj:
                    if serverVer in self.knownDialects:
                        self.sendEncoded(serverVer)
                        self._selectDialect(serverVer)
                        break
                else:
                    # I can't speak any of those dialects.
                    log.msg("The client doesn't speak any of the protocols "
                            "offered by the server: disconnecting.")
                    self.transport.loseConnection()
            else:
                if obj in self.knownDialects:
                    self._selectDialect(obj)
                else:
                    # the client just selected a protocol that I did not suggest.
                    log.msg("The client selected a protocol the server didn't "
                            "suggest and doesn't know: disconnecting.")
                    self.transport.loseConnection()
    def connectionMade(self):
        # pick up the module-wide prefix limit current at connection time
        self.setPrefixLimit(_PREFIX_LIMIT)
        self.currentDialect = None
        if not self.isClient:
            # the server opens negotiation by offering its known dialects
            self.sendEncoded(self.knownDialects)
    def gotItem(self, item):
        # append to the innermost open list, or deliver a complete expression
        l = self.listStack
        if l:
            l[-1][1].append(item)
        else:
            self.callExpressionReceived(item)
    buffer = ''
    def dataReceived(self, chunk):
        # 'buffer' (local) accumulates unparsed bytes; self.buffer keeps
        # partial data between calls.  Note this shadows the builtin.
        buffer = self.buffer + chunk
        listStack = self.listStack
        gotItem = self.gotItem
        while buffer:
            assert self.buffer != buffer, "This ain't right: %s %s" % (repr(self.buffer), repr(buffer))
            self.buffer = buffer
            pos = 0
            # scan for the type byte (high bit set) ending the base-128 prefix
            for ch in buffer:
                if ch >= HIGH_BIT_SET:
                    break
                pos = pos + 1
            else:
                # no type byte yet: wait for more data, but bound the prefix
                if pos > self.prefixLimit:
                    raise BananaError("Security precaution: more than %d bytes of prefix" % (self.prefixLimit,))
                return
            num = buffer[:pos]
            typebyte = buffer[pos]
            rest = buffer[pos+1:]
            if len(num) > self.prefixLimit:
                raise BananaError("Security precaution: longer than %d bytes worth of prefix" % (self.prefixLimit,))
            if typebyte == LIST:
                num = b1282int(num)
                if num > SIZE_LIMIT:
                    raise BananaError("Security precaution: List too long.")
                # open a new list expecting 'num' elements
                listStack.append((num, []))
                buffer = rest
            elif typebyte == STRING:
                num = b1282int(num)
                if num > SIZE_LIMIT:
                    raise BananaError("Security precaution: String too long.")
                if len(rest) >= num:
                    buffer = rest[num:]
                    gotItem(rest[:num])
                else:
                    return
            elif typebyte == INT:
                buffer = rest
                num = b1282int(num)
                gotItem(num)
            elif typebyte == LONGINT:
                buffer = rest
                num = b1282int(num)
                gotItem(num)
            elif typebyte == LONGNEG:
                buffer = rest
                num = b1282int(num)
                gotItem(-num)
            elif typebyte == NEG:
                buffer = rest
                num = -b1282int(num)
                gotItem(num)
            elif typebyte == VOCAB:
                buffer = rest
                num = b1282int(num)
                gotItem(self.incomingVocabulary[num])
            elif typebyte == FLOAT:
                # floats are a fixed 8-byte big-endian double; no length prefix
                if len(rest) >= 8:
                    buffer = rest[8:]
                    gotItem(struct.unpack("!d", rest[:8])[0])
                else:
                    return
            else:
                raise NotImplementedError(("Invalid Type Byte %r" % (typebyte,)))
            # close (and deliver) any lists that reached their declared length
            while listStack and (len(listStack[-1][1]) == listStack[-1][0]):
                item = listStack.pop()[1]
                gotItem(item)
        self.buffer = ''
    def expressionReceived(self, lst):
        """Called when an expression (list, string, or int) is received.
        """
        raise NotImplementedError()
    outgoingVocabulary = {
        # Jelly Data Types
        'None' : 1,
        'class' : 2,
        'dereference' : 3,
        'reference' : 4,
        'dictionary' : 5,
        'function' : 6,
        'instance' : 7,
        'list' : 8,
        'module' : 9,
        'persistent' : 10,
        'tuple' : 11,
        'unpersistable' : 12,
        # PB Data Types
        'copy' : 13,
        'cache' : 14,
        'cached' : 15,
        'remote' : 16,
        'local' : 17,
        'lcache' : 18,
        # PB Protocol Messages
        'version' : 19,
        'login' : 20,
        'password' : 21,
        'challenge' : 22,
        'logged_in' : 23,
        'not_logged_in' : 24,
        'cachemessage' : 25,
        'message' : 26,
        'answer' : 27,
        'error' : 28,
        'decref' : 29,
        'decache' : 30,
        'uncache' : 31,
        }
    incomingVocabulary = {}
    # invert the table at class-creation time for decoding VOCAB tokens
    for k, v in outgoingVocabulary.items():
        incomingVocabulary[v] = k
    def __init__(self, isClient=1):
        self.listStack = []
        self.outgoingSymbols = copy.copy(self.outgoingVocabulary)
        self.outgoingSymbolCount = 0
        self.isClient = isClient
    def sendEncoded(self, obj):
        # serialise the whole expression, then write it in one transport call
        io = cStringIO.StringIO()
        self._encode(obj, io.write)
        value = io.getvalue()
        self.transport.write(value)
    def _encode(self, obj, write):
        # recursive serialiser; 'write' is a callable accepting strings
        if isinstance(obj, (list, tuple)):
            if len(obj) > SIZE_LIMIT:
                raise BananaError(
                    "list/tuple is too long to send (%d)" % (len(obj),))
            int2b128(len(obj), write)
            write(LIST)
            for elem in obj:
                self._encode(elem, write)
        elif isinstance(obj, (int, long)):
            if obj < self._smallestLongInt or obj > self._largestLongInt:
                raise BananaError(
                    "int/long is too large to send (%d)" % (obj,))
            if obj < self._smallestInt:
                int2b128(-obj, write)
                write(LONGNEG)
            elif obj < 0:
                int2b128(-obj, write)
                write(NEG)
            elif obj <= self._largestInt:
                int2b128(obj, write)
                write(INT)
            else:
                int2b128(obj, write)
                write(LONGINT)
        elif isinstance(obj, float):
            write(FLOAT)
            write(struct.pack("!d", obj))
        elif isinstance(obj, str):
            # TODO: an API for extending banana...
            if self.currentDialect == "pb" and obj in self.outgoingSymbols:
                # in the 'pb' dialect, well-known strings go as VOCAB indices
                symbolID = self.outgoingSymbols[obj]
                int2b128(symbolID, write)
                write(VOCAB)
            else:
                if len(obj) > SIZE_LIMIT:
                    raise BananaError(
                        "string is too long to send (%d)" % (len(obj),))
                int2b128(len(obj), write)
                write(STRING)
                write(obj)
        else:
            raise BananaError("could not send object: %r" % (obj,))
# For use from the interactive interpreter
# _i is a shared module-level instance backing encode()/decode() below
_i = Banana()
_i.connectionMade()
_i._selectDialect("none")
def encode(lst):
    """Encode a list s-expression.
    Serialises C{lst} using the shared module-level Banana instance
    (dialect "none") and returns the wire bytes as a string.
    """
    io = cStringIO.StringIO()
    _i.transport = io
    _i.sendEncoded(lst)
    return io.getvalue()
def decode(st):
    """
    Decode a banana-encoded string.
    Returns the first complete expression found in C{st}; raises
    C{IndexError} if the data did not contain a complete expression.
    """
    l = []
    _i.expressionReceived = l.append
    try:
        _i.dataReceived(st)
    finally:
        # always reset the shared instance so a failed or partial decode
        # cannot corrupt a later call
        _i.buffer = ''
        del _i.expressionReceived
    return l[0]
| bsd-3-clause |
wpoely86/easybuild-easyblocks | test/easyblocks/suite.py | 1 | 3217 | #!/usr/bin/python
##
# Copyright 2012-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
This script is a collection of all the testcases for easybuild-easyblocks.
Usage: "python -m easybuild.easyblocks.test.suite.py" or "./easybuild/easyblocks/test/suite.py"
@author: Toon Willems (Ghent University)
@author: Kenneth Hoste (Ghent University)
"""
import glob
import os
import shutil
import sys
import tempfile
import unittest
from vsc.utils import fancylogger
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.options import set_tmpdir
import test.easyblocks.general as g
import test.easyblocks.init_easyblocks as i
import test.easyblocks.module as m
# initialize logger for all the unit tests
fd, log_fn = tempfile.mkstemp(prefix='easybuild-easyblocks-tests-', suffix='.log')
os.close(fd)
# the temp file is removed right away: mkstemp only reserves a unique path,
# fancylogger recreates the file when logging starts
os.remove(log_fn)
fancylogger.logToFile(log_fn)
log = fancylogger.getLogger()
log.setLevelName('DEBUG')
try:
    tmpdir = set_tmpdir(raise_error=True)
except EasyBuildError, err:
    sys.stderr.write("No execution rights on temporary files, specify another location via $TMPDIR: %s\n" % err)
    sys.exit(1)
os.environ['EASYBUILD_TMP_LOGDIR'] = tempfile.mkdtemp(prefix='easyblocks_test_')
# call suite() for each module and then run them all
SUITE = unittest.TestSuite([x.suite() for x in [g, i, m]])
# uses XMLTestRunner if possible, so we can output an XML file that can be supplied to Jenkins
xml_msg = ""
try:
    import xmlrunner # requires unittest-xml-reporting package
    xml_dir = 'test-reports'
    res = xmlrunner.XMLTestRunner(output=xml_dir, verbosity=1).run(SUITE)
    xml_msg = ", XML output of tests available in %s directory" % xml_dir
except ImportError, err:
    sys.stderr.write("WARNING: xmlrunner module not available, falling back to using unittest...\n\n")
    res = unittest.TextTestRunner().run(SUITE)
fancylogger.logToFile(log_fn, enable=False)
shutil.rmtree(os.environ['EASYBUILD_TMP_LOGDIR'])
del os.environ['EASYBUILD_TMP_LOGDIR']
if not res.wasSuccessful():
    # keep the log around on failure so it can be inspected
    sys.stderr.write("ERROR: Not all tests were successful.\n")
    print "Log available at %s" % log_fn, xml_msg
    sys.exit(2)
else:
    # success: clean up the log file(s) and the temporary directory
    for f in glob.glob('%s*' % log_fn):
        os.remove(f)
    shutil.rmtree(tmpdir)
| gpl-2.0 |
ocadotechnology/boto | tests/integration/s3/test_cert_verification.py | 126 | 1532 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that all of the certs on SQS endpoints validate.
"""
import unittest
from tests.integration import ServiceCertVerificationTest
import boto.s3
class S3CertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
    """Check that the SSL certificates on all S3 region endpoints validate."""
    s3 = True  # flag consumed by ServiceCertVerificationTest -- TODO confirm exact use
    regions = boto.s3.regions()
    def sample_service_call(self, conn):
        """Simple API call used to exercise each region's connection."""
        conn.get_all_buckets()
| mit |
seanfarley/pygit2 | test/__init__.py | 2 | 1719 | # -*- coding: UTF-8 -*-
#
# Copyright 2010 Google, Inc.
#
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2,
# as published by the Free Software Foundation.
#
# In addition to the permissions in the GNU General Public License,
# the authors give you unlimited permission to link the compiled
# version of this file into combinations with other programs,
# and to distribute those combinations without any restriction
# coming from the use of this file. (The General Public License
# restrictions do apply in other respects; for example, they cover
# modification of the file, and distribution when not linked into
# a combined executable.)
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
"""Pygit2 test definitions.
These tests are run automatically with 'setup.py test', but can also be run
manually.
"""
import sys
import unittest
names = ['blob', 'commit', 'index', 'refs', 'repository', 'revwalk', 'tag',
'tree', 'status']
def test_suite():
    """Load and return the default suite built from the ``names`` list."""
    module_names = ['test.test_%s' % name for name in names]
    return unittest.defaultTestLoader.loadTestsFromNames(module_names)
def main():
    """Run the full test suite via unittest's CLI machinery.
    argv is truncated to the program name so any command-line arguments
    are ignored and defaultTest (test_suite) is always used.
    """
    unittest.main(module=__name__, defaultTest='test_suite', argv=sys.argv[:1])
if __name__ == '__main__':
    main()
| gpl-2.0 |
lucasdavila/web2py-appreport | modules/plugin_appreport/libs/appreport/libs/pisa/libs/reportlab/src/reportlab/platypus/doctemplate.py | 10 | 45792 | #Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/platypus/doctemplate.py
__version__=''' $Id: doctemplate.py 3791 2010-09-29 19:37:05Z andy $ '''
__doc__="""
This module contains the core structure of platypus.
rlatypus constructs documents. Document styles are determined by DocumentTemplates.
Each DocumentTemplate contains one or more PageTemplates which defines the look of the
pages of the document.
Each PageTemplate has a procedure for drawing the "non-flowing" part of the page
(for example the header, footer, page number, fixed logo graphic, watermark, etcetera) and
a set of Frames which enclose the flowing part of the page (for example the paragraphs,
tables, or non-fixed diagrams of the text).
A document is built when a DocumentTemplate is fed a sequence of Flowables.
The action of the build consumes the flowables in order and places them onto
frames on pages as space allows. When a frame runs out of space the next frame
of the page is used. If no frame remains a new page is created. A new page
can also be created if a page break is forced.
The special invisible flowable NextPageTemplate can be used to specify
the page template for the next page (which by default is the one being used
for the current frame).
"""
from reportlab.platypus.flowables import *
from reportlab.lib.units import inch
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.frames import Frame
from reportlab.rl_config import defaultPageSize, verbose
import reportlab.lib.sequencer
from reportlab.pdfgen import canvas
try:
set
except NameError:
from sets import Set as set
from base64 import encodestring, decodestring
try:
import cPickle as pickle
except ImportError:
import pickle
dumps = pickle.dumps
loads = pickle.loads
from types import *
import sys
import logging
logger = logging.getLogger("reportlab.platypus")
class LayoutError(Exception):
    """Raised when layout cannot make progress (e.g. repeated empty pages
    because a flowable is too large for any frame)."""
    pass
def _fSizeString(f):
w=getattr(f,'width',None)
if w is None:
w=getattr(f,'_width',None)
h=getattr(f,'height',None)
if h is None:
h=getattr(f,'_height',None)
if w is not None or h is not None:
if w is None: w='???'
if h is None: h='???'
return '(%s x %s)' % (w,h)
return ''
def _doNothing(canvas, doc):
"Dummy callback for onPage"
pass
class PTCycle(list):
    """A list that can be iterated endlessly: after the last element the
    iteration wraps back to index ``_restart`` (0 unless set otherwise)."""
    def __init__(self):
        list.__init__(self)
        self._restart = 0   # index to wrap back to after the end
        self._idx = 0       # next position to yield
    def cyclicIterator(self):
        """Yield entries forever, wrapping from the end to ``_restart``."""
        while True:
            yield self[self._idx]
            self._idx += 1
            if self._idx >= len(self):
                self._idx = self._restart
class IndexingFlowable(Flowable):
    """Abstract interface definition for flowables which might
    hold references to other pages or themselves be targets
    of cross-references. XRefStart, XRefDest, Table of Contents,
    Indexes etc."""
    def isIndexing(self):
        # marker: identifies this flowable as participating in indexing passes
        return 1
    def isSatisfied(self):
        # true when no further build pass is required by this flowable
        return 1
    def notify(self, kind, stuff):
        """This will be called by the framework wherever 'stuff' happens.
        'kind' will be a value that can be used to decide whether to
        pay attention or not."""
        pass
    def beforeBuild(self):
        """Called by multiBuild before it starts; use this to clear
        old contents"""
        pass
    def afterBuild(self):
        """Called after build ends but before isSatisfied"""
        pass
class ActionFlowable(Flowable):
    '''This Flowable is never drawn, it can be used for data driven controls
    For example to change a page template (from one column to two, for example)
    use NextPageTemplate which creates an ActionFlowable.
    '''
    def __init__(self,action=()):
        #must call super init to ensure it has a width and height (of zero),
        #as in some cases the packer might get called on it...
        Flowable.__init__(self)
        if type(action) not in (ListType, TupleType):
            action = (action,)
        # action is ('name', arg1, arg2, ...); dispatched to doc.handle_<name>
        self.action = tuple(action)
    def apply(self,doc):
        '''
        This is called by the doc.build processing to allow the instance to
        implement its behaviour
        '''
        action = self.action[0]
        args = tuple(self.action[1:])
        arn = 'handle_'+action
        # NOTE(review): this branch does nothing -- looks like a leftover
        # debugging hook for the 'main' page template
        if arn=="handle_nextPageTemplate" and args[0]=='main':
            pass
        try:
            getattr(doc,arn)(*args)
        except AttributeError, aerr:
            # only translate the AttributeError for the missing handler itself;
            # AttributeErrors raised *inside* a handler propagate unchanged
            if aerr.args[0]==arn:
                raise NotImplementedError, "Can't handle ActionFlowable(%s)" % action
            else:
                raise
        except:
            # re-raise with the action context appended, preserving the
            # original traceback (Python 2 three-argument raise)
            t, v, tb = sys.exc_info()
            raise t, "%s\n handle_%s args=%s"%(v,action,args), tb
    def __call__(self):
        return self
    def identity(self, maxLen=None):
        return "ActionFlowable: %s%s" % (str(self.action),self._frameName())
class LCActionFlowable(ActionFlowable):
    # ActionFlowable variant marked as causing a location (frame/page) change;
    # it is applied by the document engine, never laid out directly.
    locChanger = 1 #we cause a frame or page change
    def wrap(self, availWidth, availHeight):
        '''Should never be called.'''
        raise NotImplementedError
    def draw(self):
        '''Should never be called.'''
        raise NotImplementedError
class NextFrameFlowable(ActionFlowable):
    """Switch the story to the frame with index or name ``ix`` (dispatched
    to the document's handle_nextFrame)."""
    def __init__(self,ix,resume=0):
        ActionFlowable.__init__(self,('nextFrame',ix,resume))
class CurrentFrameFlowable(LCActionFlowable):
    """Location-changing action selecting frame ``ix`` via handle_currentFrame."""
    def __init__(self,ix,resume=0):
        # calls ActionFlowable.__init__ directly; LCActionFlowable defines no
        # __init__ of its own, so the effect is identical
        ActionFlowable.__init__(self,('currentFrame',ix,resume))
class NullActionFlowable(ActionFlowable):
    """An action flowable whose apply() deliberately does nothing."""
    def apply(self,doc):
        pass
class _FrameBreak(LCActionFlowable):
    '''
    A special ActionFlowable that allows setting doc._nextFrameIndex
    eg story.append(FrameBreak('mySpecialFrame'))
    '''
    def __call__(self,ix=None,resume=0):
        # calling the singleton returns a *new* instance targeting frame ix
        r = self.__class__(self.action+(resume,))
        r._ix = ix
        return r
    def apply(self,doc):
        # if a target frame was given, jump there before the frameEnd action
        if getattr(self,'_ix',None):
            doc.handle_nextFrame(self._ix)
        ActionFlowable.apply(self,doc)
# module-level singleton actions used directly in stories
FrameBreak = _FrameBreak('frameEnd')
PageBegin = LCActionFlowable('pageBegin')
def _evalMeasurement(n):
if type(n) is type(''):
from paraparser import _num
n = _num(n)
if type(n) is type(()): n = n[1]
return n
class FrameActionFlowable(Flowable):
    # Abstract base for flowables that act on the frame itself (see Indenter):
    # subclasses implement frameAction(frame) instead of wrap/draw.
    _fixedWidth = _fixedHeight = 1
    def __init__(self,*arg,**kw):
        raise NotImplementedError('Abstract Class')
    def frameAction(self,frame):
        raise NotImplementedError('Abstract Class')
class Indenter(FrameActionFlowable):
    """Adjust the frame's extra left/right indents by the given amounts.
    Values may be numbers or measurement strings understood by
    _evalMeasurement; nesting Indenters accumulates the adjustments,
    which makes context-sensitive indentation (e.g. nested lists) easy.
    """
    def __init__(self, left=0, right=0):
        self.left = _evalMeasurement(left)
        self.right = _evalMeasurement(right)
    def frameAction(self, frame):
        frame._leftExtraIndent = frame._leftExtraIndent + self.left
        frame._rightExtraIndent = frame._rightExtraIndent + self.right
class NotAtTopPageBreak(FrameActionFlowable):
    """Emit a PageBreak only if the current frame already has content."""
    def __init__(self):
        pass
    def frameAction(self,frame):
        if not frame._atTop:
            frame.add_generated_content(PageBreak())
class NextPageTemplate(ActionFlowable):
    """When you get to the next page, use the template specified (change to two column, for example) """
    def __init__(self,pt):
        # pt: a template id (string), an index (int), or a list of ids for
        # alternating templates -- see handle_nextPageTemplate
        ActionFlowable.__init__(self,('nextPageTemplate',pt))
class PageTemplate:
    """
    essentially a list of Frames and an onPage routine to call at the start
    of a page when this is selected. onPageEnd gets called at the end.
    derived classes can also implement beforeDrawPage and afterDrawPage if they want
    """
    def __init__(self,id=None,frames=[],onPage=_doNothing, onPageEnd=_doNothing,
                 pagesize=None):
        # id: name used when selecting this template (NextPageTemplate etc.)
        # frames: a Frame or list of Frames receiving the flowing content
        # onPage/onPageEnd: callbacks (canvas, doc) fired at page start/end
        # pagesize: optional (width, height) override for this template
        frames = frames or []
        if type(frames) not in (ListType,TupleType): frames = [frames]
        assert filter(lambda x: not isinstance(x,Frame), frames)==[], "frames argument error"
        self.id = id
        self.frames = frames
        self.onPage = onPage
        self.onPageEnd = onPageEnd
        self.pagesize = pagesize
    def beforeDrawPage(self,canv,doc):
        """Override this if you want additional functionality or prefer
        a class based page routine. Called before any flowables for
        this page are processed."""
        pass
    def checkPageSize(self,canv,doc):
        """This gets called by the template framework
        If canv size != template size then the canv size is set to
        the template size or if that's not available to the
        doc size.
        """
        #### NEVER EVER EVER COMPARE FLOATS FOR EQUALITY
        #RGB converting pagesizes to ints means we are accurate to one point
        #RGB I suggest we should be aiming a little better
        # NOTE(review): on Python 3, map() returns iterators, so these
        # comparisons would not compare sizes as intended; this file is
        # written for Python 2 (see the except-comma syntax elsewhere).
        cp = None
        dp = None
        sp = None
        if canv._pagesize: cp = map(int, canv._pagesize)
        if self.pagesize: sp = map(int, self.pagesize)
        if doc.pagesize: dp = map(int, doc.pagesize)
        if cp!=sp:
            if sp:
                canv.setPageSize(self.pagesize)
            elif cp!=dp:
                canv.setPageSize(doc.pagesize)
    def afterDrawPage(self, canv, doc):
        """This is called after the last flowable for the page has
        been processed. You might use this if the page header or
        footer needed knowledge of what flowables were drawn on
        this page."""
        pass
def _addGeneratedContent(flowables,frame):
S = getattr(frame,'_generated_content',None)
if S:
flowables[0:0] = S
del frame._generated_content
class onDrawStr(str):
    """A str subclass carrying onDraw callback metadata.
    Attributes ``onDraw`` (callback), ``label`` and optional ``kind`` are
    attached to the string value so draw-time code can recover them.
    """
    def __new__(cls, value, onDraw, label, kind=None):
        obj = str.__new__(cls, value)
        obj.onDraw = onDraw
        obj.label = label
        obj.kind = kind
        return obj
class PageAccumulator:
    '''gadget to accumulate information in a page
    and then allow it to be interrogated at the end
    of the page'''
    _count = 0
    def __init__(self,name=None):
        # each unnamed accumulator gets a unique class-derived name; the name
        # doubles as the canvas attribute used to locate it from drawn markup
        if name is None:
            name = self.__class__.__name__+str(self.__class__._count)
            self.__class__._count += 1
        self.name = name
        self.data = []
    def reset(self):
        # clear in place so existing references to the list stay valid
        self.data[:] = []
    def add(self,*args):
        self.data.append(args)
    def onDrawText(self,*args):
        # produce <onDraw> markup whose label is the pickled+base64 args;
        # __call__ reverses this when the drawing callback fires
        return '<onDraw name="%s" label="%s" />' % (self.name,encodestring(dumps(args)).strip())
    def __call__(self,canv,kind,label):
        self.add(*loads(decodestring(label)))
    def attachToPageTemplate(self,pt):
        # wrap any existing page callbacks so ours run first
        if pt.onPage:
            def onPage(canv,doc,oop=pt.onPage):
                self.onPage(canv,doc)
                oop(canv,doc)
        else:
            def onPage(canv,doc):
                self.onPage(canv,doc)
        pt.onPage = onPage
        if pt.onPageEnd:
            def onPageEnd(canv,doc,oop=pt.onPageEnd):
                self.onPageEnd(canv,doc)
                oop(canv,doc)
        else:
            def onPageEnd(canv,doc):
                self.onPageEnd(canv,doc)
        pt.onPageEnd = onPageEnd
    def onPage(self,canv,doc):
        '''this will be called at the start of the page'''
        setattr(canv,self.name,self) #push ourselves onto the canvas
        self.reset()
    def onPageEnd(self,canv,doc):
        '''this will be called at the end of a page'''
        self.pageEndAction(canv,doc)
        try:
            delattr(canv,self.name)
        except:
            pass
        self.reset()
    def pageEndAction(self,canv,doc):
        '''this should be overridden to do something useful'''
        pass
    def onDrawStr(self,value,*args):
        # wrap value in an onDrawStr whose label round-trips through __call__
        return onDrawStr(value,self,encodestring(dumps(args)).strip())
class BaseDocTemplate:
"""
First attempt at defining a document template class.
The basic idea is simple.
1) The document has a list of data associated with it
this data should derive from flowables. We'll have
special classes like PageBreak, FrameBreak to do things
like forcing a page end etc.
2) The document has one or more page templates.
3) Each page template has one or more frames.
4) The document class provides base methods for handling the
story events and some reasonable methods for getting the
story flowables into the frames.
5) The document instances can override the base handler routines.
Most of the methods for this class are not called directly by the user,
but in some advanced usages they may need to be overridden via subclassing.
EXCEPTION: doctemplate.build(...) must be called for most reasonable uses
since it builds a document using the page template.
Each document template builds exactly one document into a file specified
by the filename argument on initialization.
Possible keyword arguments for the initialization:
- pageTemplates: A list of templates. Must be nonempty. Names
assigned to the templates are used for referring to them so no two used
templates should have the same name. For example you might want one template
for a title page, one for a section first page, one for a first page of
a chapter and two more for the interior of a chapter on odd and even pages.
If this argument is omitted then at least one pageTemplate should be provided
using the addPageTemplates method before the document is built.
- pageSize: a 2-tuple or a size constant from reportlab/lib/pagesizes.pu.
Used by the SimpleDocTemplate subclass which does NOT accept a list of
pageTemplates but makes one for you; ignored when using pageTemplates.
- showBoundary: if set draw a box around the frame boundaries.
- leftMargin:
- rightMargin:
- topMargin:
- bottomMargin: Margin sizes in points (default 1 inch). These margins may be
overridden by the pageTemplates. They are primarily of interest for the
SimpleDocumentTemplate subclass.
- allowSplitting: If set flowables (eg, paragraphs) may be split across frames or pages
(default: 1)
- title: Internal title for document (does not automatically display on any page)
- author: Internal author for document (does not automatically display on any page)
"""
_initArgs = { 'pagesize':defaultPageSize,
'pageTemplates':[],
'showBoundary':0,
'leftMargin':inch,
'rightMargin':inch,
'topMargin':inch,
'bottomMargin':inch,
'allowSplitting':1,
'title':None,
'author':None,
'subject':None,
'creator':None,
'keywords':[],
'invariant':None,
'pageCompression':None,
'_pageBreakQuick':1,
'rotation':0,
'_debug':0,
'encrypt': None,
'cropMarks': None,
'enforceColorSpace': None,
}
_invalidInitArgs = ()
_firstPageTemplateIndex = 0
def __init__(self, filename, **kw):
"""create a document template bound to a filename (see class documentation for keyword arguments)"""
self.filename = filename
self._nameSpace = dict(doc=self)
self._lifetimes = {}
for k in self._initArgs.keys():
if k not in kw:
v = self._initArgs[k]
else:
if k in self._invalidInitArgs:
raise ValueError, "Invalid argument %s" % k
v = kw[k]
setattr(self,k,v)
p = self.pageTemplates
self.pageTemplates = []
self.addPageTemplates(p)
# facility to assist multi-build and cross-referencing.
# various hooks can put things into here - key is what
# you want, value is a page number. This can then be
# passed to indexing flowables.
self._pageRefs = {}
self._indexingFlowables = []
#callback facility for progress monitoring
self._onPage = None
self._onProgress = None
self._flowableCount = 0 # so we know how far to go
#infinite loop detection if we start doing lots of empty pages
self._curPageFlowableCount = 0
self._emptyPages = 0
self._emptyPagesAllowed = 10
#context sensitive margins - set by story, not from outside
self._leftExtraIndent = 0.0
self._rightExtraIndent = 0.0
self._calc()
self.afterInit()
def _calc(self):
self._rightMargin = self.pagesize[0] - self.rightMargin
self._topMargin = self.pagesize[1] - self.topMargin
self.width = self._rightMargin - self.leftMargin
self.height = self._topMargin - self.bottomMargin
def setPageCallBack(self, func):
'Simple progress monitor - func(pageNo) called on each new page'
self._onPage = func
def setProgressCallBack(self, func):
'''Cleverer progress monitor - func(typ, value) called regularly'''
self._onProgress = func
def clean_hanging(self):
'handle internal postponed actions'
while len(self._hanging):
self.handle_flowable(self._hanging)
def addPageTemplates(self,pageTemplates):
'add one or a sequence of pageTemplates'
if type(pageTemplates) not in (ListType,TupleType):
pageTemplates = [pageTemplates]
#this test below fails due to inconsistent imports!
#assert filter(lambda x: not isinstance(x,PageTemplate), pageTemplates)==[], "pageTemplates argument error"
for t in pageTemplates:
self.pageTemplates.append(t)
def handle_documentBegin(self):
'''implement actions at beginning of document'''
self._hanging = [PageBegin]
self.pageTemplate = self.pageTemplates[self._firstPageTemplateIndex]
self.page = 0
self.beforeDocument()
def handle_pageBegin(self):
"""Perform actions required at beginning of page.
shouldn't normally be called directly"""
self.page += 1
if self._debug: logger.debug("beginning page %d" % self.page)
self.pageTemplate.beforeDrawPage(self.canv,self)
self.pageTemplate.checkPageSize(self.canv,self)
self.pageTemplate.onPage(self.canv,self)
for f in self.pageTemplate.frames: f._reset()
self.beforePage()
#keep a count of flowables added to this page. zero indicates bad stuff
self._curPageFlowableCount = 0
if hasattr(self,'_nextFrameIndex'):
del self._nextFrameIndex
self.frame = self.pageTemplate.frames[0]
self.frame._debug = self._debug
self.handle_frameBegin()
def handle_pageEnd(self):
''' show the current page
check the next page template
hang a page begin
'''
self._removeVars(('page','frame'))
#detect infinite loops...
if self._curPageFlowableCount == 0:
self._emptyPages += 1
else:
self._emptyPages = 0
if self._emptyPages >= self._emptyPagesAllowed:
if 1:
ident = "More than %d pages generated without content - halting layout. Likely that a flowable is too large for any frame." % self._emptyPagesAllowed
#leave to keep apart from the raise
raise LayoutError(ident)
else:
pass #attempt to restore to good state
else:
if self._onProgress:
self._onProgress('PAGE', self.canv.getPageNumber())
self.pageTemplate.afterDrawPage(self.canv, self)
self.pageTemplate.onPageEnd(self.canv, self)
self.afterPage()
if self._debug: logger.debug("ending page %d" % self.page)
self.canv.setPageRotation(getattr(self.pageTemplate,'rotation',self.rotation))
self.canv.showPage()
if hasattr(self,'_nextPageTemplateCycle'):
#they are cycling through pages'; we keep the index
self.pageTemplate = self._nextPageTemplateCycle.next()
elif hasattr(self,'_nextPageTemplateIndex'):
self.pageTemplate = self.pageTemplates[self._nextPageTemplateIndex]
del self._nextPageTemplateIndex
if self._emptyPages==0:
pass #store good state here
self._hanging.append(PageBegin)
def handle_pageBreak(self,slow=None):
'''some might choose not to end all the frames'''
if self._pageBreakQuick and not slow:
self.handle_pageEnd()
else:
n = len(self._hanging)
while len(self._hanging)==n:
self.handle_frameEnd()
def handle_frameBegin(self,resume=0):
'''What to do at the beginning of a frame'''
f = self.frame
if f._atTop:
if self.showBoundary or self.frame.showBoundary:
self.frame.drawBoundary(self.canv)
f._leftExtraIndent = self._leftExtraIndent
f._rightExtraIndent = self._rightExtraIndent
def handle_frameEnd(self,resume=0):
    ''' Handles the semantics of the end of a frame. This includes the selection of
    the next frame or if this is the last frame then invoke pageEnd.
    '''
    self._removeVars(('frame',))
    # preserve the indents the finished frame was using
    self._leftExtraIndent = self.frame._leftExtraIndent
    self._rightExtraIndent = self.frame._rightExtraIndent
    f = self.frame
    if hasattr(self,'_nextFrameIndex'):
        # an explicit jump was requested via handle_nextFrame()
        self.frame = self.pageTemplate.frames[self._nextFrameIndex]
        self.frame._debug = self._debug
        del self._nextFrameIndex
        self.handle_frameBegin(resume)
    elif hasattr(f,'lastFrame') or f is self.pageTemplate.frames[-1]:
        # no frames left on this page: finish the page instead
        self.handle_pageEnd()
        self.frame = None
    else:
        # advance to the next frame of the current page template
        self.frame = self.pageTemplate.frames[self.pageTemplate.frames.index(f) + 1]
        self.frame._debug = self._debug
        self.handle_frameBegin()
def handle_nextPageTemplate(self,pt):
    '''On endPage change to the page template with name or index pt.

    pt may be a template id (string), an index into self.pageTemplates
    (int), or a list/tuple of template ids defining a repeating cycle
    ('*' marks the cycle restart position).

    Raises ValueError for unknown names or a bad cycle, TypeError for
    any other argument type.
    '''
    if type(pt) is StringType:
        # selecting a single template by name cancels any active cycle
        if hasattr(self, '_nextPageTemplateCycle'): del self._nextPageTemplateCycle
        for t in self.pageTemplates:
            if t.id == pt:
                self._nextPageTemplateIndex = self.pageTemplates.index(t)
                return
        # fixed: use the call form of raise for consistency with the
        # other raises in this method (the statement form is Python-2-only)
        raise ValueError("can't find template('%s')"%pt)
    elif type(pt) is IntType:
        if hasattr(self, '_nextPageTemplateCycle'): del self._nextPageTemplateCycle
        self._nextPageTemplateIndex = pt
    elif type(pt) in (ListType, TupleType):
        #used for alternating left/right pages
        #collect the refs to the template objects, complain if any are bad
        c = PTCycle()
        for ptn in pt:
            found = 0
            if ptn=='*':    #special case name used to short circuit the iteration
                c._restart = len(c)
                continue
            for t in self.pageTemplates:
                if t.id == ptn:
                    c.append(t)
                    found = 1
            if not found:
                raise ValueError("Cannot find page template called %s" % ptn)
        if not c:
            raise ValueError("No valid page templates in cycle")
        elif c._restart>len(c):
            raise ValueError("Invalid cycle restart position")
        #ensure we start on the first one
        self._nextPageTemplateCycle = c.cyclicIterator()
    else:
        raise TypeError("argument pt should be string or integer or list")
def handle_nextFrame(self,fx,resume=0):
    '''On endFrame change to the frame with name or index fx.

    The switch is recorded in _nextFrameIndex and performed by
    handle_frameEnd().  Raises ValueError for an unknown frame name,
    TypeError for any other argument type.
    '''
    if type(fx) is StringType:
        for f in self.pageTemplate.frames:
            if f.id == fx:
                self._nextFrameIndex = self.pageTemplate.frames.index(f)
                return
        raise ValueError("can't find frame('%s') in %r(%s) which has frames %r"%(fx,self.pageTemplate,self.pageTemplate.id,[(f,f.id) for f in self.pageTemplate.frames]))
    elif type(fx) is IntType:
        self._nextFrameIndex = fx
    else:
        # fixed: call form of raise, matching the ValueError above
        # (the comma statement form is Python-2-only syntax)
        raise TypeError("argument fx should be string or integer")
def handle_currentFrame(self,fx,resume=0):
    '''change to the frame with name or index fx'''
    # record the target first, then close the current frame so that
    # handle_frameEnd() performs the actual switch immediately
    self.handle_nextFrame(fx,resume)
    self.handle_frameEnd(resume)
def handle_breakBefore(self, flowables):
    '''preprocessing step to allow pageBreakBefore and frameBreakBefore attributes'''
    first = flowables[0]
    # if we insert a page break before, we'll process that, see it again,
    # and go in an infinite loop.  So we need to set a flag on the object
    # saying 'skip me'.  This should be unset on the next pass
    if hasattr(first, '_skipMeNextTime'):
        delattr(first, '_skipMeNextTime')
        return
    # this could all be made much quicker by putting the attributes
    # in to the flowables with a default value of 0
    if hasattr(first,'pageBreakBefore') and first.pageBreakBefore == 1:
        first._skipMeNextTime = 1
        # bug fix: the break must be inserted into the flowables queue,
        # not into the flowable itself (the three branches below already
        # do this; 'first.insert(...)' was an AttributeError waiting to happen)
        flowables.insert(0, PageBreak())
        return
    if hasattr(first,'style') and hasattr(first.style, 'pageBreakBefore') and first.style.pageBreakBefore == 1:
        first._skipMeNextTime = 1
        flowables.insert(0, PageBreak())
        return
    if hasattr(first,'frameBreakBefore') and first.frameBreakBefore == 1:
        first._skipMeNextTime = 1
        flowables.insert(0, FrameBreak())
        return
    if hasattr(first,'style') and hasattr(first.style, 'frameBreakBefore') and first.style.frameBreakBefore == 1:
        first._skipMeNextTime = 1
        flowables.insert(0, FrameBreak())
        return
def handle_keepWithNext(self, flowables):
    "implements keepWithNext"
    # find the run of leading flowables that asked to stay with their successor
    i = 0
    n = len(flowables)
    while i<n and flowables[i].getKeepWithNext(): i += 1
    if i:
        # include the first non-keeping follower as well, unless it is a
        # location changer
        if i<n and not getattr(flowables[i],'locChanger',None): i += 1
        K = KeepTogether(flowables[:i])
        mbe = getattr(self,'_multiBuildEdits',None)
        if mbe:
            # under multiBuild, record edits that restore keepWithNext
            # on each wrapped flowable after the pass finishes
            for f in K._content[:-1]:
                if hasattr(f,'keepWithNext'):
                    mbe((setattr,f,'keepWithNext',f.keepWithNext))
                else:
                    mbe((delattr,f,'keepWithNext')) #must get it from a style
                f.__dict__['keepWithNext'] = 0
        else:
            for f in K._content[:-1]:
                f.__dict__['keepWithNext'] = 0
        # replace the whole run with a single KeepTogether wrapper
        del flowables[:i]
        flowables.insert(0,K)
def _fIdent(self,f,maxLen=None,frame=None):
if frame: f._frame = frame
try:
return f.identity(maxLen)
finally:
if frame: del f._frame
def handle_flowable(self,flowables):
    '''try to handle one flowable from the front of list flowables.'''
    #allow document a chance to look at, modify or ignore
    #the object(s) about to be processed
    self.filterFlowables(flowables)
    # these may insert break flowables or wrap a leading run in KeepTogether
    self.handle_breakBefore(flowables)
    self.handle_keepWithNext(flowables)
    f = flowables[0]
    del flowables[0]
    if f is None:
        # filterFlowables() signals "discard" by setting the head to None
        return
    if isinstance(f,PageBreak):
        if isinstance(f,SlowPageBreak):
            self.handle_pageBreak(slow=1)
        else:
            self.handle_pageBreak()
        self.afterFlowable(f)
    elif isinstance(f,ActionFlowable):
        # action flowables operate on the document itself rather than drawing
        f.apply(self)
        self.afterFlowable(f)
    else:
        frame = self.frame
        canv = self.canv
        #try to fit it then draw it
        if frame.add(f, canv, trySplit=self.allowSplitting):
            if not isinstance(f,FrameActionFlowable):
                self._curPageFlowableCount += 1
                self.afterFlowable(f)
            _addGeneratedContent(flowables,frame)
        else:
            if self.allowSplitting:
                # see if this is a splittable thing
                S = frame.split(f,canv)
                n = len(S)
            else:
                n = 0
            if n:
                if not isinstance(S[0],(PageBreak,SlowPageBreak,ActionFlowable)):
                    # the first fragment came from split() and must fit now
                    if not frame.add(S[0], canv, trySplit=0):
                        ident = "Splitting error(n==%d) on page %d in\n%s" % (n,self.page,self._fIdent(f,60,frame))
                        #leave to keep apart from the raise
                        raise LayoutError(ident)
                    self._curPageFlowableCount += 1
                    self.afterFlowable(S[0])
                    flowables[0:0] = S[1:]  # put rest of splitted flowables back on the list
                    _addGeneratedContent(flowables,frame)
                else:
                    flowables[0:0] = S  # put splitted flowables back on the list
            else:
                if hasattr(f,'_postponed'):
                    # second time this flowable failed to fit: it fits nowhere
                    ident = "Flowable %s%s too large on page %d in frame %r%s of template %r" % \
                            (self._fIdent(f,60,frame),_fSizeString(f),self.page, self.frame.id,
                                    self.frame._aSpaceString(), self.pageTemplate.id)
                    #leave to keep apart from the raise
                    raise LayoutError(ident)
                # this ought to be cleared when they are finally drawn!
                f._postponed = 1
                mbe = getattr(self,'_multiBuildEdits',None)
                if mbe:
                    mbe((delattr,f,'_postponed'))
                flowables.insert(0,f)   # put the flowable back
                self.handle_frameEnd()
# Snapshots of the stock handlers, bound before any subclass override:
# a subclass that overrides e.g. handle_pageEnd() can still delegate to
# the default behaviour through self._handle_pageEnd().
#these are provided so that deriving classes can refer to them
_handle_documentBegin = handle_documentBegin
_handle_pageBegin = handle_pageBegin
_handle_pageEnd = handle_pageEnd
_handle_frameBegin = handle_frameBegin
_handle_frameEnd = handle_frameEnd
_handle_flowable = handle_flowable
_handle_nextPageTemplate = handle_nextPageTemplate
_handle_currentFrame = handle_currentFrame
_handle_nextFrame = handle_nextFrame
def _startBuild(self, filename=None, canvasmaker=canvas.Canvas):
    '''Create the output canvas, copy document metadata onto it and emit
    the start-of-document event.  filename overrides self.filename.'''
    self._calc()
    #each distinct pass gets a sequencer
    self.seq = reportlab.lib.sequencer.Sequencer()
    self.canv = canvasmaker(filename or self.filename,
                            pagesize=self.pagesize,
                            invariant=self.invariant,
                            pageCompression=self.pageCompression,
                            enforceColorSpace=self.enforceColorSpace,
                            )
    # setEncrypt may not exist on alternate canvasmaker implementations
    getattr(self.canv,'setEncrypt',lambda x: None)(self.encrypt)
    self.canv._cropMarks = self.cropMarks
    self.canv.setAuthor(self.author)
    self.canv.setTitle(self.title)
    self.canv.setSubject(self.subject)
    self.canv.setCreator(self.creator)
    self.canv.setKeywords(self.keywords)
    if self._onPage:
        self.canv.setPageCallBack(self._onPage)
    self.handle_documentBegin()
def _endBuild(self):
self._removeVars(('build','page','frame'))
if self._hanging!=[] and self._hanging[-1] is PageBegin:
del self._hanging[-1]
self.clean_hanging()
else:
self.clean_hanging()
self.handle_pageBreak()
if getattr(self,'_doSave',1): self.canv.save()
if self._onPage: self.canv.setPageCallBack(None)
def build(self, flowables, filename=None, canvasmaker=canvas.Canvas):
    """Build the document from a list of flowables.
    If the filename argument is provided then that filename is used
    rather than the one provided upon initialization.
    If the canvasmaker argument is provided then it will be used
    instead of the default.  For example a slideshow might use
    an alternate canvas which places 6 slides on a page (by
    doing translations, scalings and redefining the page break
    operations).
    """
    #assert filter(lambda x: not isinstance(x,Flowable), flowables)==[], "flowables argument error"
    flowableCount = len(flowables)
    if self._onProgress:
        self._onProgress('STARTED',0)
        self._onProgress('SIZE_EST', len(flowables))
    self._startBuild(filename,canvasmaker)
    #pagecatcher can drag in information from embedded PDFs and we want ours
    #to take priority, so cache and reapply our own info dictionary after the build.
    canv = self.canv
    self._savedInfo = canv._doc.info
    # counts flowables consumed; not otherwise read in this method
    handled = 0
    try:
        canv._doctemplate = self
        while len(flowables):
            self.clean_hanging()
            try:
                # remember the head so the except clause can attach its
                # trace info to whatever exception handle_flowable raises
                first = flowables[0]
                self.handle_flowable(flowables)
                handled += 1
            except:
                #if it has trace info, add it to the traceback message.
                if hasattr(first, '_traceInfo') and first._traceInfo:
                    exc = sys.exc_info()[1]
                    args = list(exc.args)
                    tr = first._traceInfo
                    args[0] += '\n(srcFile %s, line %d char %d to line %d char %d)' % (
                        tr.srcFile,
                        tr.startLineNo,
                        tr.startLinePos,
                        tr.endLineNo,
                        tr.endLinePos
                        )
                    exc.args = tuple(args)
                raise
            if self._onProgress:
                self._onProgress('PROGRESS',flowableCount - len(flowables))
    finally:
        del canv._doctemplate
        #reapply pagecatcher info
        canv._doc.info = self._savedInfo
    self._endBuild()
    if self._onProgress:
        self._onProgress('FINISHED',0)
def _allSatisfied(self):
"""Called by multi-build - are all cross-references resolved?"""
allHappy = 1
for f in self._indexingFlowables:
if not f.isSatisfied():
allHappy = 0
break
return allHappy
def notify(self, kind, stuff):
    """Broadcast an indexing event to every registered indexing flowable."""
    for listener in self._indexingFlowables:
        listener.notify(kind, stuff)
def pageRef(self, label):
    """hook to register a page number

    Records the current page number under label in _pageRefs so that
    cross-references can be resolved on a later multiBuild pass.
    """
    # parenthesized print form behaves identically on Python 2 and 3
    # (the bare print statement was Python-2-only syntax)
    if verbose: print("pageRef called with label '%s' on page %d" % (
        label, self.page))
    self._pageRefs[label] = self.page
def multiBuild(self, story,
               maxPasses = 10,
               **buildKwds
               ):
    """Makes multiple passes until all indexing flowables
    are happy.
    Returns number of passes

    NOTE: the Python 2 print statements (trailing-comma form) and the
    statement form of raise are kept as-is; this module is Python 2 code.
    """
    self._indexingFlowables = []
    #scan the story and keep a copy
    for thing in story:
        if thing.isIndexing():
            self._indexingFlowables.append(thing)
    #better fix for filename is a 'file' problem
    # suppress saving until every cross-reference is resolved
    self._doSave = 0
    passes = 0
    mbe = []
    # handle_keepWithNext/handle_flowable record undo actions here
    self._multiBuildEdits = mbe.append
    while 1:
        passes += 1
        if self._onProgress:
            self._onProgress('PASS', passes)
        if verbose: print 'building pass '+str(passes) + '...',
        for fl in self._indexingFlowables:
            fl.beforeBuild()
        # work with a copy of the story, since it is consumed
        tempStory = story[:]
        self.build(tempStory, **buildKwds)
        #self.notify('debug',None)
        for fl in self._indexingFlowables:
            fl.afterBuild()
        happy = self._allSatisfied()
        if happy:
            self._doSave = 0
            self.canv.save()
            break
        if passes > maxPasses:
            raise IndexError, "Index entries not resolved after %d passes" % maxPasses
        #work through any edits
        # each edit is a (callable, *args) tuple restoring mutated state
        while mbe:
            e = mbe.pop(0)
            e[0](*e[1:])
    del self._multiBuildEdits
    if verbose: print 'saved'
    return passes
# Subclass hooks -- no-op by default, called by the base machinery above.
#these are pure virtuals override in derived classes
#NB these get called at suitable places by the base class
#so if you derive and override the handle_xxx methods
#it's up to you to ensure that they maintain the needed consistency
def afterInit(self):
    """This is called after initialisation of the base class."""
    pass
def beforeDocument(self):
    """This is called before any processing is
    done on the document."""
    pass
def beforePage(self):
    """This is called at the beginning of page
    processing, and immediately before the
    beforeDrawPage method of the current page
    template."""
    pass
def afterPage(self):
    """This is called after page processing, and
    immediately after the afterDrawPage method
    of the current page template."""
    pass
def filterFlowables(self,flowables):
    '''called to filter flowables at the start of the main handle_flowable method.
    Upon return if flowables[0] has been set to None it is discarded and the main
    method returns.
    '''
    pass
def afterFlowable(self, flowable):
    '''called after a flowable has been rendered'''
    pass
_allowedLifetimes = 'page','frame','build','forever'
def docAssign(self,var,expr,lifetime):
    '''Assign var = (expr) in the document namespace via docExec.'''
    if not isinstance(expr,(str,unicode)):
        expr = str(expr)
    # strip both pieces once, at the point of use
    self.docExec('%s=(%s)' % (var.strip(), expr.strip()), lifetime)
def docExec(self,stmt,lifetime):
    '''Execute stmt in the document namespace and register any names it
    binds under the given lifetime.

    On failure "docExec ... failed!" is appended to the exception's args,
    any names the failed statement managed to bind are rolled back, and
    the exception is re-raised.  Raises ValueError for a bad lifetime.
    '''
    stmt = stmt.strip()
    NS = self._nameSpace
    # names present before execution; list() so membership survives mutation
    K0 = list(NS.keys())
    try:
        if lifetime not in self._allowedLifetimes:
            raise ValueError('bad lifetime %r not in %r'%(lifetime,self._allowedLifetimes))
        # call form of exec works on both Python 2 and 3
        exec(stmt, {}, NS)
    except:
        exc = sys.exc_info()[1]
        args = list(exc.args)
        msg = '\ndocExec %s lifetime=%r failed!' % (stmt,lifetime)
        args.append(msg)
        exc.args = tuple(args)
        # roll back names the failed statement bound.  Iterate over a list
        # copy: the original iterated NS.iterkeys() while deleting, which
        # raises "dictionary changed size during iteration".
        for k in list(NS.keys()):
            if k not in K0:
                del NS[k]
        raise
    self._addVars([k for k in NS.keys() if k not in K0],lifetime)
def _addVars(self,vars,lifetime):
'''add namespace variables to lifetimes lists'''
LT=self._lifetimes
for var in vars:
for v in LT.itervalues():
if var in v:
v.remove(var)
LT.setdefault(lifetime,set([])).add(var)
def _removeVars(self,lifetimes):
'''remove namespace variables for with lifetime in lifetimes'''
LT=self._lifetimes
NS=self._nameSpace
for lifetime in lifetimes:
for k in LT.setdefault(lifetime,[]):
try:
del NS[k]
except KeyError:
pass
del LT[lifetime]
def docEval(self,expr):
    '''Evaluate expr in the document namespace and return the result.

    On failure "docEval ... failed!" is appended to the exception's args
    (mirroring docExec) and the exception is re-raised.
    '''
    try:
        return eval(expr.strip(),{},self._nameSpace)
    except:
        exc = sys.exc_info()[1]
        args = list(exc.args)
        # append the context as a new arg instead of the original
        # 'args[-1] +=', which raised IndexError for arg-less exceptions
        # and TypeError for non-string args (e.g. KeyError(0)), masking
        # the real error; appending also matches docExec's behaviour.
        args.append('\ndocEval %s failed!' % expr)
        exc.args = tuple(args)
        raise
class SimpleDocTemplate(BaseDocTemplate):
    """A special case document template that will handle many simple documents.
    See documentation for BaseDocTemplate.  No pageTemplates are required
    for this special case.  Two page templates ('First' and 'Later') are
    inferred from the margin information and the onFirstPage, onLaterPages
    arguments to the build method.
    A document which has all pages with the same look except for the first
    page can be built using this special approach.
    """
    _invalidInitArgs = ('pageTemplates',)
    def handle_pageBegin(self):
        '''override base method to add a change of page template after the firstpage.
        '''
        self._handle_pageBegin()
        # every page after the first uses the 'Later' template
        self._handle_nextPageTemplate('Later')
    def build(self,flowables,onFirstPage=_doNothing, onLaterPages=_doNothing, canvasmaker=canvas.Canvas):
        """build the document using the flowables.  Annotate the first page using the onFirstPage
        function and later pages using the onLaterPages function.  The onXXX pages should follow
        the signature

              def myOnFirstPage(canvas, document):
                  # do annotations and modify the document
                  ...

        The functions can do things like draw logos, page numbers,
        footers, etcetera. They can use external variables to vary
        the look (for example providing page numbering or section names).
        """
        self._calc()    #in case we changed margins sizes etc
        # one full-page frame shared by both page templates
        frameT = Frame(self.leftMargin, self.bottomMargin, self.width, self.height, id='normal')
        self.addPageTemplates([PageTemplate(id='First',frames=frameT, onPage=onFirstPage,pagesize=self.pagesize),
                        PageTemplate(id='Later',frames=frameT, onPage=onLaterPages,pagesize=self.pagesize)])
        # subclasses may supply the page callbacks as methods instead
        if onFirstPage is _doNothing and hasattr(self,'onFirstPage'):
            self.pageTemplates[0].beforeDrawPage = self.onFirstPage
        if onLaterPages is _doNothing and hasattr(self,'onLaterPages'):
            self.pageTemplates[1].beforeDrawPage = self.onLaterPages
        BaseDocTemplate.build(self,flowables, canvasmaker=canvasmaker)
def progressCB(typ, value):
    """Example prototype for progress monitoring.
    This aims to provide info about what is going on
    during a big job.  It should enable, for example, a reasonably
    smooth progress bar to be drawn.  We design the argument
    signature to be predictable and conducive to programming in
    other (type safe) languages.  If set, this will be called
    repeatedly with pairs of values.  The first is a string
    indicating the type of call; the second is a numeric value.

    typ 'STARTING', value = 0
    typ 'SIZE_EST', value = numeric estimate of job size
    typ 'PASS', value = number of this rendering pass
    typ 'PROGRESS', value = number between 0 and SIZE_EST
    typ 'PAGE', value = page number of page
    typ 'FINISHED', value = 0

    The sequence is
        STARTING - always called once
        SIZE_EST - always called once
        PROGRESS - called often
        PAGE - called often when page is emitted
        FINISHED - called when really, really finished

    some juggling is needed to accurately estimate numbers of
    pages in pageDrawing mode.

    NOTE: the SIZE_EST is a guess.  It is possible that the
    PROGRESS value may slightly exceed it, or may even step
    back a little on rare occasions.  The only way to be
    really accurate would be to do two passes, and I don't
    want to take that performance hit.
    """
    # parenthesized print form behaves identically on Python 2 and 3
    # (the bare print statement was Python-2-only syntax)
    print('PROGRESS MONITOR:  %-10s   %d' % (typ, value))
if __name__ == '__main__':
    # Smoke-test demo: builds doctemplate.pdf from alternating sized boxes
    # and random paragraphs, decorating the first page differently.
    from reportlab.lib.styles import _baseFontName, _baseFontNameB
    def myFirstPage(canvas, doc):
        # first-page decoration: red sidebar rule plus title strings
        from reportlab.lib.colors import red
        PAGE_HEIGHT = canvas._pagesize[1]
        canvas.saveState()
        canvas.setStrokeColor(red)
        canvas.setLineWidth(5)
        canvas.line(66,72,66,PAGE_HEIGHT-72)
        canvas.setFont(_baseFontNameB,24)
        canvas.drawString(108, PAGE_HEIGHT-108, "TABLE OF CONTENTS DEMO")
        canvas.setFont(_baseFontName,12)
        canvas.drawString(4 * inch, 0.75 * inch, "First Page")
        canvas.restoreState()
    def myLaterPages(canvas, doc):
        # later pages: same sidebar rule plus a page-number footer
        from reportlab.lib.colors import red
        PAGE_HEIGHT = canvas._pagesize[1]
        canvas.saveState()
        canvas.setStrokeColor(red)
        canvas.setLineWidth(5)
        canvas.line(66,72,66,PAGE_HEIGHT-72)
        canvas.setFont(_baseFontName,12)
        canvas.drawString(4 * inch, 0.75 * inch, "Page %d" % doc.page)
        canvas.restoreState()
    def run():
        objects_to_draw = []
        from reportlab.lib.styles import ParagraphStyle
        #from paragraph import Paragraph
        from doctemplate import SimpleDocTemplate
        #need a style
        normal = ParagraphStyle('normal')
        normal.firstLineIndent = 18
        normal.spaceBefore = 6
        from reportlab.lib.randomtext import randomText
        import random
        for i in range(15):
            # alternate a randomly-sized box with a random paragraph
            height = 0.5 + (2*random.random())
            box = XBox(6 * inch, height * inch, 'Box Number %d' % i)
            objects_to_draw.append(box)
            para = Paragraph(randomText(), normal)
            objects_to_draw.append(para)
        SimpleDocTemplate('doctemplate.pdf').build(objects_to_draw,
            onFirstPage=myFirstPage,onLaterPages=myLaterPages)
    run()
| lgpl-3.0 |
veger/ansible | lib/ansible/plugins/lookup/keyring.py | 59 | 1901 | # (c) 2016, Samuel Boucher <boucher.samuel.c@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: keyring
author:
- Samuel Boucher <boucher.samuel.c@gmail.com>
version_added: "2.3"
requirements:
- keyring (python library)
short_description: grab secrets from the OS keyring
description:
- Allows you to access data stored in the OS provided keyring/keychain.
"""
EXAMPLES = """
- name : output secrets to screen (BAD IDEA)
debug:
msg: "Password: {{item}}"
with_keyring:
- 'servicename username'
- name: access mysql with password from keyring
mysql_db: login_password={{lookup('keyring','mysql joe')}} login_user=joe
"""
RETURN = """
_raw:
description: secrets stored
"""
HAS_KEYRING = True
from ansible.errors import AnsibleError
from ansible.utils.display import Display
try:
import keyring
except ImportError:
HAS_KEYRING = False
from ansible.plugins.lookup import LookupBase
display = Display()
class LookupModule(LookupBase):
    """Lookup plugin returning secrets stored in the OS keyring.

    Each term is a 'servicename username' pair; the matching password is
    fetched via the keyring library.
    """

    def run(self, terms, **kwargs):
        if not HAS_KEYRING:
            raise AnsibleError(u"Can't LOOKUP(keyring): missing required python library 'keyring'")

        display.vvvv(u"keyring: %s" % keyring.get_keyring())
        ret = []
        for term in terms:
            # split once; first field is the service, second the user
            fields = term.split()
            servicename, username = fields[0], fields[1]
            display.vvvv(u"username: %s, servicename: %s " % (username, servicename))
            password = keyring.get_password(servicename, username)
            if password is None:
                raise AnsibleError(u"servicename: %s for user %s not found" % (servicename, username))
            ret.append(password.rstrip())
        return ret
| gpl-3.0 |
elit3ge/SickRage | sickbeard/providers/womble.py | 7 | 2411 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import generic
from sickbeard import logger
from sickbeard import tvcache
class WombleProvider(generic.NZBProvider):
    """Usenet NZB provider backed by Womble's Index (newshost.co.za)."""
    def __init__(self):
        generic.NZBProvider.__init__(self, "Womble's Index")
        self.enabled = False
        self.public = True
        # NOTE(review): the cache is created before self.urls/self.url are
        # set -- assumes tvcache.TVCache.__init__ does not read them; confirm.
        self.cache = WombleCache(self)
        self.urls = {'base_url': 'https://newshost.co.za/'}
        self.url = self.urls['base_url']

    def isEnabled(self):
        # toggle consulted by the search machinery
        return self.enabled
class WombleCache(tvcache.TVCache):
    """RSS result cache for Womble's Index, polled at most every 15 minutes."""
    def __init__(self, provider):
        tvcache.TVCache.__init__(self, provider)

        # only poll Womble's Index every 15 minutes max
        self.minTime = 15

    def updateCache(self):
        # check if we should update
        if not self.shouldUpdate():
            return

        # clear cache
        self._clearCache()

        # set updated
        self.setLastUpdate()

        cl = []
        # one RSS feed per release category (x264 / SD / DVD / HD)
        for url in [self.provider.url + 'rss/?sec=tv-x264&fr=false',
                    self.provider.url + 'rss/?sec=tv-sd&fr=false',
                    self.provider.url + 'rss/?sec=tv-dvd&fr=false',
                    self.provider.url + 'rss/?sec=tv-hd&fr=false']:
            logger.log(u'Womble\'s Index cache update URL: ' + url, logger.DEBUG)
            for item in self.getRSSFeed(url)['entries'] or []:
                ci = self._parseItem(item)
                if ci is not None:
                    cl.append(ci)

        # commit all parsed items in one mass DB action
        if len(cl) > 0:
            myDB = self._getDB()
            myDB.mass_action(cl)

    def _checkAuth(self, data):
        # the feed title reads 'Invalid Link' when the request was rejected
        return data if data['feed'] and data['feed']['title'] != 'Invalid Link' else None
return data if data['feed'] and data['feed']['title'] != 'Invalid Link' else None
provider = WombleProvider()
| gpl-3.0 |
robhudson/django | tests/select_related_onetoone/models.py | 274 | 2483 | from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# Fixture models exercising select_related() across one-to-one relations,
# including model inheritance and a nullable one-to-one link.
@python_2_unicode_compatible
class User(models.Model):
    username = models.CharField(max_length=100)
    email = models.EmailField()

    def __str__(self):
        return self.username


@python_2_unicode_compatible
class UserProfile(models.Model):
    user = models.OneToOneField(User, models.CASCADE)
    city = models.CharField(max_length=100)
    state = models.CharField(max_length=2)

    def __str__(self):
        return "%s, %s" % (self.city, self.state)


@python_2_unicode_compatible
class UserStatResult(models.Model):
    results = models.CharField(max_length=50)

    def __str__(self):
        return 'UserStatResults, results = %s' % (self.results,)


@python_2_unicode_compatible
class UserStat(models.Model):
    # the one-to-one link doubles as this model's primary key
    user = models.OneToOneField(User, models.CASCADE, primary_key=True)
    posts = models.IntegerField()
    results = models.ForeignKey(UserStatResult, models.CASCADE)

    def __str__(self):
        return 'UserStat, posts = %s' % (self.posts,)


@python_2_unicode_compatible
class StatDetails(models.Model):
    base_stats = models.OneToOneField(UserStat, models.CASCADE)
    comments = models.IntegerField()

    def __str__(self):
        return 'StatDetails, comments = %s' % (self.comments,)


class AdvancedUserStat(UserStat):
    karma = models.IntegerField()


class Image(models.Model):
    name = models.CharField(max_length=100)


class Product(models.Model):
    name = models.CharField(max_length=100)
    # nullable one-to-one: cleared (not cascaded) when the image is deleted
    image = models.OneToOneField(Image, models.SET_NULL, null=True)


@python_2_unicode_compatible
class Parent1(models.Model):
    name1 = models.CharField(max_length=50)

    def __str__(self):
        return self.name1


@python_2_unicode_compatible
class Parent2(models.Model):
    # Avoid having two "id" fields in the Child1 subclass
    id2 = models.AutoField(primary_key=True)
    name2 = models.CharField(max_length=50)

    def __str__(self):
        return self.name2


@python_2_unicode_compatible
class Child1(Parent1, Parent2):
    # multiple inheritance from two concrete models
    value = models.IntegerField()

    def __str__(self):
        return self.name1


@python_2_unicode_compatible
class Child2(Parent1):
    # explicit one-to-one to Parent2 instead of inheriting from it
    parent2 = models.OneToOneField(Parent2, models.CASCADE)
    value = models.IntegerField()

    def __str__(self):
        return self.name1


class Child3(Child2):
    value3 = models.IntegerField()


class Child4(Child1):
    value4 = models.IntegerField()
| bsd-3-clause |
KaiSzuttor/espresso | testsuite/python/rotational-diffusion-aniso.py | 2 | 16657 | # Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
import unittest as ut
import unittest_decorators as utx
import espressomd
import tests_common
@utx.skipIfMissingFeatures(["ROTATION", "PARTICLE_ANISOTROPY",
                            "ROTATIONAL_INERTIA", "DIPOLES"])
class RotDiffAniso(ut.TestCase):
    # show full assertion messages on failure
    longMessage = True
    # round-off tolerance used by the diffusion checks
    round_error_prec = 1E-14
    # Handle for espresso system
    system = espressomd.System(box_l=[1.0, 1.0, 1.0])
    system.cell_system.skin = 5.0

    # The NVT thermostat parameters
    kT = 0.0
    gamma_global = np.zeros((3))

    # Particle properties
    J = [0.0, 0.0, 0.0]  # rotational inertia per body axis

    np.random.seed(4)  # deterministic random parameters across runs
def setUp(self):
    # start every test from a clean, time-zero system with no particles
    self.system.time = 0.0
    self.system.part.clear()
    if "BROWNIAN_DYNAMICS" in espressomd.features():
        # a previous test may have left a Brownian thermostat active
        self.system.thermostat.turn_off()
        # the default integrator is supposed implicitly
        self.system.integrator.set_nvt()
def add_particles_setup(self, n):
    """
    Adding particles according to the
    previously set parameters.

    Parameters
    ----------
    n : :obj:`int`
        Number of particles.

    """
    for ind in range(n):
        part_pos = np.random.random(3) * self.box
        # all three rotational degrees of freedom enabled
        self.system.part.add(rotation=(1, 1, 1), id=ind,
                             pos=part_pos)
        self.system.part[ind].rinertia = self.J
        if espressomd.has_features("ROTATION"):
            # start from rest in the body frame
            self.system.part[ind].omega_body = [0.0, 0.0, 0.0]
def set_anisotropic_param(self):
    """
    Select parameters for anisotropic particles:
    independent random values per body axis for both the rotational
    friction and the moment of inertia.

    """

    # NVT thermostat
    # Note: here & hereinafter specific variations in the random parameter
    # ranges are related to the test execution duration to achieve the
    # required statistical averages faster.  The friction gamma_global should
    # be large enough in order to have the small enough D ~ kT / gamma and
    # to observe the details of the original rotational diffusion: the
    # Perrin1936 (see the reference below) tests are visible only when the
    # diffusive rotation is ~pi due to the exponential temporal dependencies
    # (see the equations referred in the check_rot_diffusion()).
    # Also, t0 ~ J / gamma should be small enough in order to remove the
    # small-time-scale diffusion effects which do not fit the Perrin1936's
    # tests which are based on the partial differential equation
    # (eq. (68), Perrin1934) leading only to the simple classical
    # Einstein-Smoluchowski equations of the diffusion in a contrast of the
    # eq. (10.2.26) [N. Pottier, doi:10.1007/s10955-010-0114-6 (2010)].
    self.gamma_global = 1E2 * np.random.uniform(0.35, 1.05, (3))

    # Particles' properties
    # As far as the problem characteristic time is t0 ~ J / gamma
    # and the Langevin equation finite-difference approximation is stable
    # only for time_step << t0, it is needed to set the moment of inertia
    # higher than some minimal value.
    # Also, it is expected to test the large enough J.
    # It should be not very large, otherwise the thermalization will require
    # too much of the CPU time: the in silico time should clock over the
    # t0.
    self.J = np.random.uniform(1.5, 16.5, (3))
def set_isotropic_param(self):
    """
    Select parameters for isotropic particles: a single random friction
    value and a single random moment of inertia, replicated over all
    three body axes.

    """
    # NVT thermostat
    # see the comments in set_anisotropic_param()
    friction = 1E2 * np.random.uniform(0.35, 1.05)
    self.gamma_global[0] = self.gamma_global[1] = self.gamma_global[2] = friction
    # Particles' properties
    # see the comments in set_anisotropic_param()
    inertia = np.random.uniform(1.5, 16.5)
    self.J[0] = self.J[1] = self.J[2] = inertia
def rot_diffusion_param_setup(self):
    """
    Setup the parameters for the rotational diffusion
    test check_rot_diffusion().

    """

    # Time
    # The time step should be less than t0 ~ mass / gamma
    self.system.time_step = 3E-3

    # Space
    self.box = 10.0
    self.system.box_l = 3 * [self.box]
    # open boundaries in all directions
    self.system.periodicity = [0, 0, 0]

    # NVT thermostat
    # Just some temperature range to cover by the test:
    self.kT = np.random.uniform(0.5, 1.5)
def check_rot_diffusion(self, n):
    """
    The rotational diffusion tests based on the reference work
    [Perrin, F. (1936) Journal de Physique et Le Radium, 7(1), 1-11.
    https://doi.org/10.1051/jphysrad:01936007010100]
    with a theoretical background of
    [Perrin, F. (1934) Journal de Physique et Le Radium, 5(10), 497-511.
    https://doi.org/10.1051/jphysrad:01934005010049700]

    Measured ensemble averages of direction-cosine functions are
    compared against the analytic decay laws (eqs. 23-25, 32, 33 of
    Perrin 1936) at each observation time.

    Parameters
    ----------
    n : :obj:`int`
        Number of particles.
    """
    # Global diffusivity tensor in the body frame
    # (Einstein relation per principal axis):
    D = self.kT / self.gamma_global
    # Thermalizing...
    therm_steps = 100
    self.system.integrator.run(therm_steps)
    # Measuring...
    # Set the initial conditions according to the [Perrin1936], p.3.
    # The body angular velocity is rotated now, but there is only the
    # thermal velocity, hence, this does not impact the test and its
    # physical context.
    for ind in range(n):
        self.system.part[ind].quat = [1.0, 0.0, 0.0, 0.0]
    # Average direction cosines
    # Diagonal ones:
    dcosjj_validate = np.zeros((3))
    dcosjj_dev = np.zeros((3))
    # ... and the same diagonal elements squared:
    dcosjj2_validate = np.zeros((3))
    dcosjj2_dev = np.zeros((3))
    # The non-diagonal elements for 2 different tests: negative ("nn") and
    # positive ("pp") ones.
    dcosijpp_validate = np.ones((3, 3))
    dcosijpp_dev = np.zeros((3, 3))
    dcosijnn_validate = np.ones((3, 3))
    dcosijnn_dev = np.zeros((3, 3))
    # The non-diagonal elements to the power of 2
    dcosij2_validate = np.ones((3, 3))
    dcosij2_dev = np.zeros((3, 3))

    self.system.time = 0.0
    int_steps = 20
    loops = 100
    # Each pass integrates further, accumulates ensemble averages over
    # all n particles, and compares them with the Perrin1936 formulas
    # at the current self.system.time.
    for _ in range(loops):
        self.system.integrator.run(steps=int_steps)
        dcosjj = np.zeros((3))
        dcosjj2 = np.zeros((3))
        dcosijpp = np.zeros((3, 3))
        dcosijnn = np.zeros((3, 3))
        dcosij2 = np.zeros((3, 3))
        for ind in range(n):
            # Just a direction cosines functions averaging..
            dir_cos = tests_common.rotation_matrix_quat(self.system, ind)
            for j in range(3):
                # the LHS of eq. (23) [Perrin1936].
                dcosjj[j] += dir_cos[j, j]
                # the LHS of eq. (32) [Perrin1936].
                dcosjj2[j] += dir_cos[j, j]**2.0
                for i in range(3):
                    if i != j:
                        # the LHS of eq. (24) [Perrin1936].
                        dcosijpp[i, j] += dir_cos[i, i] * dir_cos[j, j] + \
                            dir_cos[i, j] * dir_cos[j, i]
                        # the LHS of eq. (25) [Perrin1936].
                        dcosijnn[i, j] += dir_cos[i, i] * dir_cos[j, j] - \
                            dir_cos[i, j] * dir_cos[j, i]
                        # the LHS of eq. (33) [Perrin1936].
                        dcosij2[i, j] += dir_cos[i, j]**2.0
        # Normalize sums to ensemble averages.
        dcosjj /= n
        dcosjj2 /= n
        dcosijpp /= n
        dcosijnn /= n
        dcosij2 /= n

        # Actual comparison.
        tolerance = 0.2
        # Too small values of the direction cosines are out of interest
        # compare to 0..1 range (their relative deviation is dominated
        # by noise, so such entries are excluded below).
        min_value = 0.14

        # Eq. (23) [Perrin1936].
        dcosjj_validate[0] = np.exp(-(D[1] + D[2]) * self.system.time)
        dcosjj_validate[1] = np.exp(-(D[0] + D[2]) * self.system.time)
        dcosjj_validate[2] = np.exp(-(D[0] + D[1]) * self.system.time)
        dcosjj_dev = np.absolute(
            dcosjj - dcosjj_validate) / dcosjj_validate
        for j in range(3):
            if np.absolute(dcosjj_validate[j]) < min_value:
                dcosjj_dev[j] = 0.0

        # Eq. (24) [Perrin1936].
        dcosijpp_validate[0, 1] = np.exp(
            -(4 * D[2] + D[1] + D[0]) * self.system.time)
        dcosijpp_validate[1, 0] = np.exp(
            -(4 * D[2] + D[1] + D[0]) * self.system.time)
        dcosijpp_validate[0, 2] = np.exp(
            -(4 * D[1] + D[2] + D[0]) * self.system.time)
        dcosijpp_validate[2, 0] = np.exp(
            -(4 * D[1] + D[2] + D[0]) * self.system.time)
        dcosijpp_validate[1, 2] = np.exp(
            -(4 * D[0] + D[2] + D[1]) * self.system.time)
        dcosijpp_validate[2, 1] = np.exp(
            -(4 * D[0] + D[2] + D[1]) * self.system.time)
        dcosijpp_dev = np.absolute(
            dcosijpp - dcosijpp_validate) / dcosijpp_validate
        for i in range(3):
            for j in range(3):
                if np.absolute(dcosijpp_validate[i, j]) < min_value:
                    dcosijpp_dev[i, j] = 0.0

        # Eq. (25) [Perrin1936].
        dcosijnn_validate[0, 1] = np.exp(-(D[1] + D[0]) * self.system.time)
        dcosijnn_validate[1, 0] = np.exp(-(D[1] + D[0]) * self.system.time)
        dcosijnn_validate[0, 2] = np.exp(-(D[2] + D[0]) * self.system.time)
        dcosijnn_validate[2, 0] = np.exp(-(D[2] + D[0]) * self.system.time)
        dcosijnn_validate[1, 2] = np.exp(-(D[2] + D[1]) * self.system.time)
        dcosijnn_validate[2, 1] = np.exp(-(D[2] + D[1]) * self.system.time)
        dcosijnn_dev = np.absolute(
            dcosijnn - dcosijnn_validate) / dcosijnn_validate
        for i in range(3):
            for j in range(3):
                if np.absolute(dcosijnn_validate[i, j]) < min_value:
                    dcosijnn_dev[i, j] = 0.0

        # Eq. (30) [Perrin1936]: mean diffusivity D0 and the pairwise
        # product average D1D1 used by eqs. (32) and (33).
        D0 = sum(D[:]) / 3.0
        D1D1 = 0.0
        for j in range(3):
            for i in range(3):
                if i != j:
                    D1D1 += D[i] * D[j]
        D1D1 /= 6.0
        # Technical workaround of a digital arithmetic issue for isotropic
        # particle: D0**2 == D1D1 would make sqrt(D0**2 - D1D1) vanish,
        # so D1D1 is nudged away by the configured rounding precision.
        if np.absolute((D0**2 - D1D1) / (D0**2 + D1D1)
                       ) < self.round_error_prec:
            D1D1 *= (1.0 - 2.0 * self.round_error_prec)

        # Eq. (32) [Perrin1936].
        dcosjj2_validate = 1. / 3. + (1. / 3.) * (1. + (D - D0) / (2. * np.sqrt(D0**2 - D1D1))) \
            * np.exp(-6. * (D0 - np.sqrt(D0**2 - D1D1)) * self.system.time) \
            + (1. / 3.) * (1. - (D - D0) / (2. * np.sqrt(D0**2 - D1D1))) \
            * np.exp(-6. * (D0 + np.sqrt(D0**2 - D1D1)) * self.system.time)
        dcosjj2_dev = np.absolute(
            dcosjj2 - dcosjj2_validate) / dcosjj2_validate
        for j in range(3):
            if np.absolute(dcosjj2_validate[j]) < min_value:
                dcosjj2_dev[j] = 0.0

        # Eq. (33) [Perrin1936].
        dcosij2_validate[0, 1] = 1. / 3. - (1. / 6.) * (1. - (D[2] - D0) / (2. * np.sqrt(D0**2 - D1D1))) \
            * np.exp(-6. * (D0 - np.sqrt(D0**2 - D1D1)) * self.system.time) \
            - (1. / 6.) * (1. + (D[2] - D0) / (2. * np.sqrt(D0**2 - D1D1))) \
            * np.exp(-6. * (D0 + np.sqrt(D0**2 - D1D1)) * self.system.time)
        dcosij2_validate[1, 0] = dcosij2_validate[0, 1]
        dcosij2_validate[0, 2] = 1. / 3. - (1. / 6.) * (1. - (D[1] - D0) / (2. * np.sqrt(D0**2 - D1D1))) \
            * np.exp(-6. * (D0 - np.sqrt(D0**2 - D1D1)) * self.system.time) \
            - (1. / 6.) * (1. + (D[1] - D0) / (2. * np.sqrt(D0**2 - D1D1))) \
            * np.exp(-6. * (D0 + np.sqrt(D0**2 - D1D1)) * self.system.time)
        dcosij2_validate[2, 0] = dcosij2_validate[0, 2]
        dcosij2_validate[1, 2] = 1. / 3. - (1. / 6.) * (1. - (D[0] - D0) / (2. * np.sqrt(D0**2 - D1D1))) \
            * np.exp(-6. * (D0 - np.sqrt(D0**2 - D1D1)) * self.system.time) \
            - (1. / 6.) * (1. + (D[0] - D0) / (2. * np.sqrt(D0**2 - D1D1))) \
            * np.exp(-6. * (D0 + np.sqrt(D0**2 - D1D1)) * self.system.time)
        dcosij2_validate[2, 1] = dcosij2_validate[1, 2]
        dcosij2_dev = np.absolute(
            dcosij2 - dcosij2_validate) / dcosij2_validate
        for i in range(3):
            for j in range(3):
                if np.absolute(dcosij2_validate[i, j]) < min_value:
                    dcosij2_dev[i, j] = 0.0

        # Assert every (screened) relative deviation is within tolerance.
        for j in range(3):
            self.assertLessEqual(
                abs(dcosjj_dev[j]), tolerance,
                msg='Relative deviation dcosjj_dev[{0}] in a rotational '
                    'diffusion is too large: {1}'.format(j, dcosjj_dev[j]))
            self.assertLessEqual(
                abs(dcosjj2_dev[j]), tolerance,
                msg='Relative deviation dcosjj2_dev[{0}] in a rotational '
                    'diffusion is too large: {1}'.format(j, dcosjj2_dev[j]))
            for i in range(3):
                if i != j:
                    self.assertLessEqual(
                        abs(dcosijpp_dev[i, j]), tolerance,
                        msg='Relative deviation dcosijpp_dev[{0},{1}] in a '
                            'rotational diffusion is too large: {2}'
                            .format(i, j, dcosijpp_dev[i, j]))
                    self.assertLessEqual(
                        abs(dcosijnn_dev[i, j]), tolerance,
                        msg='Relative deviation dcosijnn_dev[{0},{1}] in a '
                            'rotational diffusion is too large: {2}'
                            .format(i, j, dcosijnn_dev[i, j]))
                    self.assertLessEqual(
                        abs(dcosij2_dev[i, j]), tolerance,
                        msg='Relative deviation dcosij2_dev[{0},{1}] in a '
                            'rotational diffusion is too large: {2}'
                            .format(i, j, dcosij2_dev[i, j]))
# Langevin Dynamics / Anisotropic
def test_case_00(self):
    """Rotational diffusion under a Langevin thermostat with
    anisotropic friction."""
    n_part = 800
    self.rot_diffusion_param_setup()
    self.set_anisotropic_param()
    self.add_particles_setup(n_part)
    self.system.thermostat.set_langevin(
        seed=42, kT=self.kT, gamma=self.gamma_global)
    # Integrate and validate against the Perrin (1936) predictions.
    self.check_rot_diffusion(n_part)
# Langevin Dynamics / Isotropic
def test_case_01(self):
    """Rotational diffusion under a Langevin thermostat with
    isotropic friction."""
    n_part = 800
    self.rot_diffusion_param_setup()
    self.set_isotropic_param()
    self.add_particles_setup(n_part)
    self.system.thermostat.set_langevin(
        seed=42, kT=self.kT, gamma=self.gamma_global)
    # Integrate and validate against the Perrin (1936) predictions.
    self.check_rot_diffusion(n_part)
# Only defined when the espressomd build has the feature compiled in.
if "BROWNIAN_DYNAMICS" in espressomd.features():
    # Brownian Dynamics / Isotropic
    def test_case_10(self):
        """Rotational diffusion with the Brownian thermostat/integrator."""
        n = 800
        # Drop any thermostat left over from a previous test case.
        self.system.thermostat.turn_off()
        self.rot_diffusion_param_setup()
        self.set_isotropic_param()
        self.add_particles_setup(n)
        self.system.thermostat.set_brownian(
            kT=self.kT, gamma=self.gamma_global, seed=42)
        self.system.integrator.set_brownian_dynamics()
        # Actual integration and validation run
        self.check_rot_diffusion(n)
if __name__ == '__main__':
    # Run the whole test module (`ut` is the unittest alias imported at
    # the top of the file — not visible in this chunk; TODO confirm).
    ut.main()
| gpl-3.0 |
datalogics/scons | src/engine/SCons/JobTests.py | 2 | 18800 | #
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import unittest
import random
import math
import SCons.Job
import sys
import time
# a large number: upper bound for the busy-work loop in RandomTask
num_sines = 10000

# how many parallel jobs to perform for the test
num_jobs = 11

# how many tasks to perform for the test (several tasks per job so the
# job pool is kept busy)
num_tasks = num_jobs*5
class DummyLock:
    """Stand-in for threading.Lock on interpreters without thread
    support; both operations are no-ops."""

    def acquire(self):
        """No-op; a real lock would block here."""
        return None

    def release(self):
        """No-op; a real lock would be released here."""
        return None
class NoThreadsException:
    """Raised by the ParallelTestCase when the interpreter cannot run
    threads."""

    def __str__(self):
        return "the interpreter doesn't support threads"
class Task:
    """A dummy task class for testing purposes.

    Records its lifecycle (prepare/execute/executed/failed/postprocess)
    on the owning Taskmaster so the tests can verify ordering and counts.
    """

    def __init__(self, i, taskmaster):
        # i is the 1-based creation index handed out by
        # Taskmaster.next_task().
        self.i = i
        self.taskmaster = taskmaster
        self.was_executed = 0
        self.was_prepared = 0

    def prepare(self):
        self.was_prepared = 1

    def _do_something(self):
        # Hook for subclasses (e.g. RandomTask) to add per-task work.
        pass

    def execute(self):
        self.taskmaster.test_case.failUnless(self.was_prepared,
                                             "the task wasn't prepared")
        # Record start order under the guard (other worker threads append
        # to the same lists concurrently).
        self.taskmaster.guard.acquire()
        self.taskmaster.begin_list.append(self.i)
        self.taskmaster.guard.release()

        self._do_something()

        self.was_executed = 1

        # Record completion order under the guard.
        self.taskmaster.guard.acquire()
        self.taskmaster.end_list.append(self.i)
        self.taskmaster.guard.release()

    def executed(self):
        # Called by the Job machinery after a successful execute().
        self.taskmaster.num_executed = self.taskmaster.num_executed + 1

        self.taskmaster.test_case.failUnless(self.was_prepared,
                                             "the task wasn't prepared")
        self.taskmaster.test_case.failUnless(self.was_executed,
                                             "the task wasn't really executed")
        self.taskmaster.test_case.failUnless(isinstance(self, Task),
                                             "the task wasn't really a Task instance")

    def failed(self):
        # Called on failure; stopping the taskmaster suppresses further
        # task iteration.
        self.taskmaster.num_failed = self.taskmaster.num_failed + 1
        self.taskmaster.stop = 1
        self.taskmaster.test_case.failUnless(self.was_prepared,
                                             "the task wasn't prepared")

    def postprocess(self):
        self.taskmaster.num_postprocessed = self.taskmaster.num_postprocessed + 1
class RandomTask(Task):
    """Task that burns a random amount of CPU time, so parallel runs
    finish out of order."""

    def _do_something(self):
        # do something that will take some random amount of time:
        for i in range(random.randrange(0, num_sines, 1)):
            x = math.sin(i)
        time.sleep(0.01)
class ExceptionTask:
    """A dummy task class for testing purposes.

    Its execute() always raises, exercising the Job classes' failure
    handling.
    """

    def __init__(self, i, taskmaster):
        self.taskmaster = taskmaster
        self.was_prepared = 0

    def prepare(self):
        self.was_prepared = 1

    def execute(self):
        # NOTE(review): a Python-2 "string exception".  On Python >= 2.6
        # raising a string itself raises TypeError, which still makes the
        # task count as failed — confirm on the supported interpreters.
        raise "exception"

    def executed(self):
        # NOTE(review): apparently never reached in these tests (execute()
        # always raises); self.was_executed is never assigned in this
        # class, so this method would raise AttributeError if it ran.
        self.taskmaster.num_executed = self.taskmaster.num_executed + 1

        self.taskmaster.test_case.failUnless(self.was_prepared,
                                             "the task wasn't prepared")
        self.taskmaster.test_case.failUnless(self.was_executed,
                                             "the task wasn't really executed")
        self.taskmaster.test_case.failUnless(self.__class__ is Task,
                                             "the task wasn't really a Task instance")

    def failed(self):
        self.taskmaster.num_failed = self.taskmaster.num_failed + 1
        self.taskmaster.stop = 1
        self.taskmaster.test_case.failUnless(self.was_prepared,
                                             "the task wasn't prepared")

    def postprocess(self):
        self.taskmaster.num_postprocessed = self.taskmaster.num_postprocessed + 1

    def exception_set(self):
        # Forward exception recording to the taskmaster.
        self.taskmaster.exception_set()
class Taskmaster:
    """A dummy taskmaster class for testing the job classes.

    Hands out `n` tasks of class `Task` and counts how often each
    lifecycle callback is invoked.
    """

    def __init__(self, n, test_case, Task):
        """n is the number of dummy tasks to perform."""
        self.test_case = test_case
        # Set to 1 by a failing task to stop iteration.
        self.stop = None
        self.num_tasks = n
        self.num_iterated = 0
        self.num_executed = 0
        self.num_failed = 0
        self.num_postprocessed = 0
        self.Task = Task
        # 'guard' serializes concurrent appends to begin_list/end_list;
        # fall back to a no-op lock when threads are unavailable.
        try:
            import threading
            self.guard = threading.Lock()
        except:
            self.guard = DummyLock()
        # keep track of the order tasks are begun in
        self.begin_list = []
        # keep track of the order tasks are completed in
        self.end_list = []

    def next_task(self):
        # Return the next task, or None when stopped or exhausted.
        if self.stop or self.all_tasks_are_iterated():
            return None
        else:
            self.num_iterated = self.num_iterated + 1
            return self.Task(self.num_iterated, self)

    def all_tasks_are_executed(self):
        return self.num_executed == self.num_tasks

    def all_tasks_are_iterated(self):
        return self.num_iterated == self.num_tasks

    def all_tasks_are_postprocessed(self):
        return self.num_postprocessed == self.num_tasks

    def tasks_were_serial(self):
        "analyze the task order to see if they were serial"
        # Serial execution means task k begins and ends before task k+1,
        # i.e. both order lists are exactly 1..num_tasks.
        serial = 1  # assume the tasks were serial
        for i in range(num_tasks):
            serial = serial and (self.begin_list[i]
                                 == self.end_list[i]
                                 == (i + 1))
        return serial

    def exception_set(self):
        # No-op: exceptions are accounted for via failed().
        pass
# Original SCons.Job.ThreadPool, saved while ParallelTestCase temporarily
# substitutes its instrumented WaitThreadPool.
SaveThreadPool = None
# Chronological record of put()/get() calls made by WaitThreadPool.
ThreadPoolCallList = []
class ParallelTestCase(unittest.TestCase):
    def runTest(self):
        "test parallel jobs"

        # Parallel Jobs require threads; signal the caller otherwise.
        try:
            import threading
        except:
            raise NoThreadsException()

        taskmaster = Taskmaster(num_tasks, self, RandomTask)
        jobs = SCons.Job.Jobs(num_jobs, taskmaster)
        jobs.run()

        self.failUnless(not taskmaster.tasks_were_serial(),
                        "the tasks were not executed in parallel")
        self.failUnless(taskmaster.all_tasks_are_executed(),
                        "all the tests were not executed")
        self.failUnless(taskmaster.all_tasks_are_iterated(),
                        "all the tests were not iterated over")
        self.failUnless(taskmaster.all_tasks_are_postprocessed(),
                        "all the tests were not postprocessed")
        self.failIf(taskmaster.num_failed,
                    "some task(s) failed to execute")

        # Verify that parallel jobs will pull all of the completed tasks
        # out of the queue at once, instead of one by one.  We do this by
        # replacing the default ThreadPool class with one that records the
        # order in which tasks are put() and get() to/from the pool, and
        # which sleeps a little bit before call get() to let the initial
        # tasks complete and get their notifications on the resultsQueue.
        class SleepTask(Task):
            def _do_something(self):
                time.sleep(0.1)

        global SaveThreadPool
        SaveThreadPool = SCons.Job.ThreadPool

        class WaitThreadPool(SaveThreadPool):
            def put(self, task):
                ThreadPoolCallList.append('put(%s)' % task.i)
                return SaveThreadPool.put(self, task)
            def get(self):
                # Give the first tasks time to finish before draining.
                time.sleep(0.5)
                result = SaveThreadPool.get(self)
                ThreadPoolCallList.append('get(%s)' % result[0].i)
                return result

        SCons.Job.ThreadPool = WaitThreadPool
        try:
            taskmaster = Taskmaster(3, self, SleepTask)
            jobs = SCons.Job.Jobs(2, taskmaster)
            jobs.run()

            # The key here is that we get(1) and get(2) from the
            # resultsQueue before we put(3), but get(1) and get(2) can
            # be in either order depending on how the first two parallel
            # tasks get scheduled by the operating system.
            expect = [
                ['put(1)', 'put(2)', 'get(1)', 'get(2)', 'put(3)', 'get(3)'],
                ['put(1)', 'put(2)', 'get(2)', 'get(1)', 'put(3)', 'get(3)'],
            ]
            assert ThreadPoolCallList in expect, ThreadPoolCallList
        finally:
            # Always restore the real ThreadPool for later test cases.
            SCons.Job.ThreadPool = SaveThreadPool
class SerialTestCase(unittest.TestCase):
    def runTest(self):
        "test a serial job"

        # A single job must process every task strictly in order.
        taskmaster = Taskmaster(num_tasks, self, RandomTask)
        jobs = SCons.Job.Jobs(1, taskmaster)
        jobs.run()

        self.failUnless(taskmaster.tasks_were_serial(),
                        "the tasks were not executed in series")
        self.failUnless(taskmaster.all_tasks_are_executed(),
                        "all the tests were not executed")
        self.failUnless(taskmaster.all_tasks_are_iterated(),
                        "all the tests were not iterated over")
        self.failUnless(taskmaster.all_tasks_are_postprocessed(),
                        "all the tests were not postprocessed")
        self.failIf(taskmaster.num_failed,
                    "some task(s) failed to execute")
class NoParallelTestCase(unittest.TestCase):
    def runTest(self):
        "test handling lack of parallel support"

        # Force Parallel construction to fail so Jobs must fall back
        # to a single serial job.
        def NoParallel(tm, num):
            raise NameError

        save_Parallel = SCons.Job.Parallel
        SCons.Job.Parallel = NoParallel
        try:
            taskmaster = Taskmaster(num_tasks, self, RandomTask)
            jobs = SCons.Job.Jobs(2, taskmaster)
            self.failUnless(jobs.num_jobs == 1,
                            "unexpected number of jobs %d" % jobs.num_jobs)
            jobs.run()
            self.failUnless(taskmaster.tasks_were_serial(),
                            "the tasks were not executed in series")
            self.failUnless(taskmaster.all_tasks_are_executed(),
                            "all the tests were not executed")
            self.failUnless(taskmaster.all_tasks_are_iterated(),
                            "all the tests were not iterated over")
            self.failUnless(taskmaster.all_tasks_are_postprocessed(),
                            "all the tests were not postprocessed")
            self.failIf(taskmaster.num_failed,
                        "some task(s) failed to execute")
        finally:
            # Restore the real Parallel implementation for later tests.
            SCons.Job.Parallel = save_Parallel
class SerialExceptionTestCase(unittest.TestCase):
    def runTest(self):
        "test a serial job with tasks that raise exceptions"

        taskmaster = Taskmaster(num_tasks, self, ExceptionTask)
        jobs = SCons.Job.Jobs(1, taskmaster)
        jobs.run()

        # In serial mode the first failure stops iteration immediately,
        # so exactly one task goes through the fail/postprocess path.
        self.failIf(taskmaster.num_executed,
                    "a task was executed")
        self.failUnless(taskmaster.num_iterated == 1,
                        "exactly one task should have been iterated")
        self.failUnless(taskmaster.num_failed == 1,
                        "exactly one task should have failed")
        self.failUnless(taskmaster.num_postprocessed == 1,
                        "exactly one task should have been postprocessed")
class ParallelExceptionTestCase(unittest.TestCase):
    def runTest(self):
        "test parallel jobs with tasks that raise exceptions"

        taskmaster = Taskmaster(num_tasks, self, ExceptionTask)
        jobs = SCons.Job.Jobs(num_jobs, taskmaster)
        jobs.run()

        # In parallel mode several tasks may already be in flight when
        # the first failure occurs, hence only lower bounds are checked.
        self.failIf(taskmaster.num_executed,
                    "a task was executed")
        self.failUnless(taskmaster.num_iterated >= 1,
                        "one or more task should have been iterated")
        self.failUnless(taskmaster.num_failed >= 1,
                        "one or more tasks should have failed")
        self.failUnless(taskmaster.num_postprocessed >= 1,
                        "one or more tasks should have been postprocessed")
#---------------------------------------------------------------------
# Above tested Job object with contrived Task and Taskmaster objects.
# Now test Job object with actual Task and Taskmaster objects.
import SCons.Taskmaster
import SCons.Node
import time
class testnode (SCons.Node.Node):
    """Base node for the real-Taskmaster tests; records the state each
    node is expected to end up in."""
    def __init__(self):
        SCons.Node.Node.__init__(self)
        self.expect_to_be = SCons.Node.executed
class goodnode (testnode):
    """Node that builds successfully (default expectations)."""
    pass
class slowgoodnode (goodnode):
    """Successful node whose prepare() is artificially slow."""
    def prepare(self):
        # Delay to allow scheduled Jobs to run while the dispatcher
        # sleeps. Keep this short because it affects the time taken
        # by this test.
        time.sleep(0.15)
        goodnode.prepare(self)
class badnode (goodnode):
    """Node whose build() fails immediately."""
    def __init__(self):
        goodnode.__init__(self)
        self.expect_to_be = SCons.Node.failed
    def build(self, **kw):
        # NOTE(review): Python-2 string exception; on Python >= 2.6 this
        # raises TypeError instead, which still fails the build.
        raise 'badnode exception'
class slowbadnode (badnode):
    """Failing node whose build() takes a while before raising."""
    def build(self, **kw):
        # Appears to take a while to build, allowing faster builds to
        # overlap.  Time duration is not especially important, but if
        # it is faster than slowgoodnode then these could complete
        # while the scheduler is sleeping.
        time.sleep(0.05)
        raise 'slowbadnode exception'
class badpreparenode (badnode):
    """Node that fails already in prepare(), before build() is reached."""
    def prepare(self):
        raise 'badpreparenode exception'
class _SConsTaskTest(unittest.TestCase):
    """Shared driver that runs Jobs against the real SCons Taskmaster
    with various good/bad node mixtures."""

    def _test_seq(self, num_jobs):
        # Each inner list is a repeating pattern of node classes used to
        # populate the node list for one run.
        for node_seq in [
            [goodnode],
            [badnode],
            [slowbadnode],
            [slowgoodnode],
            [badpreparenode],
            [goodnode, badnode],
            [slowgoodnode, badnode],
            [goodnode, slowbadnode],
            [goodnode, goodnode, goodnode, slowbadnode],
            [goodnode, slowbadnode, badpreparenode, slowgoodnode],
            [goodnode, slowbadnode, slowgoodnode, badnode]
        ]:
            self._do_test(num_jobs, node_seq)

    def _do_test(self, num_jobs, node_seq):
        # Instantiate num_tasks nodes by cycling through the pattern.
        testnodes = []
        for tnum in range(num_tasks):
            testnodes.append(node_seq[tnum % len(node_seq)]())

        taskmaster = SCons.Taskmaster.Taskmaster(testnodes)

        jobs = SCons.Job.Jobs(num_jobs, taskmaster)

        # Exceptions thrown by tasks are not actually propagated to
        # this level, but are instead stored in the Taskmaster.
        jobs.run()

        # Now figure out if tests proceeded correctly.  The first test
        # that fails will shutdown the initiation of subsequent tests,
        # but any tests currently queued for execution will still be
        # processed, and any tests that completed before the failure
        # would have resulted in new tests being queued for execution.

        # Apply the following operational heuristics of Job.py:
        #  0) An initial jobset of tasks will be queued before any
        #     good/bad results are obtained (from "execute" of task in
        #     thread).
        #  1) A goodnode will complete immediately on its thread and
        #     allow another node to be queued for execution.
        #  2) A badnode will complete immediately and suppress any
        #     subsequent execution queuing, but all currently queued
        #     tasks will still be processed.
        #  3) A slowbadnode will fail later.  It will block slots in
        #     the job queue.  Nodes that complete immediately will
        #     allow other nodes to be queued in their place, and this
        #     will continue until either (#2) above or until all job
        #     slots are filled with slowbadnode entries.

        # One approach to validating this test would be to try to
        # determine exactly how many nodes executed, how many didn't,
        # and the results of each, and then to assert failure on any
        # mismatch (including the total number of built nodes).
        # However, while this is possible to do for a single-processor
        # system, it is nearly impossible to predict correctly for a
        # multi-processor system and still test the characteristics of
        # delayed execution nodes.  Stated another way, multithreading
        # is inherently non-deterministic unless you can completely
        # characterize the entire system, and since that's not
        # possible here, we shouldn't try.

        # Therefore, this test will simply scan the set of nodes to
        # see if the node was executed or not and if it was executed
        # that it obtained the expected value for that node
        # (i.e. verifying we don't get failure crossovers or
        # mislabelling of results).
        for N in testnodes:
            self.failUnless(N.get_state() in [SCons.Node.no_state, N.expect_to_be],
                            "node ran but got unexpected result")

        # At least one node must have run (non-zero state).
        self.failUnless(filter(lambda N: N.get_state(), testnodes),
                        "no nodes ran at all.")
class SerialTaskTest(_SConsTaskTest):
    def runTest(self):
        "test serial jobs with actual Taskmaster and Task"
        self._test_seq(1)
class ParallelTaskTest(_SConsTaskTest):
    def runTest(self):
        "test parallel jobs with actual Taskmaster and Task"
        self._test_seq(num_jobs)
#---------------------------------------------------------------------
def suite():
    """Assemble every Job test case, in the canonical order, into one
    unittest suite."""
    case_classes = [
        ParallelTestCase,
        SerialTestCase,
        NoParallelTestCase,
        SerialExceptionTestCase,
        ParallelExceptionTestCase,
        SerialTaskTest,
        ParallelTaskTest,
    ]
    suite = unittest.TestSuite()
    for case_class in case_classes:
        suite.addTest(case_class())
    return suite
if __name__ == "__main__":
    runner = unittest.TextTestRunner()
    result = runner.run(suite())
    # Exit status 2 signals "threads unsupported": exactly one error
    # whose stored exception is a NoThreadsException.
    # NOTE(review): the check names SerialTestCase, but NoThreadsException
    # is raised by ParallelTestCase — confirm whether this condition can
    # ever match; it also assumes errors[0][1] is an exc_info-style tuple.
    if (len(result.failures) == 0
        and len(result.errors) == 1
        and type(result.errors[0][0]) == SerialTestCase
        and type(result.errors[0][1][0]) == NoThreadsException):
        sys.exit(2)
    elif not result.wasSuccessful():
        sys.exit(1)
| mit |
suutari/shoop | shuup/importer/utils/importer.py | 1 | 1117 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import os
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from enumfields import Enum
from shuup.apps.provides import get_provide_objects
class ImportMode(Enum):
    # Controls whether an import run may create new objects, update
    # existing ones, or both.  Values are the identifiers persisted in
    # import configuration.
    CREATE_UPDATE = "create,update"
    CREATE = "create"
    UPDATE = "update"

    class Labels:
        # Human-readable labels rendered by enumfields in forms/admin.
        CREATE_UPDATE = _("Allow create and update")
        CREATE = _("Only create (no updates)")
        UPDATE = _("Only update existing (no new ones are created)")
def get_importer_choices():
    """Return ``(identifier, name)`` pairs for every registered importer.

    Importers are discovered through the ``importers`` provides category.
    """
    choices = []
    for importer in get_provide_objects("importers"):
        choices.append((importer.identifier, importer.name))
    return choices
def get_importer(identifier):
    """Return the registered importer with the given identifier.

    :param identifier: identifier of the importer to look up
    :return: the importer class, or None when no importer matches
    """
    matches = (
        importer for importer in get_provide_objects("importers")
        if importer.identifier == identifier
    )
    return next(matches, None)
def get_import_file_path(filename):
    """Return the path below MEDIA_ROOT/import_temp for an uploaded
    import file; only the basename of *filename* is used."""
    basename = os.path.basename(filename)
    return os.path.join(settings.MEDIA_ROOT, "import_temp", basename)
| agpl-3.0 |
khchine5/atelier | atelier/sphinxconf/sigal_image.py | 1 | 8318 | # -*- coding: utf-8 -*-
# Copyright 2014-2018 Rumma & Ko Ltd
# License: BSD, see LICENSE for more details.
"""
Defines the :rst:dir:`sigal_image` directive.
.. rst:directive:: sigal_image
.. _picsel: https://github.com/lsaffre/picsel
.. _Shotwell: https://en.wikipedia.org/wiki/Shotwell_%28software%29
.. _digiKam: https://www.digikam.org/
.. _Sigal: http://sigal.saimon.org/en/latest/
This creates a bridge between a photo collection managed with
Shotwell_ or digiKam_ and a blog generated with Sphinx. All photos
remain in the single central file tree managed by Shotwell_ or
digiKam_. From within Shotwell_ or digiKam_ you use a tag "blog" to
mark all photos that are to be available for your Sphinx blog. Then
you use picsel_ to extract those images to a separate directory. This
tree serves as input for Sigal_ which will generate a static html
gallery. An example of a Sigal gallery is `here
<http://sigal.saffre-rumma.net/>`__. The :rst:dir:`sigal_image`
directive was the last missing part of this publishing bridge: it
allows you to integrate your pictures into blog entries.
Usage::
.. sigal_image:: partial/path/to/photo.jpg[|title_or_options]
For example, if `sigal_base_url` in your :xfile:`conf.py` is set to
``"http://sigal.saffre-rumma.net"``, the following directive in your
rst source file::
.. sigal_image:: 2014/04/10/img_6617.jpg
will insert the following rst code::
.. raw:: html
<a href="http://sigal.saffre-rumma.net/2014/04/10/img_6617.jpg">
<img
src="http://sigal.saffre-rumma.net/2014/04/10/thumbnails/img_6617.jpg"/>
</a>
The file name can contain **formatting instructions** inspired by
`Wikipedia pictures
<https://en.wikipedia.org/wiki/Wikipedia:Picture_tutorial>`_ which
uses a variable number of pipe characters. For example:
>>> from __future__ import print_function
>>> print(line2html("foo.jpg"))
<a href="http://example.com//foo.jpg" data-lightbox="image-1" data-title="foo.jpg"/><img src="http://example.com//thumbnails/foo.jpg" style="padding:4px; width:280px;" title="foo.jpg"/></a>
>>> print(line2html("foo.jpg|This is a nice picture"))
<a href="http://example.com//foo.jpg" data-lightbox="image-1" data-title="This is a nice picture"/><img src="http://example.com//thumbnails/foo.jpg" style="padding:4px; width:280px;" title="This is a nice picture"/></a>
>>> print(line2html("foo.jpg|thumb|This is a nice picture"))
<a href="http://example.com//foo.jpg" data-lightbox="image-1" data-title="This is a nice picture"/><img src="http://example.com//thumbnails/foo.jpg" style="padding:4px; float:right; width:280px;" title="This is a nice picture"/></a>
>>> print(line2html("foo.jpg|thumb|left|This is a nice picture"))
<a href="http://example.com//foo.jpg" data-lightbox="image-1" data-title="This is a nice picture"/><img src="http://example.com//thumbnails/foo.jpg" style="padding:4px; float:left;; width:280px;" title="This is a nice picture"/></a>
The generated HTML also includes attributes for `lightbox
<http://lokeshdhakar.com/projects/lightbox2/>`_. In order to activate
this feature you must add the content of the lightbox :file:`dist`
directory somewhere to your web server and then change your
`layout.html` template to something like this::
{%- block extrahead %}
{{ super() }}
<script src="/data/lightbox/js/lightbox-plus-jquery.min.js"></script>
<link href="/data/lightbox/css/lightbox.css" rel="stylesheet" />
{% endblock %}
"""
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
from builtins import object
import logging
logger = logging.getLogger(__name__)
import os
# from docutils.parsers.rst import directives
from atelier.sphinxconf.insert_input import InsertInputDirective
TEMPLATE1 = """
.. raw:: html
<a href="%(target)s"><img src="%(src)s" style="padding:4px"/></a>
"""
#TEMPLATE = """<a href="%(target)s" style="%(style)s" %(class)s data-lightbox="image-1" data-title="%(caption)s"/><img src="%(src)s" style="padding:4px" title="%(caption)s"/></a>"""
TEMPLATE = """<a href="%(target)s" %(class)s data-lightbox="image-1" data-title="%(caption)s"/><img src="%(src)s" style="%(style)s" title="%(caption)s"/></a>"""
class Format(object):
    """Default rendering format: inline image, fixed 280px width."""

    @classmethod
    def update_context(cls, caption, tplkw):
        """Store *caption* and the default CSS style into *tplkw*."""
        tplkw['caption'] = caption
        tplkw['style'] = "padding:4px; width:280px;"
class Thumb(Format):
    """'thumb' format: floated thumbnail, optionally aligned
    left/center/right (default right)."""

    @classmethod
    def update_context(self, caption, tplkw):
        # The caption may carry an extra "align|" prefix.
        chunks = caption.split('|')
        if len(chunks) == 1:
            tplkw['style'] = "padding:4px; float:right; width:280px;"
        elif len(chunks) == 2:
            align, caption = chunks
            if not align in ("right", "left", "center"):
                raise Exception("Invalid alignment '{0}'".format(align))
            # NOTE(review): the doubled ';;' below is reproduced verbatim
            # by the module doctests, so it cannot be "fixed" here without
            # also changing them.
            tplkw['style'] = "padding:4px; float:{0};; width:280px;".format(align)
        else:
            # NOTE(review): reachable when the caption itself contains
            # '|' — the message "Impossible" is misleading; confirm.
            raise Exception("Impossible")
        tplkw.update(caption=caption)
class Wide(Format):
    """'wide' format: image stretched to the full available width."""

    @classmethod
    def update_context(self, caption, tplkw):
        chunks = caption.split('|')
        if len(chunks) == 1:
            tplkw['style'] = "padding:4px; width:100%;"
        else:
            # 'wide' accepts no extra '|'-separated modifiers.
            raise Exception("Impossible")
        tplkw.update(caption=caption)
# Registry of caption formats, keyed by the name used in the directive
# line ("thumb", "wide"); the None key is the default inline format.
FORMATS = dict()
FORMATS[None] = Format()
FORMATS['thumb'] = Thumb()
FORMATS['wide'] = Wide()
def buildurl(*parts):
    """Join *parts* with '/' below the placeholder domain.

    Default URL builder used by the module doctests; SigalImage supplies
    its own closure over ``sigal_base_url`` instead.
    """
    base = 'http://example.com/'
    return base + '/'.join(parts)
def line2html(name, buildurl=buildurl):
    """Translate one ``sigal_image`` content line into an HTML snippet.

    *name* is ``partial/path/photo.jpg`` optionally followed by
    ``|caption`` or ``|format|caption`` (see FORMATS for the known
    format names).  *buildurl* maps URL path parts to an absolute URL.

    Returns '' for a blank line.  Raises an Exception when the file
    name contains a space.

    The previous implementation also carried a whitespace-separated
    legacy syntax inside an unreachable ``else:`` branch guarded by
    ``if True:``; that dead code has been removed — the live behavior
    (and the module doctests) are unchanged.
    """
    name = name.strip()
    if not name:
        return ''
    kw = dict()  # template context for TEMPLATE
    kw['class'] = ''
    kw['style'] = "padding:4px; width:280px;"

    # Split off an optional "|caption" or "|format|caption" suffix.
    caption = name
    fmt = FORMATS[None]
    chunks = name.split('|', 1)
    if len(chunks) == 2:
        name, caption = chunks
        chunks = caption.split('|', 1)
        if len(chunks) == 2:
            fmtname, caption = chunks
            fmt = FORMATS[fmtname]
    # Let the chosen format fill in caption and CSS style.
    fmt.update_context(caption, kw)
    if ' ' in name:
        raise Exception("Invalid filename. Spaces not allowed.")

    head, tail = os.path.split(name)
    kw.update(target=buildurl(head, tail))
    kw.update(src=buildurl(head, 'thumbnails', tail))
    return TEMPLATE % kw
class SigalImage(InsertInputDirective):
    """Sphinx directive: expand each body line into a linked Sigal
    image (see line2html())."""
    # Body lines carry the image specs; the directive takes no arguments.
    has_content = True
    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = False
    # option_spec = {
    #     'style': directives.unchanged,
    #     'class': directives.unchanged,
    # }

    def get_rst(self):
        """Return the reST text to insert: a single raw-HTML block built
        from all content lines, or '' when there is no content."""
        env = self.state.document.settings.env
        base_url = env.config.sigal_base_url

        # URL builder bound to the configured gallery base URL.
        def buildurl(*parts):
            return base_url + '/' + '/'.join(parts)

        s = ''
        for name in self.content:
            s += line2html(name, buildurl)
        if s:
            s = "\n\n.. raw:: html\n\n    {0}\n\n".format(s)
        return s

    def get_headers(self):
        # Column headers used when rendering entries as a table.
        return ['title', 'author', 'date']

    def format_entry(self, e):
        # Build one table row (document link, author, date) for entry e.
        cells = []
        # text = ''.join([unicode(c) for c in e.title.children])
        # cells.append(":doc:`%s <%s>`" % (text, e.docname))
        cells.append(":doc:`%s`" % e.docname)
        cells.append(str(e.meta.get('author', '')))
        cells.append(str(e.meta.get('date', '')))
        return cells
def setup(app):
    """Sphinx extension entry point.

    Registers the ``sigal_base_url`` config value and the
    ``sigal_image`` directive on *app*.
    """
    app.add_config_value(
        'sigal_base_url', 'http://sigal.saffre-rumma.net', True)
    app.add_directive('sigal_image', SigalImage)
| bsd-2-clause |
shhui/nova | nova/tests/api/openstack/compute/plugins/v3/test_lock_server.py | 28 | 2235 | # Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack.compute.plugins.v3 import lock_server
from nova import exception
from nova.tests.api.openstack.compute.plugins.v3 import \
admin_only_action_common
from nova.tests.api.openstack import fakes
class LockServerTests(admin_only_action_common.CommonTests):
    """Tests for the v3 os-lock-server extension (lock/unlock actions)."""

    def setUp(self):
        super(LockServerTests, self).setUp()
        self.controller = lock_server.LockServerController()
        self.compute_api = self.controller.compute_api

        def _fake_controller(*args, **kwargs):
            # Make the WSGI app reuse this test's controller so mocks
            # installed on self.compute_api are the ones exercised.
            return self.controller

        self.stubs.Set(lock_server, 'LockServerController',
                       _fake_controller)
        self.app = fakes.wsgi_app_v3(init_only=('servers',
                                                'os-lock-server'),
                                     fake_auth_context=self.context)
        self.mox.StubOutWithMock(self.compute_api, 'get')

    def test_lock_unlock(self):
        # Shared helper asserts both actions succeed via the compute API.
        self._test_actions(['lock', 'unlock'])

    def test_lock_unlock_with_non_existed_instance(self):
        self._test_actions_with_non_existed_instance(['lock', 'unlock'])

    def test_unlock_not_authorized(self):
        """Unlock rejected by policy must surface as HTTP 403."""
        self.mox.StubOutWithMock(self.compute_api, 'unlock')

        instance = self._stub_instance_get()

        self.compute_api.unlock(self.context, instance).AndRaise(
            exception.PolicyNotAuthorized(action='unlock'))

        self.mox.ReplayAll()

        res = self._make_request('/servers/%s/action' % instance.uuid,
                                 {'unlock': None})
        self.assertEqual(403, res.status_int)
| apache-2.0 |
chauhanhardik/populo | common/lib/xmodule/xmodule/tests/test_self_assessment.py | 92 | 6785 | from datetime import datetime
import json
import unittest
from mock import Mock, MagicMock
from webob.multidict import MultiDict
from pytz import UTC
from xblock.fields import ScopeIds
from xmodule.open_ended_grading_classes.self_assessment_module import SelfAssessmentModule
from opaque_keys.edx.locations import Location
from lxml import etree
from . import get_test_system
import test_util_open_ended
class SelfAssessmentTest(unittest.TestCase):
    """Tests for SelfAssessmentModule: the assess/done state machine,
    answer storage/display, and behavior past the close date."""

    # Minimal single-category rubric fixture (doubly wrapped, as the
    # grading code expects <rubric><rubric>...</rubric></rubric>).
    rubric = '''<rubric><rubric>
        <category>
        <description>Response Quality</description>
        <option>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option>
        </category>
        </rubric></rubric>'''

    prompt = etree.XML("<prompt>This is sample prompt text.</prompt>")
    definition = {
        'rubric': rubric,
        'prompt': prompt,
        'submitmessage': 'Shall we submit now?',
        'hintprompt': 'Consider this...',
    }

    location = Location("edX", "sa_test", "run", "selfassessment", "SampleQuestion", None)
    descriptor = Mock()

    def setUp(self):
        super(SelfAssessmentTest, self).setUp()
        # Static configuration passed to every module under test.
        self.static_data = {
            'max_attempts': 10,
            'rubric': etree.XML(self.rubric),
            'prompt': self.prompt,
            'max_score': 1,
            'display_name': "Name",
            'accept_file_upload': False,
            'close_date': None,
            's3_interface': test_util_open_ended.S3_INTERFACE,
            'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
            'skip_basic_checks': False,
            'control': {
                'required_peer_grading': 1,
                'peer_grader_count': 1,
                'min_to_calibrate': 3,
                'max_to_calibrate': 6,
                'peer_grade_finished_submissions_when_none_pending': False,
            }
        }

        system = get_test_system()
        usage_key = system.course_id.make_usage_key('combinedopenended', 'test_loc')
        scope_ids = ScopeIds(1, 'combinedopenended', usage_key, usage_key)
        system.xmodule_instance = Mock(scope_ids=scope_ids)
        self.module = SelfAssessmentModule(
            system,
            self.location,
            self.definition,
            self.descriptor,
            self.static_data
        )

    def test_get_html(self):
        # The rendered HTML must contain the prompt text.
        html = self.module.get_html(self.module.system)
        self.assertTrue("This is sample prompt text" in html)

    def test_self_assessment_flow(self):
        """Walk the full INITIAL -> ASSESSING -> DONE -> reset cycle."""
        responses = {'assessment': '0', 'score_list[]': ['0', '0']}

        def get_fake_item(name):
            return responses[name]

        def get_data_for_location(self, location, student):
            # Stub out the peer-grading service lookup.
            return {
                'count_graded': 0,
                'count_required': 0,
                'student_sub_count': 0,
            }

        mock_query_dict = MagicMock()
        mock_query_dict.__getitem__.side_effect = get_fake_item
        mock_query_dict.getall = get_fake_item

        self.module.peer_gs.get_data_for_location = get_data_for_location

        self.assertEqual(self.module.get_score()['score'], 0)

        self.module.save_answer({'student_answer': "I am an answer"},
                                self.module.system)
        self.assertEqual(self.module.child_state, self.module.ASSESSING)

        self.module.save_assessment(mock_query_dict, self.module.system)
        self.assertEqual(self.module.child_state, self.module.DONE)

        d = self.module.reset({})
        self.assertTrue(d['success'])
        self.assertEqual(self.module.child_state, self.module.INITIAL)

        # if we now assess as right, skip the REQUEST_HINT state
        self.module.save_answer({'student_answer': 'answer 4'},
                                self.module.system)
        responses['assessment'] = '1'
        self.module.save_assessment(mock_query_dict, self.module.system)
        self.assertEqual(self.module.child_state, self.module.DONE)

    def test_self_assessment_display(self):
        """
        Test storing an answer with the self assessment module.
        """
        # Create a module with no state yet. Important that this start off as a blank slate.
        test_module = SelfAssessmentModule(
            get_test_system(),
            self.location,
            self.definition,
            self.descriptor,
            self.static_data
        )

        saved_response = "Saved response."
        submitted_response = "Submitted response."

        # Initially, there will be no stored answer.
        self.assertEqual(test_module.stored_answer, None)
        # And the initial answer to display will be an empty string.
        self.assertEqual(test_module.get_display_answer(), "")

        # Now, store an answer in the module.
        test_module.handle_ajax("store_answer", {'student_answer': saved_response}, get_test_system())
        # The stored answer should now equal our response.
        self.assertEqual(test_module.stored_answer, saved_response)
        self.assertEqual(test_module.get_display_answer(), saved_response)

        # Submit a student response to the question.
        test_module.handle_ajax("save_answer", {"student_answer": submitted_response}, get_test_system())
        # Submitting an answer should clear the stored answer.
        self.assertEqual(test_module.stored_answer, None)
        # Confirm that the answer is stored properly.
        self.assertEqual(test_module.latest_answer(), submitted_response)

        # Mock saving an assessment.
        assessment_dict = MultiDict({'assessment': 0, 'score_list[]': 0})
        data = test_module.handle_ajax("save_assessment", assessment_dict, get_test_system())
        self.assertTrue(json.loads(data)['success'])

        # Reset the module so the student can try again.
        test_module.reset(get_test_system())

        # Confirm that the right response is loaded.
        self.assertEqual(test_module.get_display_answer(), submitted_response)

    def test_save_assessment_after_closing(self):
        """
        Test storing assessment when close date is passed.
        """
        responses = {'assessment': '0', 'score_list[]': ['0', '0']}
        self.module.save_answer({'student_answer': "I am an answer"}, self.module.system)
        self.assertEqual(self.module.child_state, self.module.ASSESSING)

        # Set close date to current datetime.
        self.module.close_date = datetime.now(UTC)

        # Save assessment when close date is passed: state must NOT advance.
        self.module.save_assessment(responses, self.module.system)
        self.assertNotEqual(self.module.child_state, self.module.DONE)
| agpl-3.0 |
amsehili/auditok | tests/test_AudioReader.py | 1 | 30622 | """
@author: Amine Sehili <amine.sehili@gmail.com>
September 2015
"""
import unittest
from functools import partial
import sys
import wave
from genty import genty, genty_dataset
from auditok import (
dataset,
ADSFactory,
AudioDataSource,
AudioReader,
Recorder,
BufferAudioSource,
WaveAudioSource,
DuplicateArgument,
)
class TestADSFactoryFileAudioSource(unittest.TestCase):
    """ADSFactory.ads() over a file-backed WaveAudioSource.

    Exercises default/explicit block sizes and durations, the audio
    format properties, and the limiter / recorder / overlap decorator
    behaviors, alone and combined.  The fixture file is 16 kHz, 16-bit,
    mono, so 0.01 s == 160 samples.
    """

    def setUp(self):
        self.audio_source = WaveAudioSource(
            filename=dataset.one_to_six_arabic_16000_mono_bc_noise
        )

    def test_ADS_type(self):
        ads = ADSFactory.ads(audio_source=self.audio_source)
        err_msg = "wrong type for ads object, expected: 'AudioDataSource', "
        err_msg += "found: {0}"
        self.assertIsInstance(
            ads, AudioDataSource, err_msg.format(type(ads)),
        )

    def test_default_block_size(self):
        # Default block duration is 10 ms -> 160 samples at 16 kHz.
        ads = ADSFactory.ads(audio_source=self.audio_source)
        size = ads.block_size
        self.assertEqual(
            size,
            160,
            "Wrong default block_size, expected: 160, found: {0}".format(size),
        )

    def test_block_size(self):
        ads = ADSFactory.ads(audio_source=self.audio_source, block_size=512)
        size = ads.block_size
        self.assertEqual(
            size,
            512,
            "Wrong block_size, expected: 512, found: {0}".format(size),
        )

        # with alias keyword
        ads = ADSFactory.ads(audio_source=self.audio_source, bs=160)
        size = ads.block_size
        self.assertEqual(
            size,
            160,
            "Wrong block_size, expected: 160, found: {0}".format(size),
        )

    def test_block_duration(self):
        ads = ADSFactory.ads(
            audio_source=self.audio_source, block_dur=0.01
        )  # 10 ms
        size = ads.block_size
        self.assertEqual(
            size,
            160,
            "Wrong block_size, expected: 160, found: {0}".format(size),
        )

        # with alias keyword
        ads = ADSFactory.ads(audio_source=self.audio_source, bd=0.025)  # 25 ms
        size = ads.block_size
        self.assertEqual(
            size,
            400,
            "Wrong block_size, expected: 400, found: {0}".format(size),
        )

    def test_hop_duration(self):
        ads = ADSFactory.ads(
            audio_source=self.audio_source, block_dur=0.02, hop_dur=0.01
        )  # 10 ms
        size = ads.hop_size
        self.assertEqual(
            size, 160, "Wrong hop_size, expected: 160, found: {0}".format(size)
        )

        # with alias keyword
        ads = ADSFactory.ads(
            audio_source=self.audio_source, bd=0.025, hop_dur=0.015
        )  # 15 ms
        size = ads.hop_size
        self.assertEqual(
            size,
            240,
            "Wrong block_size, expected: 240, found: {0}".format(size),
        )

    def test_sampling_rate(self):
        ads = ADSFactory.ads(audio_source=self.audio_source)
        srate = ads.sampling_rate
        self.assertEqual(
            srate,
            16000,
            "Wrong sampling rate, expected: 16000, found: {0}".format(srate),
        )

    def test_sample_width(self):
        ads = ADSFactory.ads(audio_source=self.audio_source)
        swidth = ads.sample_width
        self.assertEqual(
            swidth,
            2,
            "Wrong sample width, expected: 2, found: {0}".format(swidth),
        )

    def test_channels(self):
        ads = ADSFactory.ads(audio_source=self.audio_source)
        channels = ads.channels
        self.assertEqual(
            channels,
            1,
            "Wrong number of channels, expected: 1, found: {0}".format(
                channels
            ),
        )

    def test_read(self):
        # A plain ads must return exactly what the raw source returns.
        ads = ADSFactory.ads(audio_source=self.audio_source, block_size=256)
        ads.open()
        ads_data = ads.read()
        ads.close()

        audio_source = WaveAudioSource(
            filename=dataset.one_to_six_arabic_16000_mono_bc_noise
        )
        audio_source.open()
        audio_source_data = audio_source.read(256)
        audio_source.close()

        self.assertEqual(
            ads_data, audio_source_data, "Unexpected data read from ads"
        )

    def test_Limiter_Deco_read(self):
        # read a maximum of 0.75 seconds from audio source
        ads = ADSFactory.ads(audio_source=self.audio_source, max_time=0.75)
        ads_data = []
        ads.open()
        while True:
            block = ads.read()
            if block is None:
                break
            ads_data.append(block)
        ads.close()
        ads_data = b"".join(ads_data)

        audio_source = WaveAudioSource(
            filename=dataset.one_to_six_arabic_16000_mono_bc_noise
        )
        audio_source.open()
        audio_source_data = audio_source.read(int(16000 * 0.75))
        audio_source.close()

        self.assertEqual(
            ads_data, audio_source_data, "Unexpected data read from LimiterADS"
        )

    def test_Limiter_Deco_read_limit(self):
        # read a maximum of 1.191 seconds from audio source
        ads = ADSFactory.ads(audio_source=self.audio_source, max_time=1.191)
        # Expected byte count: full blocks plus a final partial block.
        total_samples = round(ads.sampling_rate * 1.191)
        nb_full_blocks, last_block_size = divmod(total_samples, ads.block_size)
        total_samples_with_overlap = (
            nb_full_blocks * ads.block_size + last_block_size
        )
        expected_read_bytes = (
            total_samples_with_overlap * ads.sw * ads.channels
        )

        total_read = 0
        ads.open()
        i = 0
        while True:
            block = ads.read()
            if block is None:
                break
            i += 1
            total_read += len(block)

        ads.close()
        err_msg = "Wrong data length read from LimiterADS, expected: {0}, "
        err_msg += "found: {1}"
        self.assertEqual(
            total_read,
            expected_read_bytes,
            err_msg.format(expected_read_bytes, total_read),
        )

    def test_Recorder_Deco_read(self):
        # Recording must not alter the data returned by read().
        ads = ADSFactory.ads(
            audio_source=self.audio_source, record=True, block_size=500
        )

        ads_data = []
        ads.open()
        for i in range(10):
            block = ads.read()
            if block is None:
                break
            ads_data.append(block)
        ads.close()
        ads_data = b"".join(ads_data)

        audio_source = WaveAudioSource(
            filename=dataset.one_to_six_arabic_16000_mono_bc_noise
        )
        audio_source.open()
        audio_source_data = audio_source.read(500 * 10)
        audio_source.close()

        self.assertEqual(
            ads_data,
            audio_source_data,
            "Unexpected data read from RecorderADS",
        )

    def test_Recorder_Deco_is_rewindable(self):
        ads = ADSFactory.ads(audio_source=self.audio_source, record=True)
        self.assertTrue(
            ads.rewindable, "RecorderADS.is_rewindable should return True"
        )

    def test_Recorder_Deco_rewind_and_read(self):
        ads = ADSFactory.ads(
            audio_source=self.audio_source, record=True, block_size=320
        )

        ads.open()
        for i in range(10):
            ads.read()

        ads.rewind()

        # read all available data after rewind
        ads_data = []
        while True:
            block = ads.read()
            if block is None:
                break
            ads_data.append(block)
        ads.close()
        ads_data = b"".join(ads_data)

        audio_source = WaveAudioSource(
            filename=dataset.one_to_six_arabic_16000_mono_bc_noise
        )
        audio_source.open()
        audio_source_data = audio_source.read(320 * 10)
        audio_source.close()

        self.assertEqual(
            ads_data,
            audio_source_data,
            "Unexpected data read from RecorderADS",
        )

    def test_Overlap_Deco_read(self):
        # Use arbitrary valid block_size and hop_size
        block_size = 1714
        hop_size = 313

        ads = ADSFactory.ads(
            audio_source=self.audio_source,
            block_size=block_size,
            hop_size=hop_size,
        )

        # Read all available data overlapping blocks
        ads.open()
        ads_data = []
        while True:
            block = ads.read()
            if block is None:
                break
            ads_data.append(block)
        ads.close()

        # Read all data from file and build a BufferAudioSource
        fp = wave.open(dataset.one_to_six_arabic_16000_mono_bc_noise, "r")
        wave_data = fp.readframes(fp.getnframes())
        fp.close()
        audio_source = BufferAudioSource(
            wave_data, ads.sampling_rate, ads.sample_width, ads.channels
        )
        audio_source.open()

        # Compare all blocks read from OverlapADS to those read
        # from an audio source with a manual position setting
        for i, block in enumerate(ads_data):
            tmp = audio_source.read(block_size)
            self.assertEqual(
                block,
                tmp,
                "Unexpected block (N={0}) read from OverlapADS".format(i),
            )
            audio_source.position = (i + 1) * hop_size
        audio_source.close()

    def test_Limiter_Overlap_Deco_read(self):
        block_size = 256
        hop_size = 200

        ads = ADSFactory.ads(
            audio_source=self.audio_source,
            max_time=0.50,
            block_size=block_size,
            hop_size=hop_size,
        )

        # Read all available data overlapping blocks
        ads.open()
        ads_data = []
        while True:
            block = ads.read()
            if block is None:
                break
            ads_data.append(block)
        ads.close()

        # Read all data from file and build a BufferAudioSource
        fp = wave.open(dataset.one_to_six_arabic_16000_mono_bc_noise, "r")
        wave_data = fp.readframes(fp.getnframes())
        fp.close()
        audio_source = BufferAudioSource(
            wave_data, ads.sampling_rate, ads.sample_width, ads.channels
        )
        audio_source.open()

        # Compare all blocks read from OverlapADS to those read
        # from an audio source with a manual position setting
        for i, block in enumerate(ads_data):
            tmp = audio_source.read(len(block) // (ads.sw * ads.ch))
            self.assertEqual(
                len(block),
                len(tmp),
                "Unexpected block (N={0}) read from OverlapADS".format(i),
            )
            audio_source.position = (i + 1) * hop_size
        audio_source.close()

    def test_Limiter_Overlap_Deco_read_limit(self):
        block_size = 313
        hop_size = 207
        ads = ADSFactory.ads(
            audio_source=self.audio_source,
            max_time=1.932,
            block_size=block_size,
            hop_size=hop_size,
        )
        # With overlap, only (block_size - hop_size) samples are new per
        # block after the first full-size read.
        total_samples = round(ads.sampling_rate * 1.932)
        first_read_size = block_size
        next_read_size = block_size - hop_size
        nb_next_blocks, last_block_size = divmod(
            (total_samples - first_read_size), next_read_size
        )
        total_samples_with_overlap = (
            first_read_size + next_read_size * nb_next_blocks + last_block_size
        )
        expected_read_bytes = (
            total_samples_with_overlap * ads.sw * ads.channels
        )

        cache_size = (block_size - hop_size) * ads.sample_width * ads.channels
        total_read = cache_size

        ads.open()
        i = 0
        while True:
            block = ads.read()
            if block is None:
                break
            i += 1
            total_read += len(block) - cache_size

        ads.close()
        err_msg = "Wrong data length read from LimiterADS, expected: {0}, "
        err_msg += "found: {1}"
        self.assertEqual(
            total_read,
            expected_read_bytes,
            err_msg.format(expected_read_bytes, total_read),
        )

    def test_Recorder_Overlap_Deco_is_rewindable(self):
        ads = ADSFactory.ads(
            audio_source=self.audio_source,
            block_size=320,
            hop_size=160,
            record=True,
        )
        self.assertTrue(
            ads.rewindable, "RecorderADS.is_rewindable should return True"
        )

    def test_Recorder_Overlap_Deco_rewind_and_read(self):
        # Use arbitrary valid block_size and hop_size
        block_size = 1600
        hop_size = 400

        ads = ADSFactory.ads(
            audio_source=self.audio_source,
            block_size=block_size,
            hop_size=hop_size,
            record=True,
        )

        # Read all available data overlapping blocks
        ads.open()
        i = 0
        while True:
            block = ads.read()
            if block is None:
                break
            i += 1

        ads.rewind()

        # Read all data from file and build a BufferAudioSource
        fp = wave.open(dataset.one_to_six_arabic_16000_mono_bc_noise, "r")
        wave_data = fp.readframes(fp.getnframes())
        fp.close()
        audio_source = BufferAudioSource(
            wave_data, ads.sampling_rate, ads.sample_width, ads.channels
        )
        audio_source.open()

        # Compare all blocks read from OverlapADS to those read
        # from an audio source with a manual position setting
        for j in range(i):
            tmp = audio_source.read(block_size)
            self.assertEqual(
                ads.read(),
                tmp,
                "Unexpected block (N={0}) read from OverlapADS".format(i),
            )
            audio_source.position = (j + 1) * hop_size

        ads.close()
        audio_source.close()

    def test_Limiter_Recorder_Overlap_Deco_rewind_and_read(self):
        # Use arbitrary valid block_size and hop_size
        block_size = 1600
        hop_size = 400

        ads = ADSFactory.ads(
            audio_source=self.audio_source,
            max_time=1.50,
            block_size=block_size,
            hop_size=hop_size,
            record=True,
        )

        # Read all available data overlapping blocks
        ads.open()
        i = 0
        while True:
            block = ads.read()
            if block is None:
                break
            i += 1

        ads.rewind()

        # Read all data from file and build a BufferAudioSource
        fp = wave.open(dataset.one_to_six_arabic_16000_mono_bc_noise, "r")
        wave_data = fp.readframes(fp.getnframes())
        fp.close()
        audio_source = BufferAudioSource(
            wave_data, ads.sampling_rate, ads.sample_width, ads.channels
        )
        audio_source.open()

        # Compare all blocks read from OverlapADS to those read
        # from an audio source with a manual position setting
        for j in range(i):
            tmp = audio_source.read(block_size)
            self.assertEqual(
                ads.read(),
                tmp,
                "Unexpected block (N={0}) read from OverlapADS".format(i),
            )
            audio_source.position = (j + 1) * hop_size

        ads.close()
        audio_source.close()

    def test_Limiter_Recorder_Overlap_Deco_rewind_and_read_limit(self):
        # Use arbitrary valid block_size and hop_size
        block_size = 1000
        hop_size = 200
        ads = ADSFactory.ads(
            audio_source=self.audio_source,
            max_time=1.317,
            block_size=block_size,
            hop_size=hop_size,
            record=True,
        )
        # Same overlap arithmetic as test_Limiter_Overlap_Deco_read_limit.
        total_samples = round(ads.sampling_rate * 1.317)
        first_read_size = block_size
        next_read_size = block_size - hop_size
        nb_next_blocks, last_block_size = divmod(
            (total_samples - first_read_size), next_read_size
        )
        total_samples_with_overlap = (
            first_read_size + next_read_size * nb_next_blocks + last_block_size
        )
        expected_read_bytes = (
            total_samples_with_overlap * ads.sw * ads.channels
        )

        cache_size = (block_size - hop_size) * ads.sample_width * ads.channels
        total_read = cache_size

        ads.open()
        i = 0
        while True:
            block = ads.read()
            if block is None:
                break
            i += 1
            total_read += len(block) - cache_size

        ads.close()
        err_msg = "Wrong data length read from LimiterADS, expected: {0}, "
        err_msg += "found: {1}"
        self.assertEqual(
            total_read,
            expected_read_bytes,
            err_msg.format(expected_read_bytes, total_read),
        )
class TestADSFactoryBufferAudioSource(unittest.TestCase):
    """ADSFactory.ads() built from an in-memory data buffer (16 Hz,
    16-bit, mono, 32-byte signal -> 16 samples)."""

    def setUp(self):
        self.signal = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ012345"
        self.ads = ADSFactory.ads(
            data_buffer=self.signal,
            sampling_rate=16,
            sample_width=2,
            channels=1,
            block_size=4,
        )

    def test_ADS_BAS_sampling_rate(self):
        srate = self.ads.sampling_rate
        # NOTE(review): message says 16000 but the assertion expects 16.
        self.assertEqual(
            srate,
            16,
            "Wrong sampling rate, expected: 16000, found: {0}".format(srate),
        )

    def test_ADS_BAS_sample_width(self):
        swidth = self.ads.sample_width
        self.assertEqual(
            swidth,
            2,
            "Wrong sample width, expected: 2, found: {0}".format(swidth),
        )

    def test_ADS_BAS_channels(self):
        channels = self.ads.channels
        self.assertEqual(
            channels,
            1,
            "Wrong number of channels, expected: 1, found: {0}".format(
                channels
            ),
        )

    def test_Limiter_Recorder_Overlap_Deco_rewind_and_read(self):
        # Use arbitrary valid block_size and hop_size
        block_size = 5
        hop_size = 4

        ads = ADSFactory.ads(
            data_buffer=self.signal,
            sampling_rate=16,
            sample_width=2,
            channels=1,
            max_time=0.80,
            block_size=block_size,
            hop_size=hop_size,
            record=True,
        )

        # Read all available data overlapping blocks
        ads.open()
        i = 0
        while True:
            block = ads.read()
            if block is None:
                break
            i += 1

        ads.rewind()

        # Build a BufferAudioSource
        audio_source = BufferAudioSource(
            self.signal, ads.sampling_rate, ads.sample_width, ads.channels
        )
        audio_source.open()

        # Compare all blocks read from OverlapADS to those read
        # from an audio source with a manual position setting
        for j in range(i):
            tmp = audio_source.read(block_size)
            block = ads.read()
            self.assertEqual(
                block,
                tmp,
                "Unexpected block '{}' (N={}) read from OverlapADS".format(
                    block, i
                ),
            )
            audio_source.position = (j + 1) * hop_size

        ads.close()
        audio_source.close()
class TestADSFactoryAlias(unittest.TestCase):
    """ADSFactory keyword aliases (sr/sw/ch/bs/bd/hs/hd/mt/rec/db/fn).

    Each alias must behave like its long form, and passing both the
    alias and the long form must raise DuplicateArgument.
    """

    def setUp(self):
        self.signal = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ012345"

    def test_sampling_rate_alias(self):
        ads = ADSFactory.ads(
            data_buffer=self.signal,
            sr=16,
            sample_width=2,
            channels=1,
            block_dur=0.5,
        )
        srate = ads.sampling_rate
        # NOTE(review): message says 16000 but the assertion expects 16.
        self.assertEqual(
            srate,
            16,
            "Wrong sampling rate, expected: 16000, found: {0}".format(srate),
        )

    def test_sampling_rate_duplicate(self):
        func = partial(
            ADSFactory.ads,
            data_buffer=self.signal,
            sr=16,
            sampling_rate=16,
            sample_width=2,
            channels=1,
        )
        self.assertRaises(DuplicateArgument, func)

    def test_sample_width_alias(self):
        ads = ADSFactory.ads(
            data_buffer=self.signal,
            sampling_rate=16,
            sw=2,
            channels=1,
            block_dur=0.5,
        )
        swidth = ads.sample_width
        self.assertEqual(
            swidth,
            2,
            "Wrong sample width, expected: 2, found: {0}".format(swidth),
        )

    def test_sample_width_duplicate(self):
        func = partial(
            ADSFactory.ads,
            data_buffer=self.signal,
            sampling_rate=16,
            sw=2,
            sample_width=2,
            channels=1,
        )
        self.assertRaises(DuplicateArgument, func)

    def test_channels_alias(self):
        ads = ADSFactory.ads(
            data_buffer=self.signal,
            sampling_rate=16,
            sample_width=2,
            ch=1,
            block_dur=4,
        )
        channels = ads.channels
        self.assertEqual(
            channels,
            1,
            "Wrong number of channels, expected: 1, found: {0}".format(
                channels
            ),
        )

    def test_channels_duplicate(self):
        func = partial(
            ADSFactory.ads,
            data_buffer=self.signal,
            sampling_rate=16,
            sample_width=2,
            ch=1,
            channels=1,
        )
        self.assertRaises(DuplicateArgument, func)

    def test_block_size_alias(self):
        ads = ADSFactory.ads(
            data_buffer=self.signal,
            sampling_rate=16,
            sample_width=2,
            channels=1,
            bs=8,
        )
        size = ads.block_size
        self.assertEqual(
            size,
            8,
            "Wrong block_size using bs alias, expected: 8, found: {0}".format(
                size
            ),
        )

    def test_block_size_duplicate(self):
        func = partial(
            ADSFactory.ads,
            data_buffer=self.signal,
            sampling_rate=16,
            sample_width=2,
            channels=1,
            bs=4,
            block_size=4,
        )
        self.assertRaises(DuplicateArgument, func)

    def test_block_duration_alias(self):
        ads = ADSFactory.ads(
            data_buffer=self.signal,
            sampling_rate=16,
            sample_width=2,
            channels=1,
            bd=0.75,
        )
        # 0.75 s at 16 Hz = 12 samples
        size = ads.block_size
        # NOTE(review): message says "expected: 8" but the assertion
        # expects 12 (stale copy-paste in err_msg).
        err_msg = "Wrong block_size set with a block_dur alias 'bd', "
        err_msg += "expected: 8, found: {0}"
        self.assertEqual(
            size, 12, err_msg.format(size),
        )

    def test_block_duration_duplicate(self):
        func = partial(
            ADSFactory.ads,
            data_buffer=self.signal,
            sampling_rate=16,
            sample_width=2,
            channels=1,
            bd=4,
            block_dur=4,
        )
        self.assertRaises(DuplicateArgument, func)

    def test_block_size_duration_duplicate(self):
        # block_size and block_dur are mutually exclusive, even via aliases.
        func = partial(
            ADSFactory.ads,
            data_buffer=self.signal,
            sampling_rate=16,
            sample_width=2,
            channels=1,
            bd=4,
            bs=12,
        )
        self.assertRaises(DuplicateArgument, func)

    def test_hop_duration_alias(self):
        ads = ADSFactory.ads(
            data_buffer=self.signal,
            sampling_rate=16,
            sample_width=2,
            channels=1,
            bd=0.75,
            hd=0.5,
        )
        size = ads.hop_size
        # NOTE(review): message mentions bs alias but this tests hd/hop_size.
        self.assertEqual(
            size,
            8,
            "Wrong block_size using bs alias, expected: 8, found: {0}".format(
                size
            ),
        )

    def test_hop_duration_duplicate(self):
        func = partial(
            ADSFactory.ads,
            data_buffer=self.signal,
            sampling_rate=16,
            sample_width=2,
            channels=1,
            bd=0.75,
            hd=0.5,
            hop_dur=0.5,
        )
        self.assertRaises(DuplicateArgument, func)

    def test_hop_size_duration_duplicate(self):
        func = partial(
            ADSFactory.ads,
            data_buffer=self.signal,
            sampling_rate=16,
            sample_width=2,
            channels=1,
            bs=8,
            hs=4,
            hd=1,
        )
        self.assertRaises(DuplicateArgument, func)

    def test_hop_size_greater_than_block_size(self):
        # hop_size > block_size is invalid (would skip data).
        func = partial(
            ADSFactory.ads,
            data_buffer=self.signal,
            sampling_rate=16,
            sample_width=2,
            channels=1,
            bs=4,
            hs=8,
        )
        self.assertRaises(ValueError, func)

    def test_filename_duplicate(self):
        func = partial(
            ADSFactory.ads,
            fn=dataset.one_to_six_arabic_16000_mono_bc_noise,
            filename=dataset.one_to_six_arabic_16000_mono_bc_noise,
        )
        self.assertRaises(DuplicateArgument, func)

    def test_data_buffer_duplicate(self):
        func = partial(
            ADSFactory.ads,
            data_buffer=self.signal,
            db=self.signal,
            sampling_rate=16,
            sample_width=2,
            channels=1,
        )
        self.assertRaises(DuplicateArgument, func)

    def test_max_time_alias(self):
        ads = ADSFactory.ads(
            data_buffer=self.signal,
            sampling_rate=16,
            sample_width=2,
            channels=1,
            mt=10,
            block_dur=0.5,
        )
        self.assertEqual(
            ads.max_read,
            10,
            "Wrong AudioDataSource.max_read, expected: 10, found: {}".format(
                ads.max_read
            ),
        )

    def test_max_time_duplicate(self):
        func = partial(
            ADSFactory.ads,
            data_buffer=self.signal,
            sampling_rate=16,
            sample_width=2,
            channels=1,
            mt=True,
            max_time=True,
        )
        self.assertRaises(DuplicateArgument, func)

    def test_record_alias(self):
        ads = ADSFactory.ads(
            data_buffer=self.signal,
            sampling_rate=16,
            sample_width=2,
            channels=1,
            rec=True,
            block_dur=0.5,
        )
        self.assertTrue(
            ads.rewindable, "AudioDataSource.rewindable expected to be True"
        )

    def test_record_duplicate(self):
        func = partial(
            ADSFactory.ads,
            data_buffer=self.signal,
            sampling_rate=16,
            sample_width=2,
            channels=1,
            rec=True,
            record=True,
        )
        self.assertRaises(DuplicateArgument, func)

    def test_Limiter_Recorder_Overlap_Deco_rewind_and_read_alias(self):
        # Same scenario as the long-form test, written with aliases only.
        # Use arbitrary valid block_size and hop_size
        block_size = 5
        hop_size = 4

        ads = ADSFactory.ads(
            db=self.signal,
            sr=16,
            sw=2,
            ch=1,
            mt=0.80,
            bs=block_size,
            hs=hop_size,
            rec=True,
        )

        # Read all available data overlapping blocks
        ads.open()
        i = 0
        while True:
            block = ads.read()
            if block is None:
                break
            i += 1

        ads.rewind()

        # Build a BufferAudioSource
        audio_source = BufferAudioSource(
            self.signal, ads.sampling_rate, ads.sample_width, ads.channels
        )
        audio_source.open()

        # Compare all blocks read from AudioDataSource to those read
        # from an audio source with manual position definition
        for j in range(i):
            tmp = audio_source.read(block_size)
            block = ads.read()
            self.assertEqual(
                block,
                tmp,
                "Unexpected block (N={0}) read from OverlapADS".format(i),
            )
            audio_source.position = (j + 1) * hop_size

        ads.close()
        audio_source.close()
def _read_all_data(reader):
blocks = []
while True:
data = reader.read()
if data is None:
break
blocks.append(data)
return b"".join(blocks)
@genty
class TestAudioReader(unittest.TestCase):
    """Tests for the modern AudioReader/Recorder API, parametrized over
    mono and 3-channel fixture files (.wav input vs .raw reference)."""

    # TODO move all tests here when backward compatibility
    # with ADSFactory is dropped

    @genty_dataset(
        mono=("mono_400", 0.5, 16000),
        multichannel=("3channel_400-800-1600", 0.5, 16000 * 3),
    )
    def test_Limiter(self, file_id, max_read, size):
        # max_read seconds from the .wav must equal `size` bytes of .raw.
        input_wav = "tests/data/test_16KHZ_{}Hz.wav".format(file_id)
        input_raw = "tests/data/test_16KHZ_{}Hz.raw".format(file_id)
        with open(input_raw, "rb") as fp:
            expected = fp.read(size)

        reader = AudioReader(input_wav, block_dur=0.1, max_read=max_read)
        reader.open()
        data = _read_all_data(reader)
        reader.close()
        self.assertEqual(data, expected)

    @genty_dataset(mono=("mono_400",), multichannel=("3channel_400-800-1600",))
    def test_Recorder(self, file_id):
        input_wav = "tests/data/test_16KHZ_{}Hz.wav".format(file_id)
        input_raw = "tests/data/test_16KHZ_{}Hz.raw".format(file_id)
        with open(input_raw, "rb") as fp:
            expected = fp.read()

        reader = AudioReader(input_wav, block_dur=0.1, record=True)
        reader.open()
        data = _read_all_data(reader)

        # rewind many times
        for _ in range(3):
            reader.rewind()
            data = _read_all_data(reader)
            self.assertEqual(data, expected)
            self.assertEqual(data, reader.data)
        reader.close()

    @genty_dataset(mono=("mono_400",), multichannel=("3channel_400-800-1600",))
    def test_Recorder_alias(self, file_id):
        # Recorder(...) must behave exactly like AudioReader(..., record=True).
        input_wav = "tests/data/test_16KHZ_{}Hz.wav".format(file_id)
        input_raw = "tests/data/test_16KHZ_{}Hz.raw".format(file_id)
        with open(input_raw, "rb") as fp:
            expected = fp.read()

        reader = Recorder(input_wav, block_dur=0.1)
        reader.open()
        data = _read_all_data(reader)

        # rewind many times
        for _ in range(3):
            reader.rewind()
            data = _read_all_data(reader)
            self.assertEqual(data, expected)
            self.assertEqual(data, reader.data)
        reader.close()
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
| mit |
rnikiforova/GuruTubeProject | GuruTube/libraries/django/template/loaders/eggs.py | 111 | 1205 | # Wrapper for loading templates from eggs via pkg_resources.resource_string.
from __future__ import unicode_literals
try:
from pkg_resources import resource_string
except ImportError:
resource_string = None
from django.conf import settings
from django.template.base import TemplateDoesNotExist
from django.template.loader import BaseLoader
from django.utils import six
class Loader(BaseLoader):
    """Template loader that reads templates out of installed Python eggs."""

    # Only usable when setuptools' pkg_resources could be imported.
    is_usable = resource_string is not None

    def load_template_source(self, template_name, template_dirs=None):
        """
        Loads templates from Python eggs via pkg_resource.resource_string.

        For every installed app, it tries to get the resource (app, template_name).
        """
        if resource_string is not None:
            resource_name = 'templates/' + template_name
            for app in settings.INSTALLED_APPS:
                try:
                    contents = resource_string(app, resource_name)
                except Exception:
                    # This app's egg does not ship the resource; try the next.
                    continue
                if six.PY2:
                    contents = contents.decode(settings.FILE_CHARSET)
                return (contents, 'egg:%s:%s' % (app, resource_name))
        raise TemplateDoesNotExist(template_name)
| bsd-3-clause |
cfei18/incubator-airflow | airflow/migrations/versions/338e90f54d61_more_logging_into_task_isntance.py | 9 | 1613 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""More logging into task_isntance
Revision ID: 338e90f54d61
Revises: 13eb55f81627
Create Date: 2015-08-25 06:09:20.460147
"""
# revision identifiers, used by Alembic.
revision = '338e90f54d61'        # this migration
down_revision = '13eb55f81627'   # migration applied immediately before this one
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Add columns tracking which operator ran the task and when it was
    # queued (commands originally auto generated by Alembic).
    for column_name, column_type in (
            ('operator', sa.String(length=1000)),
            ('queued_dttm', sa.DateTime())):
        op.add_column('task_instance',
                      sa.Column(column_name, column_type, nullable=True))
def downgrade():
    # Drop the columns in reverse order of their creation (commands
    # originally auto generated by Alembic).
    for column_name in ('queued_dttm', 'operator'):
        op.drop_column('task_instance', column_name)
| apache-2.0 |
mandeepdhami/nova | nova/api/openstack/compute/contrib/instance_actions.py | 78 | 3879 | # Copyright 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
authorize_actions = extensions.extension_authorizer('compute',
'instance_actions')
authorize_events = extensions.soft_extension_authorizer('compute',
'instance_actions:events')
ACTION_KEYS = ['action', 'instance_uuid', 'request_id', 'user_id',
'project_id', 'start_time', 'message']
EVENT_KEYS = ['event', 'start_time', 'finish_time', 'result', 'traceback']
class InstanceActionsController(wsgi.Controller):
    """Expose the recorded actions (and their events) for server instances."""

    def __init__(self):
        super(InstanceActionsController, self).__init__()
        self.compute_api = compute.API()
        self.action_api = compute.InstanceActionAPI()

    def _format_action(self, action_raw):
        # Whitelist only the documented action fields.
        return {key: action_raw.get(key) for key in ACTION_KEYS}

    def _format_event(self, event_raw):
        # Whitelist only the documented event fields.
        return {key: event_raw.get(key) for key in EVENT_KEYS}

    def index(self, req, server_id):
        """Returns the list of actions recorded for a given instance."""
        context = req.environ["nova.context"]
        instance = common.get_instance(self.compute_api, context, server_id)
        authorize_actions(context, target=instance)
        raw_actions = self.action_api.actions_get(context, instance)
        formatted = [self._format_action(raw) for raw in raw_actions]
        return {'instanceActions': formatted}

    def show(self, req, server_id, id):
        """Return data about the given instance action."""
        context = req.environ['nova.context']
        instance = common.get_instance(self.compute_api, context, server_id)
        authorize_actions(context, target=instance)
        action = self.action_api.action_get_by_request_id(context, instance,
                                                          id)
        if action is None:
            raise exc.HTTPNotFound()
        action_id = action['id']
        formatted = self._format_action(action)
        # Event details are guarded by their own (soft) policy check.
        if authorize_events(context):
            raw_events = self.action_api.action_events_get(context, instance,
                                                           action_id)
            formatted['events'] = [self._format_event(evt)
                                   for evt in raw_events]
        return {'instanceAction': formatted}
class Instance_actions(extensions.ExtensionDescriptor):
    """View a log of actions and events taken on an instance."""

    name = "InstanceActions"
    alias = "os-instance-actions"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "instance-actions/api/v1.1")
    updated = "2013-02-08T00:00:00Z"

    def get_resources(self):
        # Mount the controller at /servers/{id}/os-instance-actions.
        parent = dict(member_name='server', collection_name='servers')
        resource = extensions.ResourceExtension('os-instance-actions',
                                                InstanceActionsController(),
                                                parent=parent)
        return [resource]
| apache-2.0 |
alebcay/android_kernel_oneplus_msm8974 | tools/perf/scripts/python/net_dropmonitor.py | 1258 | 1562 | # Monitor the system for dropped packets and proudce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
linecount = 0
for line in f:
linecount = linecount+1
f.seek(0)
except:
return
j = 0
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
j = j +1
if ((j % 100) == 0):
print "\r" + str(j) + "/" + str(linecount),
kallsyms.append({ 'loc': loc, 'name' : name})
print "\r" + str(j) + "/" + str(linecount)
kallsyms.sort()
return
def get_sym(sloc):
    # Resolve an address (given as a decimal string) to a
    # (symbol_name, offset) pair, or (None, 0) when no symbol matches.
    addr = int(sloc)
    # kallsyms is sorted by start address; walk it from the top down and
    # take the first symbol starting at or below the address.
    for entry in reversed(kallsyms):
        if addr >= entry['loc']:
            return (entry['name'], addr - entry['loc'])
    return (None, 0)
def print_drop_table():
    # Print a header plus one row per unique drop location: resolved
    # symbol (or the raw address when resolution failed), the offset into
    # that symbol, and the number of drops seen there.
    print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
    for i in drop_log.keys():
        (sym, off) = get_sym(i)
        if sym == None:
            # Symbol table empty or address unknown: show the raw address.
            sym = i
        print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
    # perf calls this once before delivering any events.
    print "Starting trace (Ctrl-C to dump results)"
def trace_end():
    # perf calls this when the event stream ends.  The symbol table is
    # loaded lazily here since it is only needed for the final report.
    print "Gathering kallsyms data"
    get_kallsyms_table()
    print_drop_table()
# called from perf, when it finds a correspoinding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
                   skbaddr, location, protocol):
    # perf callback for the skb:kfree_skb tracepoint: bump the per-location
    # drop counter keyed by the stringified drop address.
    slocation = str(location)
    # dict.get replaces the previous bare try/except counter init, which
    # in Python 2 would also have swallowed unrelated BaseExceptions such
    # as KeyboardInterrupt.
    drop_log[slocation] = drop_log.get(slocation, 0) + 1
| gpl-2.0 |
nicoboss/Floatmotion | pygame/tests/display_test.py | 26 | 21778 | if __name__ == '__main__':
import sys
import os
pkg_dir = os.path.split(os.path.abspath(__file__))[0]
parent_dir, pkg_name = os.path.split(pkg_dir)
is_pygame_pkg = (pkg_name == 'tests' and
os.path.split(parent_dir)[1] == 'pygame')
if not is_pygame_pkg:
sys.path.insert(0, parent_dir)
else:
is_pygame_pkg = __name__.startswith('pygame.tests.')
if is_pygame_pkg:
from pygame.tests.test_utils import test_not_implemented, unittest
else:
from test.test_utils import test_not_implemented, unittest
import pygame, pygame.transform
class DisplayModuleTest( unittest.TestCase ):
    def test_update( self ):
        """ see if pygame.display.update takes rects with negative values.
            "|Tags:display|"
        """
        # Regression check: update() must tolerate rects with a negative
        # position and even a negative size, instead of raising.
        if 1:
            pygame.init()
            screen = pygame.display.set_mode((100,100))
            screen.fill((55,55,55))
            r1 = pygame.Rect(0,0,100,100)
            pygame.display.update(r1)
            # negative x position
            r2 = pygame.Rect(-10,0,100,100)
            pygame.display.update(r2)
            # negative position AND negative width/height
            r3 = pygame.Rect(-10,0,-100,-100)
            pygame.display.update(r3)
            # NOTE: if I don't call pygame.quit there is a segfault. hrmm.
            pygame.quit()
            # I think it's because unittest runs stuff in threads
        # here's a stack trace...
        # NOTE to author of above:
        # unittest doesn't run tests in threads
        # segfault was probably caused by another tests need
        # for a "clean slate"
        """
        #0 0x08103b7c in PyFrame_New ()
        #1 0x080bd666 in PyEval_EvalCodeEx ()
        #2 0x08105202 in PyFunction_SetClosure ()
        #3 0x080595ae in PyObject_Call ()
        #4 0x080b649f in PyEval_CallObjectWithKeywords ()
        #5 0x08059585 in PyObject_CallObject ()
        #6 0xb7f7aa2d in initbase () from /usr/lib/python2.4/site-packages/pygame/base.so
        #7 0x080e09bd in Py_Finalize ()
        #8 0x08055597 in Py_Main ()
        #9 0xb7e04eb0 in __libc_start_main () from /lib/tls/libc.so.6
        #10 0x08054e31 in _start ()
        """
def todo_test_Info(self):
# __doc__ (as of 2008-08-02) for pygame.display.Info:
# pygame.display.Info(): return VideoInfo
# Create a video display information object
#
# Creates a simple object containing several attributes to describe
# the current graphics environment. If this is called before
# pygame.display.set_mode() some platforms can provide information
# about the default display mode. This can also be called after
# setting the display mode to verify specific display options were
# satisfied. The VidInfo object has several attributes:
#
# hw: True if the display is hardware accelerated
# wm: True if windowed display modes can be used
# video_mem: The megabytes of video memory on the display. This is 0 if unknown
# bitsize: Number of bits used to store each pixel
# bytesize: Number of bytes used to store each pixel
# masks: Four values used to pack RGBA values into pixels
# shifts: Four values used to pack RGBA values into pixels
# losses: Four values used to pack RGBA values into pixels
# blit_hw: True if hardware Surface blitting is accelerated
# blit_hw_CC: True if hardware Surface colorkey blitting is accelerated
# blit_hw_A: True if hardware Surface pixel alpha blitting is accelerated
# blit_sw: True if software Surface blitting is accelerated
# blit_sw_CC: True if software Surface colorkey blitting is accelerated
# blit_sw_A: True if software Surface pixel alpha blitting is acclerated
# current_h, current_h: Width and height of the current video mode, or of the
# desktop mode if called before the display.set_mode is called.
# (current_h, current_w are available since SDL 1.2.10, and pygame 1.8.0)
# They are -1 on error, or if an old SDL is being used.
self.fail()
if 0:
pygame.init()
inf = pygame.display.Info()
print ("before a display mode has been set")
print (inf)
self.assertNotEqual(inf.current_h, -1)
self.assertNotEqual(inf.current_w, -1)
#probably have an older SDL than 1.2.10 if -1.
screen = pygame.display.set_mode((100,100))
inf = pygame.display.Info()
print (inf)
self.assertNotEqual(inf.current_h, -1)
self.assertEqual(inf.current_h, 100)
self.assertEqual(inf.current_w, 100)
#pygame.quit()
def todo_test_flip(self):
# __doc__ (as of 2008-08-02) for pygame.display.flip:
# pygame.display.flip(): return None
# update the full display Surface to the screen
#
# This will update the contents of the entire display. If your display
# mode is using the flags pygame.HWSURFACE and pygame.DOUBLEBUF, this
# will wait for a vertical retrace and swap the surfaces. If you are
# using a different type of display mode, it will simply update the
# entire contents of the surface.
#
# When using an pygame.OPENGL display mode this will perform a gl buffer swap.
self.fail()
def todo_test_get_active(self):
# __doc__ (as of 2008-08-02) for pygame.display.get_active:
# pygame.display.get_active(): return bool
# true when the display is active on the display
#
# After pygame.display.set_mode() is called the display Surface will
# be visible on the screen. Most windowed displays can be hidden by
# the user. If the display Surface is hidden or iconified this will
# return False.
#
self.fail()
def todo_test_get_caption(self):
# __doc__ (as of 2008-08-02) for pygame.display.get_caption:
# pygame.display.get_caption(): return (title, icontitle)
# get the current window caption
#
# Returns the title and icontitle for the display Surface. These will
# often be the same value.
#
self.fail()
def todo_test_get_driver(self):
# __doc__ (as of 2008-08-02) for pygame.display.get_driver:
# pygame.display.get_driver(): return name
# get the name of the pygame display backend
#
# Pygame chooses one of many available display backends when it is
# initialized. This returns the internal name used for the display
# backend. This can be used to provide limited information about what
# display capabilities might be accelerated. See the SDL_VIDEODRIVER
# flags in pygame.display.set_mode() to see some of the common
# options.
#
self.fail()
def todo_test_get_init(self):
# __doc__ (as of 2008-08-02) for pygame.display.get_init:
# pygame.display.get_init(): return bool
# true if the display module is initialized
#
# Returns True if the pygame.display module is currently initialized.
self.fail()
def todo_test_get_surface(self):
# __doc__ (as of 2008-08-02) for pygame.display.get_surface:
# pygame.display.get_surface(): return Surface
# get a reference to the currently set display surface
#
# Return a reference to the currently set display Surface. If no
# display mode has been set this will return None.
#
self.fail()
def todo_test_get_wm_info(self):
# __doc__ (as of 2008-08-02) for pygame.display.get_wm_info:
# pygame.display.get_wm_info(): return dict
# Get information about the current windowing system
#
# Creates a dictionary filled with string keys. The strings and values
# are arbitrarily created by the system. Some systems may have no
# information and an empty dictionary will be returned. Most platforms
# will return a "window" key with the value set to the system id for
# the current display.
#
# New with pygame 1.7.1
self.fail()
def todo_test_gl_get_attribute(self):
# __doc__ (as of 2008-08-02) for pygame.display.gl_get_attribute:
# pygame.display.gl_get_attribute(flag): return value
# get the value for an opengl flag for the current display
#
# After calling pygame.display.set_mode() with the pygame.OPENGL flag,
# it is a good idea to check the value of any requested OpenGL
# attributes. See pygame.display.gl_set_attribute() for a list of
# valid flags.
#
self.fail()
def todo_test_gl_set_attribute(self):
# __doc__ (as of 2008-08-02) for pygame.display.gl_set_attribute:
# pygame.display.gl_set_attribute(flag, value): return None
# request an opengl display attribute for the display mode
#
# When calling pygame.display.set_mode() with the pygame.OPENGL flag,
# Pygame automatically handles setting the OpenGL attributes like
# color and doublebuffering. OpenGL offers several other attributes
# you may want control over. Pass one of these attributes as the flag,
# and its appropriate value. This must be called before
# pygame.display.set_mode()
#
# The OPENGL flags are;
# GL_ALPHA_SIZE, GL_DEPTH_SIZE, GL_STENCIL_SIZE, GL_ACCUM_RED_SIZE,
# GL_ACCUM_GREEN_SIZE, GL_ACCUM_BLUE_SIZE, GL_ACCUM_ALPHA_SIZE,
# GL_MULTISAMPLEBUFFERS, GL_MULTISAMPLESAMPLES, GL_STEREO
self.fail()
def todo_test_iconify(self):
# __doc__ (as of 2008-08-02) for pygame.display.iconify:
# pygame.display.iconify(): return bool
# iconify the display surface
#
# Request the window for the display surface be iconified or hidden.
# Not all systems and displays support an iconified display. The
# function will return True if successfull.
#
# When the display is iconified pygame.display.get_active() will
# return False. The event queue should receive a ACTIVEEVENT event
# when the window has been iconified.
#
self.fail()
def todo_test_init(self):
# __doc__ (as of 2008-08-02) for pygame.display.init:
# pygame.display.init(): return None
# initialize the display module
#
# Initializes the pygame display module. The display module cannot do
# anything until it is initialized. This is usually handled for you
# automatically when you call the higher level pygame.init().
#
# Pygame will select from one of several internal display backends
# when it is initialized. The display mode will be chosen depending on
# the platform and permissions of current user. Before the display
# module is initialized the environment variable SDL_VIDEODRIVER can
# be set to control which backend is used. The systems with multiple
# choices are listed here.
#
# Windows : windib, directx
# Unix : x11, dga, fbcon, directfb, ggi, vgl, svgalib, aalib
# On some platforms it is possible to embed the pygame display into an
# already existing window. To do this, the environment variable
# SDL_WINDOWID must be set to a string containing the window id or
# handle. The environment variable is checked when the pygame display
# is initialized. Be aware that there can be many strange side effects
# when running in an embedded display.
#
# It is harmless to call this more than once, repeated calls have no effect.
self.fail()
def todo_test_list_modes(self):
# __doc__ (as of 2008-08-02) for pygame.display.list_modes:
# pygame.display.list_modes(depth=0, flags=pygame.FULLSCREEN): return list
# get list of available fullscreen modes
#
# This function returns a list of possible dimensions for a specified
# color depth. The return value will be an empty list if no display
# modes are available with the given arguments. A return value of -1
# means that any requested resolution should work (this is likely the
# case for windowed modes). Mode sizes are sorted from biggest to
# smallest.
#
# If depth is 0, SDL will choose the current/best color depth for the
# display. The flags defaults to pygame.FULLSCREEN, but you may need
# to add additional flags for specific fullscreen modes.
#
self.fail()
def todo_test_mode_ok(self):
# __doc__ (as of 2008-08-02) for pygame.display.mode_ok:
# pygame.display.mode_ok(size, flags=0, depth=0): return depth
# pick the best color depth for a display mode
#
# This function uses the same arguments as pygame.display.set_mode().
# It is used to depermine if a requested display mode is available. It
# will return 0 if the display mode cannot be set. Otherwise it will
# return a pixel depth that best matches the display asked for.
#
# Usually the depth argument is not passed, but some platforms can
# support multiple display depths. If passed it will hint to which
# depth is a better match.
#
# The most useful flags to pass will be pygame.HWSURFACE,
# pygame.DOUBLEBUF, and maybe pygame.FULLSCREEN. The function will
# return 0 if these display flags cannot be set.
#
self.fail()
def todo_test_quit(self):
# __doc__ (as of 2008-08-02) for pygame.display.quit:
# pygame.display.quit(): return None
# uninitialize the display module
#
# This will shut down the entire display module. This means any active
# displays will be closed. This will also be handled automatically
# when the program exits.
#
# It is harmless to call this more than once, repeated calls have no effect.
self.fail()
def todo_test_set_caption(self):
# __doc__ (as of 2008-08-02) for pygame.display.set_caption:
# pygame.display.set_caption(title, icontitle=None): return None
# set the current window caption
#
# If the display has a window title, this function will change the
# name on the window. Some systems support an alternate shorter title
# to be used for minimized displays.
#
self.fail()
def todo_test_set_gamma(self):
# __doc__ (as of 2008-08-02) for pygame.display.set_gamma:
# pygame.display.set_gamma(red, green=None, blue=None): return bool
# change the hardware gamma ramps
#
# Set the red, green, and blue gamma values on the display hardware.
# If the green and blue arguments are not passed, they will both be
# the same as red. Not all systems and hardware support gamma ramps,
# if the function succeeds it will return True.
#
# A gamma value of 1.0 creates a linear color table. Lower values will
# darken the display and higher values will brighten.
#
self.fail()
def todo_test_set_gamma_ramp(self):
# __doc__ (as of 2008-08-02) for pygame.display.set_gamma_ramp:
# change the hardware gamma ramps with a custom lookup
# pygame.display.set_gamma_ramp(red, green, blue): return bool
# set_gamma_ramp(red, green, blue): return bool
#
# Set the red, green, and blue gamma ramps with an explicit lookup
# table. Each argument should be sequence of 256 integers. The
# integers should range between 0 and 0xffff. Not all systems and
# hardware support gamma ramps, if the function succeeds it will
# return True.
#
self.fail()
def todo_test_set_icon(self):
# __doc__ (as of 2008-08-02) for pygame.display.set_icon:
# pygame.display.set_icon(Surface): return None
# change the system image for the display window
#
# Sets the runtime icon the system will use to represent the display
# window. All windows default to a simple pygame logo for the window
# icon.
#
# You can pass any surface, but most systems want a smaller image
# around 32x32. The image can have colorkey transparency which will be
# passed to the system.
#
# Some systems do not allow the window icon to change after it has
# been shown. This function can be called before
# pygame.display.set_mode() to create the icon before the display mode
# is set.
#
self.fail()
def todo_test_set_mode(self):
# __doc__ (as of 2008-08-02) for pygame.display.set_mode:
# pygame.display.set_mode(resolution=(0,0), flags=0, depth=0): return Surface
# initialize a window or screen for display
#
# This function will create a display Surface. The arguments passed in
# are requests for a display type. The actual created display will be
# the best possible match supported by the system.
#
# The resolution argument is a pair of numbers representing the width
# and height. The flags argument is a collection of additional
# options. The depth argument represents the number of bits to use
# for color.
#
# The Surface that gets returned can be drawn to like a regular
# Surface but changes will eventually be seen on the monitor.
#
# If no resolution is passed or is set to (0, 0) and pygame uses SDL
# version 1.2.10 or above, the created Surface will have the same size
# as the current screen resolution. If only the width or height are
# set to 0, the Surface will have the same width or height as the
# screen resolution. Using a SDL version prior to 1.2.10 will raise an
# exception.
#
# It is usually best to not pass the depth argument. It will default
# to the best and fastest color depth for the system. If your game
# requires a specific color format you can control the depth with this
# argument. Pygame will emulate an unavailable color depth which can
# be slow.
#
# When requesting fullscreen display modes, sometimes an exact match
# for the requested resolution cannot be made. In these situations
# pygame will select the closest compatable match. The returned
# surface will still always match the requested resolution.
#
# The flags argument controls which type of display you want. There
# are several to choose from, and you can even combine multiple types
# using the bitwise or operator, (the pipe "|" character). If you pass
# 0 or no flags argument it will default to a software driven window.
# Here are the display flags you will want to choose from:
#
# pygame.FULLSCREEN create a fullscreen display
# pygame.DOUBLEBUF recommended for HWSURFACE or OPENGL
# pygame.HWSURFACE hardware accelerated, only in FULLSCREEN
# pygame.OPENGL create an opengl renderable display
# pygame.RESIZABLE display window should be sizeable
# pygame.NOFRAME display window will have no border or controls
self.fail()
def todo_test_set_palette(self):
# __doc__ (as of 2008-08-02) for pygame.display.set_palette:
# pygame.display.set_palette(palette=None): return None
# set the display color palette for indexed displays
#
# This will change the video display color palette for 8bit displays.
# This does not change the palette for the actual display Surface,
# only the palette that is used to display the Surface. If no palette
# argument is passed, the system default palette will be restored. The
# palette is a sequence of RGB triplets.
#
self.fail()
def todo_test_toggle_fullscreen(self):
# __doc__ (as of 2008-08-02) for pygame.display.toggle_fullscreen:
# pygame.display.toggle_fullscreen(): return bool
# switch between fullscreen and windowed displays
#
# Switches the display window between windowed and fullscreen modes.
# This function only works under the unix x11 video driver. For most
# situations it is better to call pygame.display.set_mode() with new
# display flags.
#
self.fail()
if __name__ == '__main__':
unittest.main()
| agpl-3.0 |
badloop/SickRage | lib/sqlalchemy/testing/config.py | 76 | 2116 | # testing/config.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import collections
requirements = None
db = None
db_url = None
db_opts = None
file_config = None
_current = None
class Config(object):
    """One per-database testing configuration: engine, engine options,
    command-line options and the parsed file config.

    Class-level state tracks every registered Config plus a stack used to
    temporarily switch which one is "current".
    """

    def __init__(self, db, db_opts, options, file_config):
        self.db = db
        self.db_opts = db_opts
        self.options = options
        self.file_config = file_config

    _stack = collections.deque()
    _configs = {}

    @classmethod
    def register(cls, db, db_opts, options, file_config, namespace):
        """add a config as one of the global configs.

        If there are no configs set up yet, this config also
        gets set as the "_current".
        """
        cfg = Config(db, db_opts, options, file_config)
        global _current
        if not _current:
            cls.set_as_current(cfg, namespace)
        # Index the config under the engine name, (name, dialect) pair,
        # and the engine object itself.
        for key in (cfg.db.name, (cfg.db.name, cfg.db.dialect), cfg.db):
            cls._configs[key] = cfg

    @classmethod
    def set_as_current(cls, config, namespace):
        # Mirror the chosen config into the module globals and into the
        # caller-supplied namespace (usually a test module).
        global db, _current, db_url
        _current = config
        db_url = config.db.url
        namespace.db = db = config.db

    @classmethod
    def push_engine(cls, db, namespace):
        assert _current, "Can't push without a default Config set up"
        derived = Config(
            db, _current.db_opts, _current.options, _current.file_config)
        cls.push(derived, namespace)

    @classmethod
    def push(cls, config, namespace):
        # Remember the current config so reset() can restore the default.
        cls._stack.append(_current)
        cls.set_as_current(config, namespace)

    @classmethod
    def reset(cls, namespace):
        if cls._stack:
            # The bottom of the stack is the original default config.
            cls.set_as_current(cls._stack[0], namespace)
            cls._stack.clear()

    @classmethod
    def all_configs(cls):
        # Each config is registered under several keys; de-duplicate.
        for cfg in set(cls._configs.values()):
            yield cfg

    @classmethod
    def all_dbs(cls):
        for cfg in cls.all_configs():
            yield cfg.db
| gpl-3.0 |
knabar/openmicroscopy | components/tools/OmeroWeb/omeroweb/feedback/templatetags/feedback_tags.py | 15 | 1455 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
#
# Copyright (c) 2008 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>
#
# Version: 1.0
#
import logging
from django import template
from django.templatetags.static import PrefixNode
register = template.Library()
logger = logging.getLogger(__name__)
@register.tag()
def get_static_feedback_prefix(parser, token):
    """
    Populate a template variable with the feedback static prefix,
    ``settings.FEEDBACK_STATIC_URL``.

    Usage::

        {% get_static_feedback_prefix [as varname] %}

    Examples::

        {% get_static_feedback_prefix %}
        {% get_static_feedback_prefix as STATIC_FEEDBACK_PREFIX %}
    """
    # Delegate token parsing and node construction to Django's PrefixNode.
    node = PrefixNode.handle_token(parser, token, "STATIC_FEEDBACK_URL")
    return node
| gpl-2.0 |
perlygatekeeper/glowing-robot | Little_Alchemy_2/Scraper_python/env/lib/python3.7/site-packages/requests/utils.py | 28 | 30176 | # -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import codecs
import contextlib
import io
import os
import re
import socket
import struct
import sys
import tempfile
import warnings
import zipfile
from collections import OrderedDict
from .__version__ import __version__
from . import certs
# to_native_string is unused here, but imported here for backwards compatibility
from ._internal_utils import to_native_string
from .compat import parse_http_list as _parse_list_header
from .compat import (
quote, urlparse, bytes, str, unquote, getproxies,
proxy_bypass, urlunparse, basestring, integer_types, is_py3,
proxy_bypass_environment, getproxies_environment, Mapping)
from .cookies import cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import (
InvalidURL, InvalidHeader, FileModeWarning, UnrewindableBodyError)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
DEFAULT_PORTS = {'http': 80, 'https': 443}
if sys.platform == 'win32':
    # provide a proxy_bypass version on Windows without DNS lookups

    def proxy_bypass_registry(host):
        # Consult the Internet Settings registry key to decide whether
        # *host* should bypass the proxy.  Returns False on any failure.
        try:
            if is_py3:
                import winreg
            else:
                import _winreg as winreg
        except ImportError:
            return False

        try:
            internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
                r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
            # ProxyEnable could be REG_SZ or REG_DWORD, normalizing it
            proxyEnable = int(winreg.QueryValueEx(internetSettings,
                                                  'ProxyEnable')[0])
            # ProxyOverride is almost always a string
            proxyOverride = winreg.QueryValueEx(internetSettings,
                                                'ProxyOverride')[0]
        except OSError:
            return False
        if not proxyEnable or not proxyOverride:
            return False

        # make a check value list from the registry entry: replace the
        # '<local>' string by the localhost entry and the corresponding
        # canonical entry.
        proxyOverride = proxyOverride.split(';')
        # now check if we match one of the registry values.
        # Entries are glob-style patterns; translate each into a regex.
        for test in proxyOverride:
            if test == '<local>':
                # '<local>' matches any unqualified (dot-free) host name.
                if '.' not in host:
                    return True
            test = test.replace(".", r"\.")  # mask dots
            test = test.replace("*", r".*")  # change glob sequence
            test = test.replace("?", r".")   # change glob char
            if re.match(test, host, re.I):
                return True
        return False

    def proxy_bypass(host):  # noqa
        """Return True, if the host should be bypassed.

        Checks proxy settings gathered from the environment, if specified,
        or the registry.
        """
        if getproxies_environment():
            return proxy_bypass_environment(host)
        else:
            return proxy_bypass_registry(host)
def dict_to_sequence(d):
    """Returns an internal sequence dictionary update."""
    # Mappings are flattened to their (key, value) pairs; anything else is
    # passed through untouched.
    return d.items() if hasattr(d, 'items') else d
def super_len(o):
    """Best-effort count of the bytes/items left to read from *o*.

    Handles plain sequences (``__len__``), objects exposing a ``.len``
    attribute, real files (``fileno``/``fstat``) and seekable streams,
    subtracting the current read position so partially-consumed objects
    report only what remains.  Returns 0 when nothing can be determined.
    """
    total_length = None
    current_position = 0

    if hasattr(o, '__len__'):
        total_length = len(o)

    elif hasattr(o, 'len'):
        # Some file-like wrappers expose an explicit .len attribute.
        total_length = o.len

    elif hasattr(o, 'fileno'):
        try:
            fileno = o.fileno()
        except io.UnsupportedOperation:
            # In-memory streams define fileno() but raise when it's called;
            # fall through to the tell/seek logic below.
            pass
        else:
            total_length = os.fstat(fileno).st_size

            # Having used fstat to determine the file length, we need to
            # confirm that this file was opened up in binary mode.
            if 'b' not in o.mode:
                warnings.warn((
                    "Requests has determined the content-length for this "
                    "request using the binary size of the file: however, the "
                    "file has been opened in text mode (i.e. without the 'b' "
                    "flag in the mode). This may lead to an incorrect "
                    "content-length. In Requests 3.0, support will be removed "
                    "for files in text mode."),
                    FileModeWarning
                )

    if hasattr(o, 'tell'):
        try:
            current_position = o.tell()
        except (OSError, IOError):
            # This can happen in some weird situations, such as when the file
            # is actually a special file descriptor like stdin. In this
            # instance, we don't know what the length is, so set it to zero and
            # let requests chunk it instead.
            if total_length is not None:
                current_position = total_length
        else:
            if hasattr(o, 'seek') and total_length is None:
                # StringIO and BytesIO have seek but no useable fileno
                try:
                    # seek to end of file
                    o.seek(0, 2)
                    total_length = o.tell()

                    # seek back to current position to support
                    # partially read file-like objects
                    o.seek(current_position or 0)
                except (OSError, IOError):
                    total_length = 0

    if total_length is None:
        total_length = 0

    # Never negative, even if the position ran past the known length.
    return max(0, total_length - current_position)
def get_netrc_auth(url, raise_errors=False):
    """Returns the Requests tuple auth for a given url from netrc.

    Looks for ``~/.netrc`` / ``~/_netrc`` and, when the URL's host has an
    entry, returns a ``(login, password)`` tuple.  Returns None on any
    failure unless *raise_errors* is true, in which case netrc
    parse/permission errors propagate.
    """
    try:
        from netrc import netrc, NetrcParseError

        netrc_path = None

        for f in NETRC_FILES:
            try:
                loc = os.path.expanduser('~/{}'.format(f))
            except KeyError:
                # os.path.expanduser can fail when $HOME is undefined and
                # getpwuid fails. See https://bugs.python.org/issue20164 &
                # https://github.com/psf/requests/issues/1846
                return

            if os.path.exists(loc):
                netrc_path = loc
                break

        # Abort early if there isn't one.
        if netrc_path is None:
            return

        ri = urlparse(url)

        # Strip port numbers from netloc. This weird `if...encode`` dance is
        # used for Python 3.2, which doesn't support unicode literals.
        splitstr = b':'
        if isinstance(url, str):
            splitstr = splitstr.decode('ascii')
        host = ri.netloc.split(splitstr)[0]

        try:
            _netrc = netrc(netrc_path).authenticators(host)
            if _netrc:
                # Return with login / password
                login_i = (0 if _netrc[0] else 1)
                return (_netrc[login_i], _netrc[2])
        except (NetrcParseError, IOError):
            # If there was a parsing error or a permissions issue reading the file,
            # we'll just skip netrc auth unless explicitly asked to raise errors.
            if raise_errors:
                raise

    # AppEngine hackiness.
    except (ImportError, AttributeError):
        pass
def guess_filename(obj):
    """Tries to guess the filename of the given object."""
    name = getattr(obj, 'name', None)
    if not name or not isinstance(name, basestring):
        return None
    # Skip pseudo-file names such as '<stdin>' or '<fdopen>'.
    if name.startswith('<') or name.endswith('>'):
        return None
    return os.path.basename(name)
def extract_zipped_paths(path):
    """Replace nonexistent paths that look like they refer to a member of a
    zip archive with the location of an extracted copy of the target, or
    else just return the provided path unchanged.
    """
    if os.path.exists(path):
        # Already a valid filesystem path; nothing to do.
        return path

    # Walk up the path until an existing prefix is found, treating that
    # prefix as a zip archive and the remainder as a member name inside it.
    candidate, member = os.path.split(path)
    while candidate and not os.path.exists(candidate):
        candidate, leading = os.path.split(candidate)
        member = '/'.join([leading, member])

    if not zipfile.is_zipfile(candidate):
        return path

    archive = zipfile.ZipFile(candidate)
    if member not in archive.namelist():
        return path

    # We have a valid archive and member: extract (once) into the temp dir
    # and hand back the path of the extracted copy.
    extracted = os.path.join(tempfile.gettempdir(), *member.split('/'))
    if not os.path.exists(extracted):
        extracted = archive.extract(member, path=tempfile.gettempdir())
    return extracted
def from_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. Unless it can not be represented as such, return an
    OrderedDict, e.g.,

    ::

        >>> from_key_val_list([('key', 'val')])
        OrderedDict([('key', 'val')])
        >>> from_key_val_list('string')
        Traceback (most recent call last):
        ...
        ValueError: cannot encode objects that are not 2-tuples
        >>> from_key_val_list({'key': 'val'})
        OrderedDict([('key', 'val')])

    :rtype: OrderedDict
    """
    if value is None:
        return None

    # Scalars cannot be interpreted as sequences of key/value pairs.
    scalar_types = (str, bytes, bool, int)
    if isinstance(value, scalar_types):
        raise ValueError('cannot encode objects that are not 2-tuples')

    return OrderedDict(value)
def to_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. If it can be, return a list of tuples, e.g.,

    ::

        >>> to_key_val_list([('key', 'val')])
        [('key', 'val')]
        >>> to_key_val_list({'key': 'val'})
        [('key', 'val')]
        >>> to_key_val_list('string')
        Traceback (most recent call last):
        ...
        ValueError: cannot encode objects that are not 2-tuples

    :rtype: list
    """
    if value is None:
        return None

    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError('cannot encode objects that are not 2-tuples')

    # Mappings are flattened into their (key, value) pairs first.
    pairs = value.items() if isinstance(value, Mapping) else value
    return list(pairs)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
    """Parse lists as described by RFC 2068 Section 2.

    In particular, parse comma-separated lists where the elements of
    the list may include quoted-strings.  A quoted-string could
    contain a comma.  A non-quoted string could have quotes in the
    middle.  Quotes are removed automatically after parsing.

    Works like :func:`parse_set_header` except items may appear multiple
    times and case sensitivity is preserved.  The return value is a
    standard :class:`list`:

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    To create a header from the :class:`list` again, use the
    :func:`dump_header` function.

    :param value: a string with a list header.
    :return: :class:`list`
    :rtype: list
    """
    return [
        unquote_header_value(item[1:-1]) if item[:1] == item[-1:] == '"' else item
        for item in _parse_list_header(value)
    ]
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
    convert them into a python dict:

    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
    >>> type(d) is dict
    True
    >>> sorted(d.items())
    [('bar', 'as well'), ('foo', 'is a fish')]

    If there is no value for a key it will be `None`:

    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}

    To create a header from the :class:`dict` again, use the
    :func:`dump_header` function.

    :param value: a string with a dict header.
    :return: :class:`dict`
    :rtype: dict
    """
    parsed = {}
    for item in _parse_list_header(value):
        # Bare tokens carry no value at all.
        if '=' not in item:
            parsed[item] = None
            continue
        name, raw_value = item.split('=', 1)
        if raw_value[:1] == raw_value[-1:] == '"':
            raw_value = unquote_header_value(raw_value[1:-1])
        parsed[name] = raw_value
    return parsed
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
    r"""Unquotes a header value.  (Reversal of :func:`quote_header_value`).

    This does not use the real unquoting but what browsers are actually
    using for quoting.

    :param value: the header value to unquote.
    :rtype: str
    """
    # Values that aren't wrapped in double quotes are passed through as-is.
    if not (value and value[0] == value[-1] == '"'):
        return value

    # This is not the real unquoting, but fixing this so that the RFC is
    # met will result in bugs with internet explorer and probably some
    # other browsers as well.  IE for example is uploading files with
    # "C:\foo\bar.txt" as filename.
    inner = value[1:-1]

    # If this is a filename and the starting characters look like a UNC
    # path, return the value without quotes: applying the replace sequence
    # to a UNC path would collapse the leading double backslash and break
    # _fix_ie_filename().  See #458.
    if is_filename and inner[:2] == '\\\\':
        return inner
    return inner.replace('\\\\', '\\').replace('\\"', '"')
def dict_from_cookiejar(cj):
    """Returns a key/value dictionary from a CookieJar.

    :param cj: CookieJar object to extract cookies from.
    :rtype: dict
    """
    # Later cookies with the same name overwrite earlier ones, exactly as
    # an explicit loop with assignment would.
    return {cookie.name: cookie.value for cookie in cj}
def add_dict_to_cookiejar(cj, cookie_dict):
    """Returns a CookieJar from a key/value dictionary.

    :param cj: CookieJar to insert cookies into.
    :param cookie_dict: Dict of key/values to insert into CookieJar.
    :rtype: CookieJar
    """
    # Thin delegation to cookiejar_from_dict (defined elsewhere in this
    # module); presumably it merges cookie_dict into cj and returns the
    # jar -- confirm against that helper's implementation.
    return cookiejar_from_dict(cookie_dict, cj)
def get_encodings_from_content(content):
    """Returns encodings from given content string.

    :param content: bytestring to extract encodings from.
    """
    warnings.warn((
        'In requests 3.0, get_encodings_from_content will be removed. For '
        'more information, please see the discussion on issue #2266. (This'
        ' warning should only appear once.)'),
        DeprecationWarning)

    # Order matters: meta charset first, then http-equiv pragma, then the
    # XML declaration -- results are concatenated in that order.
    patterns = (
        re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I),
        re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I),
        re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]'),
    )
    found = []
    for pattern in patterns:
        found.extend(pattern.findall(content))
    return found
def _parse_content_type_header(header):
"""Returns content type and parameters from given header
:param header: string
:return: tuple containing content type and dictionary of
parameters
"""
tokens = header.split(';')
content_type, params = tokens[0].strip(), tokens[1:]
params_dict = {}
items_to_strip = "\"' "
for param in params:
param = param.strip()
if param:
key, value = param, True
index_of_equals = param.find("=")
if index_of_equals != -1:
key = param[:index_of_equals].strip(items_to_strip)
value = param[index_of_equals + 1:].strip(items_to_strip)
params_dict[key.lower()] = value
return content_type, params_dict
def get_encoding_from_headers(headers):
    """Returns encodings from given HTTP Header Dict.

    :param headers: dictionary to extract encoding from.
    :rtype: str
    """
    raw = headers.get('content-type')
    if not raw:
        return None

    content_type, params = _parse_content_type_header(raw)

    if 'charset' in params:
        return params['charset'].strip("'\"")

    # RFC 2616 default for text/* when no charset is declared.
    if 'text' in content_type:
        return 'ISO-8859-1'
    return None
def stream_decode_response_unicode(iterator, r):
    """Stream decodes a iterator."""
    encoding = r.encoding
    if encoding is None:
        # No encoding known: pass chunks through untouched.
        for chunk in iterator:
            yield chunk
        return

    decoder = codecs.getincrementaldecoder(encoding)(errors='replace')
    for chunk in iterator:
        decoded = decoder.decode(chunk)
        if decoded:
            yield decoded
    # Flush anything buffered by a trailing partial multi-byte sequence.
    tail = decoder.decode(b'', final=True)
    if tail:
        yield tail
def iter_slices(string, slice_length):
    """Iterate over slices of a string."""
    # Non-positive or missing lengths mean "one slice with everything".
    if slice_length is None or slice_length <= 0:
        slice_length = len(string)
    if not string:
        return
    for start in range(0, len(string), slice_length):
        yield string[start:start + slice_length]
def get_unicode_from_response(r):
    """Returns the requested content back in unicode.

    :param r: Response object to get unicode content from.

    Tried:

    1. charset from content-type
    2. fall back and replace all unicode characters

    :rtype: str
    """
    warnings.warn((
        'In requests 3.0, get_unicode_from_response will be removed. For '
        'more information, please see the discussion on issue #2266. (This'
        ' warning should only appear once.)'),
        DeprecationWarning)

    tried_encodings = []

    # First attempt: honour the charset advertised in the headers.
    charset = get_encoding_from_headers(r.headers)
    if charset:
        try:
            return str(r.content, charset)
        except UnicodeError:
            tried_encodings.append(charset)

    try:
        # Second attempt: same charset, replacing undecodable bytes.
        return str(r.content, charset, errors='replace')
    except TypeError:
        # charset was None; hand the raw bytes back unchanged.
        return r.content
# The unreserved URI characters (RFC 3986 section 2.3): these are safe to
# leave percent-decoded in any URI; everything else stays escaped.
UNRESERVED_SET = frozenset(
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~")
def unquote_unreserved(uri):
    """Un-escape any percent-escape sequences in a URI that are unreserved
    characters. This leaves all reserved, illegal and non-ASCII bytes encoded.

    :rtype: str
    """
    pieces = uri.split('%')
    rebuilt = [pieces[0]]
    # Every piece after the first originally followed a '%'.
    for piece in pieces[1:]:
        hex_pair = piece[0:2]
        if len(hex_pair) != 2 or not hex_pair.isalnum():
            rebuilt.append('%' + piece)
            continue
        try:
            char = chr(int(hex_pair, 16))
        except ValueError:
            raise InvalidURL("Invalid percent-escape sequence: '%s'" % hex_pair)
        if char in UNRESERVED_SET:
            # Unreserved character: decode it in place.
            rebuilt.append(char + piece[2:])
        else:
            rebuilt.append('%' + piece)
    return ''.join(rebuilt)
def requote_uri(uri):
    """Re-quote the given URI.

    This function passes the given URI through an unquote/quote cycle to
    ensure that it is fully and consistently quoted.

    :rtype: str
    """
    safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
    safe_without_percent = "!#$&'()*+,/:;=?@[]~"
    try:
        # Unquote only the unreserved characters, then quote only the
        # illegal ones (reserved, unreserved and '%' are left alone).
        return quote(unquote_unreserved(uri), safe=safe_with_percent)
    except InvalidURL:
        # The URI contained an invalid percent-escape; quote it as-is, so
        # that stray unquoted '%' characters cause no issues elsewhere.
        return quote(uri, safe=safe_without_percent)
def address_in_network(ip, net):
    """This function allows you to check if an IP belongs to a network subnet

    Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
             returns False if ip = 192.168.1.1 and net = 192.168.100.0/24

    :rtype: bool
    """
    def _as_long(dotted):
        # Native-byte-order unsigned long for a dotted-quad address.
        return struct.unpack('=L', socket.inet_aton(dotted))[0]

    netaddr, bits = net.split('/')
    netmask = _as_long(dotted_netmask(int(bits)))
    network = _as_long(netaddr) & netmask
    return (_as_long(ip) & netmask) == (network & netmask)
def dotted_netmask(mask):
    """Converts mask from /xx format to xxx.xxx.xxx.xxx

    Example: if mask is 24 function returns 255.255.255.0

    :rtype: str
    """
    # Set the high `mask` bits by clearing the low (32 - mask) host bits.
    host_bits = 32 - mask
    netmask_long = 0xffffffff ^ ((1 << host_bits) - 1)
    return socket.inet_ntoa(struct.pack('>I', netmask_long))
def is_ipv4_address(string_ip):
    """Whether *string_ip* parses as an IPv4 address.

    :rtype: bool
    """
    try:
        socket.inet_aton(string_ip)
        return True
    except socket.error:
        return False
def is_valid_cidr(string_network):
    """
    Very simple check of the cidr format in no_proxy variable.

    :rtype: bool
    """
    # Exactly one '/' separates address from prefix length.
    if string_network.count('/') != 1:
        return False

    address, _, mask_text = string_network.partition('/')
    try:
        mask = int(mask_text)
    except ValueError:
        return False
    if not 1 <= mask <= 32:
        return False
    try:
        socket.inet_aton(address)
    except socket.error:
        return False
    return True
@contextlib.contextmanager
def set_environ(env_name, value):
    """Set the environment variable 'env_name' to 'value'

    Save previous value, yield, and then restore the previous value stored in
    the environment variable 'env_name'.

    If 'value' is None, do nothing"""
    if value is None:
        # Nothing to change; still behave as a context manager.
        yield
        return

    old_value = os.environ.get(env_name)
    os.environ[env_name] = value
    try:
        yield
    finally:
        # Restore the previous state, deleting the variable entirely if it
        # did not exist before.
        if old_value is None:
            del os.environ[env_name]
        else:
            os.environ[env_name] = old_value
def should_bypass_proxies(url, no_proxy):
    """
    Returns whether we should bypass proxies or not.

    :param url: the URL being requested.
    :param no_proxy: comma-separated host list overriding the ``no_proxy``
        environment variable, or ``None`` to consult the environment.
    :rtype: bool
    """
    # Prioritize lowercase environment variables over uppercase
    # to keep a consistent behaviour with other http projects (curl, wget).
    get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())

    # First check whether no_proxy is defined. If it is, check that the URL
    # we're getting isn't in the no_proxy list.
    no_proxy_arg = no_proxy
    if no_proxy is None:
        no_proxy = get_proxy('no_proxy')
    parsed = urlparse(url)

    if parsed.hostname is None:
        # URLs don't always have hostnames, e.g. file:/// urls.
        return True

    if no_proxy:
        # We need to check whether we match here. We need to see if we match
        # the end of the hostname, both with and without the port.
        # NOTE: this is a generator, so it is consumed by exactly one of the
        # two branches below.
        no_proxy = (
            host for host in no_proxy.replace(' ', '').split(',') if host
        )

        if is_ipv4_address(parsed.hostname):
            # Compare the request IP against each no_proxy entry, honouring
            # both plain-IP and CIDR notation.
            for proxy_ip in no_proxy:
                if is_valid_cidr(proxy_ip):
                    if address_in_network(parsed.hostname, proxy_ip):
                        return True
                elif parsed.hostname == proxy_ip:
                    # If no_proxy ip was defined in plain IP notation instead of cidr notation &
                    # matches the IP of the index
                    return True
        else:
            # Hostname match: suffix-match each entry, both with and
            # without the explicit port.
            host_with_port = parsed.hostname
            if parsed.port:
                host_with_port += ':{}'.format(parsed.port)

            for host in no_proxy:
                if parsed.hostname.endswith(host) or host_with_port.endswith(host):
                    # The URL does match something in no_proxy, so we don't want
                    # to apply the proxies on this URL.
                    return True

    # Finally defer to the platform-specific rules via urllib's
    # proxy_bypass, with no_proxy temporarily forced into the environment.
    with set_environ('no_proxy', no_proxy_arg):
        # parsed.hostname can be `None` in cases such as a file URI.
        try:
            bypass = proxy_bypass(parsed.hostname)
        except (TypeError, socket.gaierror):
            bypass = False

    if bypass:
        return True

    return False
def get_environ_proxies(url, no_proxy=None):
    """
    Return a dict of environment proxies.

    :rtype: dict
    """
    # No proxies at all when the URL is in the bypass list.
    if should_bypass_proxies(url, no_proxy=no_proxy):
        return {}
    return getproxies()
def select_proxy(url, proxies):
    """Select a proxy for the url, if applicable.

    :param url: The url being for the request
    :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
    """
    proxies = proxies or {}
    urlparts = urlparse(url)
    if urlparts.hostname is None:
        # Host-less URLs (e.g. file://) can only match by scheme.
        return proxies.get(urlparts.scheme, proxies.get('all'))

    # Most-specific key wins: scheme+host, scheme, any-scheme+host, any.
    candidates = (
        urlparts.scheme + '://' + urlparts.hostname,
        urlparts.scheme,
        'all://' + urlparts.hostname,
        'all',
    )
    for key in candidates:
        if key in proxies:
            return proxies[key]
    return None
def default_user_agent(name="python-requests"):
    """
    Return a string representing the default user agent.

    :rtype: str
    """
    return '{0}/{1}'.format(name, __version__)
def default_headers():
    """
    :rtype: requests.structures.CaseInsensitiveDict
    """
    headers = CaseInsensitiveDict()
    headers['User-Agent'] = default_user_agent()
    headers['Accept-Encoding'] = ', '.join(('gzip', 'deflate'))
    headers['Accept'] = '*/*'
    headers['Connection'] = 'keep-alive'
    return headers
def parse_header_links(value):
    """Return a list of parsed link headers proxies.

    i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"

    :rtype: list
    """
    parsed = []
    strip_chars = ' \'"'

    value = value.strip(strip_chars)
    if not value:
        return parsed

    # Each link starts with '<url>'; split on the comma that precedes '<'.
    for segment in re.split(', *<', value):
        try:
            target, params = segment.split(';', 1)
        except ValueError:
            target, params = segment, ''

        link = {'url': target.strip('<> \'"')}

        for param in params.split(';'):
            try:
                key, raw = param.split('=')
            except ValueError:
                # Malformed parameter terminates parsing for this link.
                break
            link[key.strip(strip_chars)] = raw.strip(strip_chars)

        parsed.append(link)

    return parsed
# Null bytes; no need to recreate these on each call to guess_json_utf.
# Used below to detect UTF-16/UTF-32 encodings by their null-byte layout.
_null = '\x00'.encode('ascii')  # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3
def guess_json_utf(data):
    """
    :rtype: str
    """
    # JSON always starts with two ASCII characters, so detection is as easy
    # as counting the nulls and, from their location and count, determining
    # the encoding. Also detect a BOM, if present.
    sample = data[:4]

    # Byte-order marks are unambiguous, so check them first.
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
        return 'utf-32'      # BOM included
    if sample[:3] == codecs.BOM_UTF8:
        return 'utf-8-sig'   # BOM included, MS style (discouraged)
    if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
        return 'utf-16'      # BOM included

    # No BOM: infer from where null bytes fall in the first four bytes.
    nullcount = sample.count(_null)
    if nullcount == 0:
        return 'utf-8'
    if nullcount == 2:
        if sample[::2] == _null2:    # 1st and 3rd are null
            return 'utf-16-be'
        if sample[1::2] == _null2:   # 2nd and 4th are null
            return 'utf-16-le'
        # Did not detect 2 valid UTF-16 ascii-range characters
    elif nullcount == 3:
        if sample[:3] == _null3:
            return 'utf-32-be'
        if sample[1:] == _null3:
            return 'utf-32-le'
        # Did not detect a valid UTF-32 ascii-range character
    return None
def prepend_scheme_if_needed(url, new_scheme):
    """Given a URL that may or may not have a scheme, prepend the given scheme.
    Does not replace a present scheme with the one provided as an argument.

    :rtype: str
    """
    parsed = urlparse(url, new_scheme)

    # urlparse is a finicky beast, and sometimes decides that there isn't a
    # netloc present. Assume that it's being over-cautious, and switch netloc
    # and path if urlparse decided there was no netloc.
    netloc, path = parsed.netloc, parsed.path
    if not netloc:
        netloc, path = path, netloc

    return urlunparse(
        (parsed.scheme, netloc, path, parsed.params, parsed.query,
         parsed.fragment))
def get_auth_from_url(url):
    """Given a url with authentication components, extract them into a tuple of
    username,password.

    :rtype: (str,str)
    """
    parsed = urlparse(url)
    try:
        # unquote raises TypeError on None (no credentials in the URL).
        return (unquote(parsed.username), unquote(parsed.password))
    except (AttributeError, TypeError):
        return ('', '')
# Moved outside of function to avoid recompile every call.
# A valid header value is empty, or starts with a non-whitespace character
# and contains no CR/LF anywhere (prevents header injection).
_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$')
_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$')
def check_header_validity(header):
    """Verifies that header value is a string which doesn't contain
    leading whitespace or return characters. This prevents unintended
    header injection.

    :param header: tuple, in the format (name, value).
    """
    name, value = header

    # Match against the pattern of the value's own type (bytes or text).
    if isinstance(value, bytes):
        pat = _CLEAN_HEADER_REGEX_BYTE
    else:
        pat = _CLEAN_HEADER_REGEX_STR
    try:
        if not pat.match(value):
            raise InvalidHeader("Invalid return character or leading space in header: %s" % name)
    except TypeError:
        # A non-str/bytes value makes .match blow up with TypeError.
        raise InvalidHeader("Value for header {%s: %s} must be of type str or "
                            "bytes, not %s" % (name, value, type(value)))
def urldefragauth(url):
    """
    Given a url remove the fragment and the authentication part.

    :rtype: str
    """
    parsed = urlparse(url)

    # see func:`prepend_scheme_if_needed`
    netloc, path = parsed.netloc, parsed.path
    if not netloc:
        netloc, path = path, netloc

    # Drop any 'user:pass@' prefix; the fragment is replaced with ''.
    netloc = netloc.rsplit('@', 1)[-1]

    return urlunparse(
        (parsed.scheme, netloc, path, parsed.params, parsed.query, ''))
def rewind_body(prepared_request):
    """Move file pointer back to its recorded starting position
    so it can be read again on redirect.
    """
    body_seek = getattr(prepared_request.body, 'seek', None)

    # Without a seek method and a recorded integer position, rewinding is
    # impossible.
    if body_seek is None or not isinstance(
            prepared_request._body_position, integer_types):
        raise UnrewindableBodyError("Unable to rewind request body for redirect.")

    try:
        body_seek(prepared_request._body_position)
    except (IOError, OSError):
        raise UnrewindableBodyError("An error occurred when rewinding request "
                                    "body for redirect.")
| artistic-2.0 |
getredash/redash | redash/authentication/remote_user_auth.py | 3 | 1814 | import logging
from flask import redirect, url_for, Blueprint, request
from redash.authentication import (
create_and_login_user,
logout_and_redirect_to_index,
get_next_path,
)
from redash.authentication.org_resolving import current_org
from redash.handlers.base import org_scoped_rule
from redash import settings
# Module-level logger and the Flask blueprint that exposes the
# remote-user login endpoint registered below.
logger = logging.getLogger("remote_user_auth")
blueprint = Blueprint("remote_user_auth", __name__)
@blueprint.route(org_scoped_rule("/remote_user/login"))
def login(org_slug=None):
    """Log in the user identified by the trusted remote-user header.

    Redirects to the sanitized ``next`` path (or the index) after login, or
    back to the index on any configuration/header problem.
    """
    unsafe_next_path = request.args.get("next")
    next_path = get_next_path(unsafe_next_path)

    if not settings.REMOTE_USER_LOGIN_ENABLED:
        logger.error(
            "Cannot use remote user for login without being enabled in settings"
        )
        return redirect(url_for("redash.index", next=next_path, org_slug=org_slug))

    email = request.headers.get(settings.REMOTE_USER_HEADER)

    # Some Apache auth configurations will, stupidly, set (null) instead of a
    # falsey value. Special case that here so it Just Works for more installs.
    # '(null)' should never really be a value that anyone wants to legitimately
    # use as a redash user email.
    if email == "(null)" or not email:
        logger.error(
            "Cannot use remote user for login when it's not provided in the request (looked in headers['"
            + settings.REMOTE_USER_HEADER
            + "'])"
        )
        return redirect(url_for("redash.index", next=next_path, org_slug=org_slug))

    logger.info("Logging in " + email + " via remote user")

    user = create_and_login_user(current_org, email, email)
    if user is None:
        return logout_and_redirect_to_index()

    return redirect(next_path or url_for("redash.index", org_slug=org_slug), code=302)
| bsd-2-clause |
yurac/python-docx | docx/image/image.py | 12 | 8029 | # encoding: utf-8
"""
Provides objects that can characterize image streams as to content type and
size, as a required step in including them in a document.
"""
from __future__ import absolute_import, division, print_function
import hashlib
import os
from ..compat import BytesIO, is_string
from .exceptions import UnrecognizedImageError
from ..shared import Emu, Inches, lazyproperty
class Image(object):
    """
    Graphical image stream such as JPEG, PNG, or GIF with properties and
    methods required by ImagePart.

    Instances are normally created via :meth:`from_blob` or
    :meth:`from_file`, which parse the image header to determine content
    type and dimensions.
    """
    def __init__(self, blob, filename, image_header):
        super(Image, self).__init__()
        # raw image bytes
        self._blob = blob
        # original or synthesized file name (never None after _from_stream)
        self._filename = filename
        # parsed |BaseImageHeader| subclass instance with size/dpi info
        self._image_header = image_header

    @classmethod
    def from_blob(cls, blob):
        """
        Return a new |Image| subclass instance parsed from the image binary
        contained in *blob*.
        """
        stream = BytesIO(blob)
        return cls._from_stream(stream, blob)

    @classmethod
    def from_file(cls, image_descriptor):
        """
        Return a new |Image| subclass instance loaded from the image file
        identified by *image_descriptor*, a path or file-like object.
        """
        if is_string(image_descriptor):
            path = image_descriptor
            with open(path, 'rb') as f:
                blob = f.read()
            stream = BytesIO(blob)
            filename = os.path.basename(path)
        else:
            # file-like object: rewind and read its full contents
            stream = image_descriptor
            stream.seek(0)
            blob = stream.read()
            filename = None
        return cls._from_stream(stream, blob, filename)

    @property
    def blob(self):
        """
        The bytes of the image 'file'
        """
        return self._blob

    @property
    def content_type(self):
        """
        MIME content type for this image, e.g. ``'image/jpeg'`` for a JPEG
        image
        """
        return self._image_header.content_type

    @lazyproperty
    def ext(self):
        """
        The file extension for the image. If an actual one is available from
        a load filename it is used. Otherwise a canonical extension is
        assigned based on the content type. Does not contain the leading
        period, e.g. 'jpg', not '.jpg'.
        """
        # _filename is always populated: _from_stream synthesizes
        # 'image.<default_ext>' when the stream was anonymous.
        return os.path.splitext(self._filename)[1][1:]

    @property
    def filename(self):
        """
        Original image file name, if loaded from disk, or a generic filename
        if loaded from an anonymous stream.
        """
        return self._filename

    @property
    def px_width(self):
        """
        The horizontal pixel dimension of the image
        """
        return self._image_header.px_width

    @property
    def px_height(self):
        """
        The vertical pixel dimension of the image
        """
        return self._image_header.px_height

    @property
    def horz_dpi(self):
        """
        Integer dots per inch for the width of this image. Defaults to 72
        when not present in the file, as is often the case.
        """
        return self._image_header.horz_dpi

    @property
    def vert_dpi(self):
        """
        Integer dots per inch for the height of this image. Defaults to 72
        when not present in the file, as is often the case.
        """
        return self._image_header.vert_dpi

    @property
    def width(self):
        """
        A |Length| value representing the native width of the image,
        calculated from the values of `px_width` and `horz_dpi`.
        """
        return Inches(self.px_width / self.horz_dpi)

    @property
    def height(self):
        """
        A |Length| value representing the native height of the image,
        calculated from the values of `px_height` and `vert_dpi`.
        """
        return Inches(self.px_height / self.vert_dpi)

    def scaled_dimensions(self, width=None, height=None):
        """
        Return a (cx, cy) 2-tuple representing the native dimensions of this
        image scaled by applying the following rules to *width* and *height*.
        If both *width* and *height* are specified, the return value is
        (*width*, *height*); no scaling is performed. If only one is
        specified, it is used to compute a scaling factor that is then
        applied to the unspecified dimension, preserving the aspect ratio of
        the image. If both *width* and *height* are |None|, the native
        dimensions are returned. The native dimensions are calculated using
        the dots-per-inch (dpi) value embedded in the image, defaulting to 72
        dpi if no value is specified, as is often the case. The returned
        values are both |Length| objects.
        """
        if width is None and height is None:
            return self.width, self.height

        if width is None:
            scaling_factor = float(height) / float(self.height)
            width = round(self.width * scaling_factor)

        if height is None:
            scaling_factor = float(width) / float(self.width)
            height = round(self.height * scaling_factor)

        return Emu(width), Emu(height)

    @lazyproperty
    def sha1(self):
        """
        SHA1 hash digest of the image blob
        """
        return hashlib.sha1(self._blob).hexdigest()

    @classmethod
    def _from_stream(cls, stream, blob, filename=None):
        """
        Return an instance of the |Image| subclass corresponding to the
        format of the image in *stream*.
        """
        image_header = _ImageHeaderFactory(stream)
        if filename is None:
            # synthesize a generic name for anonymous streams, e.g. 'image.png'
            filename = 'image.%s' % image_header.default_ext
        return cls(blob, filename, image_header)
def _ImageHeaderFactory(stream):
    """
    Return a |BaseImageHeader| subclass instance that knows how to parse the
    headers of the image in *stream*.
    """
    from docx.image import SIGNATURES

    # The first 32 bytes are enough to match any registered signature.
    stream.seek(0)
    header = stream.read(32)

    for cls, offset, signature_bytes in SIGNATURES:
        end = offset + len(signature_bytes)
        if header[offset:end] == signature_bytes:
            return cls.from_stream(stream)
    raise UnrecognizedImageError
class BaseImageHeader(object):
    """
    Base class for image header subclasses like |Jpeg| and |Tiff|.

    Holds the pixel dimensions and dpi values parsed from an image stream;
    subclasses supply the content type and default extension.
    """
    def __init__(self, px_width, px_height, horz_dpi, vert_dpi):
        self._px_width = px_width
        self._px_height = px_height
        self._horz_dpi = horz_dpi
        self._vert_dpi = vert_dpi

    @property
    def content_type(self):
        """
        Abstract property definition, must be implemented by all subclasses.
        """
        raise NotImplementedError(
            'content_type property must be implemented by all subclasses of '
            'BaseImageHeader'
        )

    @property
    def default_ext(self):
        """
        Default filename extension for images of this type. An abstract
        property definition, must be implemented by all subclasses.
        """
        raise NotImplementedError(
            'default_ext property must be implemented by all subclasses of '
            'BaseImageHeader'
        )

    @property
    def px_width(self):
        """
        The horizontal pixel dimension of the image
        """
        return self._px_width

    @property
    def px_height(self):
        """
        The vertical pixel dimension of the image
        """
        return self._px_height

    @property
    def horz_dpi(self):
        """
        Integer dots per inch for the width of this image. Defaults to 72
        when not present in the file, as is often the case.
        """
        return self._horz_dpi

    @property
    def vert_dpi(self):
        """
        Integer dots per inch for the height of this image. Defaults to 72
        when not present in the file, as is often the case.
        """
        return self._vert_dpi
| mit |
myarjunar/inasafe | safe/definitions/keyword_properties.py | 1 | 9961 | # coding=utf-8
"""Definitions relating to layer keywords."""
from safe.utilities.i18n import tr
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "info@inasafe.org"
__revision__ = '$Format:%H$'
# Base Metadata Property
# Each `property_*` dict pairs a machine-readable key with a translated
# display name and a longer translated description for the keyword wizard.
property_organisation = {
    'key': 'property_organisation',
    'name': tr('Organisation'),
    'description': tr('An organized body of people who own the layer.')
}
property_email = {
    'key': 'property_email',
    # Was tr('Organisation') -- an apparent copy-paste from the preceding
    # property_organisation block; this entry is the author's email address.
    'name': tr('Email'),
    'description': tr('The email address of the author of the layer.')
}
# Generic metadata properties shared by every layer purpose.
property_date = {
    'key': 'property_date',
    'name': tr('Date'),
    'description': tr('The date when the layer is created.')
}
property_abstract = {
    'key': 'property_abstract',
    'name': tr('Abstract'),
    'description': tr(
        'A brief narrative summary of the content of the layer.')
}
property_title = {
    'key': 'property_title',
    'name': tr('Title'),
    'description': tr('A name of the layer.')
}
property_license = {
    'key': 'property_license',
    'name': tr('License'),
    'description': tr('A permit from an authority to use the layer.')
}
# NOTE(review): 'World Web Page' below reads like a typo for 'World Wide
# Web'; it is a translated string, so fixing it also needs a catalog update.
property_url = {
    'key': 'property_url',
    'name': tr('URL'),
    'description': tr(
        'The address of World Web Page where we can find the layer or its '
        'description.')
}
property_layer_purpose = {
    'key': 'property_layer_purpose',
    'name': tr('Layer Purpose'),
    'description': tr(
        'The purpose of the layer, it can be hazard layer, exposure layer, or '
        'aggregation layer.')
}
property_layer_mode = {
    'key': 'property_layer_mode',
    'name': tr('Layer Mode'),
    'description': tr(
        'The mode of the layer, it can be continuous or classified layer.')
}
property_layer_geometry = {
    'key': 'property_layer_geometry',
    'name': tr('Layer Geometry'),
    'description': tr(
        'The geometry type of the layer, it can be point, line, polygon, '
        'or raster.')
}
property_keyword_version = {
    'key': 'property_keyword_version',
    'name': tr('Keyword Version'),
    'description': tr(
        'The version of the keywords for example 3.5 or 4.0. It depends on '
        'the InaSAFE version and has backward compatibility for some version.')
}
property_scale = {
    'key': 'property_scale',
    'name': tr('Scale'),
    'description': tr('The default scale of the layer.')
}
property_source = {
    'key': 'property_source',
    'name': tr('Source'),
    'description': tr('The location of where does the layer comes from.')
}
# InaSAFE-specific keywords: field-concept mappings and their defaults.
property_inasafe_fields = {
    'key': 'property_inasafe_fields',
    'name': tr('InaSAFE Fields'),
    'description': tr(
        'The mapping of field to a field concept in InaSAFE. More than one '
        'field can be mapped to the same field concept. It is stored as a '
        'dictionary format where field concept key is the key of the '
        'dictionary. And the value will be the list of fields that mapped '
        'into the field concept.')
}
property_inasafe_default_values = {
    'key': 'property_inasafe_default_values',
    'name': tr('InaSAFE Default Values'),
    'description': tr(
        'If a field concept in InaSAFE does not have field to be mapped, '
        'InaSAFE default values can be used to set a default value for a '
        'field concept in InaSAFE. One field concept can only have one '
        'default value. It is stored as dictionary where field concept key is '
        'the key of the dictionary and the default value will be the value of '
        'that key.')
}
# Exposure Layer Metadata Properties
# Keywords that only apply when layer_purpose is 'exposure'.
property_exposure = {
    'key': 'property_exposure',
    'name': tr('Exposure'),
    'description': tr('The type of exposure that the layer represents.')
}
property_exposure_unit = {
    'key': 'property_exposure_unit',
    'name': tr('Exposure Unit'),
    'description': tr('The unit of the exposure that the layer represents.')
}
property_classification = {
    'key': 'property_classification',
    'name': tr('Exposure Classification'),
    'description': tr(
        'The classification of the exposure type. Some of the available '
        'values are generic_structure_classes, generic_road_classes, or '
        'data_driven_classes.')
}
property_value_map = {
    'key': 'property_value_map',
    'name': tr('Exposure Value Map'),
    'description': tr(
        'The mapping of class\'s key of the classification to some '
        'unique values.')
}
property_active_band = {
    'key': 'property_active_band',
    'name': tr('Active Band'),
    'description': tr(
        'Active band indicate which band of the layer that contains the data '
        'that the user want to use. The default value is the first band. It '
        'is only applied for multi band dataset.')
}
# Hazard Layer Metadata Properties
# Keywords that only apply when layer_purpose is 'hazard'. The embedded
# commented samples document the expected nested-dictionary shapes.
property_hazard = {
    'key': 'property_hazard',
    'name': tr('Hazard'),
    'description': tr('The type of hazard that the layer represents.')
}
property_hazard_category = {
    'key': 'property_hazard_category',
    'name': tr('Hazard Category'),
    'description': tr(
        'The category of the hazard that the layer represents. It can be '
        'single event or multiple event.')
}
property_continuous_hazard_unit = {
    'key': 'property_continuous_hazard_unit',
    'name': tr('Continuous Hazard Unit'),
    'description': tr('A unit for continuous hazard.')
}
# Used by classified hazard layers: maps raw attribute values to classes.
property_value_maps = {
    'key': 'property_value_maps',
    'name': tr('Hazard Value Maps'),
    'description': tr(
        'A collection of value mapping for each exposure type. Where exposure '
        'type key is the key. For each exposure type, there is one or more '
        'classifications and its value mapping (to indicate which class a '
        'value mapped into). There is a flag `active` to indicate which '
        'classification is the active one.')
    # Sample
    # value_maps = {
    # 'land_cover': {
    # 'flood_hazard_classes': {
    # 'active': False,
    # 'classes': {
    # 'dry': ['No', 'NO', 'dry'],
    # 'wet': ['Yes', 'YES', 'Very wet', 'wet'],
    # }
    # },
    # 'flood_petabencana_hazard_classes': {
    # 'active': True,
    # 'classes': {
    # 'low': ['No', 'NO', 'dry'],
    # 'high': ['Yes', 'YES', 'wet'],
    # 'very_high': ['Very wet', ]
    # },
    # }
    # },
    # 'road': {
    # 'flood_hazard_classes': {
    # 'active': True,
    # 'classes': {
    # 'dry': ['No', 'NO', 'dry'],
    # 'wet': ['Yes', 'YES', 'Very wet', 'wet'],
    # }
    # },
    # 'flood_petabencana_hazard_classes': {
    # 'active': False,
    # 'classes': {
    # 'low': ['No', 'NO', 'dry'],
    # 'high': ['Yes', 'YES', 'wet'],
    # 'very_high': ['Very wet', ]
    # },
    # }
    # }
    # }
}
# Used by continuous hazard layers: maps value ranges [min, max] to classes
# (minimum excluded, maximum included).
property_thresholds = {
    'key': 'property_thresholds',
    'name': tr('Hazard Thresholds'),
    'description': tr(
        'A collection of thresholds for each exposure type. Where exposure '
        'type key is the key. For each exposure type, there is one or more '
        'classifications and its thresholds (to indicate which class a '
        'range of value mapped into). The range consists of minimum value and '
        'maximum value in list. Minimum value is excluded while maximum '
        'value is included in the range. There is a flag `active` to '
        'indicate which classification is the active one.')
    # Sample
    # value_maps = {
    # 'land_cover': {
    # 'flood_hazard_classes': {
    # 'active': False,
    # 'classes': {
    # 'dry': [0, 1],
    # 'wet': [1, 999],
    # }
    # },
    # 'flood_petabencana_hazard_classes': {
    # 'active': True,
    # 'classes': {
    # 'low': [0, 1],
    # 'high': [1, 5],
    # 'very_high': [5, 999]
    # },
    # }
    # },
    # 'road': {
    # 'flood_hazard_classes': {
    # 'active': True,
    # 'classes': {
    # 'dry': [0, 1],
    # 'wet': [1, 999],
    # }
    # },
    # 'flood_petabencana_hazard_classes': {
    # 'active': False,
    # 'classes': {
    # 'low': [0, 1],
    # 'high': [1, 4],
    # 'very_high': [4, 999]
    # },
    # }
    # }
    # }
}
# Output Layer Metadata Property
# Keywords written to impact-analysis output layers: snapshots of the input
# layers' keywords plus provenance information.
property_exposure_keywords = {
    'key': 'property_exposure_keywords',
    'name': tr('Exposure Keywords'),
    'description': tr(
        'A copy of original exposure keywords in the output\'s analysis.'
    )
}
property_hazard_keywords = {
    'key': 'property_hazard_keywords',
    'name': tr('Hazard Keywords'),
    'description': tr(
        'A copy of original hazard keywords in the output\'s analysis.'
    )
}
property_aggregation_keywords = {
    'key': 'property_aggregation_keywords',
    'name': tr('Aggregation Keywords'),
    'description': tr(
        'A copy of original aggregation keywords in the output\'s analysis.'
    )
}
property_provenance_data = {
    'key': 'property_provenance_data',
    'name': tr('Provenance Data'),
    'description': tr(
        'A collection of provenance of the analysis as dictionary.'
    )
}
# NOTE(review): unlike every other entry, this key is not prefixed with
# 'property_' -- confirm whether that is intentional before relying on it.
property_extra_keywords = {
    'key': 'extra_keywords',
    'name': tr('Extra Keywords'),
    'description': tr(
        'A collection of extra keyword for creating richer report.'
    )
}
| gpl-3.0 |
syllog1sm/TextBlob | text/nltk/tokenize/__init__.py | 2 | 3537 | # -*- coding: utf-8 -*-
# Natural Language Toolkit: Tokenizers
#
# Copyright (C) 2001-2013 NLTK Project
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <stevenbird1@gmail.com> (minor additions)
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
r"""
NLTK Tokenizer Package
Tokenizers divide strings into lists of substrings. For example,
tokenizers can be used to find the list of sentences or words in a
string.
>>> from nltk.tokenize import word_tokenize, wordpunct_tokenize, sent_tokenize
>>> s = '''Good muffins cost $3.88\nin New York. Please buy me
... two of them.\n\nThanks.'''
>>> wordpunct_tokenize(s)
['Good', 'muffins', 'cost', '$', '3', '.', '88', 'in', 'New', 'York', '.',
'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']
>>> sent_tokenize(s)
['Good muffins cost $3.88\nin New York.', 'Please buy me\ntwo of them.', 'Thanks.']
>>> [word_tokenize(t) for t in sent_tokenize(s)]
[['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.'],
['Please', 'buy', 'me', 'two', 'of', 'them', '.'], ['Thanks', '.']]
Caution: only use ``word_tokenize()`` on individual sentences.
Caution: when tokenizing a Unicode string, make sure you are not
using an encoded version of the string (it may be necessary to
decode it first, e.g. with ``s.decode("utf8")``.
NLTK tokenizers can produce token-spans, represented as tuples of integers
having the same semantics as string slices, to support efficient comparison
of tokenizers. (These methods are implemented as generators.)
>>> from nltk.tokenize import WhitespaceTokenizer
>>> list(WhitespaceTokenizer().span_tokenize(s))
[(0, 4), (5, 12), (13, 17), (18, 23), (24, 26), (27, 30), (31, 36), (38, 44),
(45, 48), (49, 51), (52, 55), (56, 58), (59, 64), (66, 73)]
There are numerous ways to tokenize text. If you need more control over
tokenization, see the other methods provided in this package.
For further information, please see Chapter 3 of the NLTK book.
"""
from nltk.data import load
from nltk.tokenize.simple import (SpaceTokenizer, TabTokenizer, LineTokenizer,
line_tokenize)
from nltk.tokenize.regexp import (RegexpTokenizer, WhitespaceTokenizer,
BlanklineTokenizer, WordPunctTokenizer,
wordpunct_tokenize, regexp_tokenize,
blankline_tokenize)
from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktWordTokenizer
from nltk.tokenize.sexpr import SExprTokenizer, sexpr_tokenize
from nltk.tokenize.treebank import TreebankWordTokenizer
from nltk.tokenize.texttiling import TextTilingTokenizer
# Standard sentence tokenizer.
def sent_tokenize(text):
    """
    Return a sentence-tokenized copy of *text*,
    using NLTK's recommended sentence tokenizer
    (currently :class:`.PunktSentenceTokenizer`).
    """
    # Load the pre-trained English Punkt model and delegate to it.
    return load('tokenizers/punkt/english.pickle').tokenize(text)
# Standard word tokenizer.
# Bind the tokenizer's bound method once at import time so every
# word_tokenize() call reuses the same TreebankWordTokenizer instance.
_word_tokenize = TreebankWordTokenizer().tokenize
def word_tokenize(text):
    """
    Return a tokenized copy of *text*,
    using NLTK's recommended word tokenizer
    (currently :class:`.TreebankWordTokenizer`).
    This tokenizer is designed to work on a sentence at a time.
    """
    return _word_tokenize(text)
if __name__ == "__main__":
    # Exercise the usage examples in the module docstring as doctests.
    import doctest
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
| mit |
olapaola/olapaola-android-scripting | python/src/Lib/multiprocessing/managers.py | 52 | 34804 | #
# Module providing the `SyncManager` class for dealing
# with shared objects
#
# multiprocessing/managers.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]
#
# Imports
#
import os
import sys
import weakref
import threading
import array
import Queue
from traceback import format_exc
from multiprocessing import Process, current_process, active_children, Pool, util, connection
from multiprocessing.process import AuthenticationString
from multiprocessing.forking import exit, Popen, assert_spawning, ForkingPickler
from multiprocessing.util import Finalize, info
try:
from cPickle import PicklingError
except ImportError:
from pickle import PicklingError
#
# Register some things for pickling
#
def reduce_array(a):
    # Pickle array.array as (typecode, raw bytes); tostring() is the
    # Python 2 spelling of tobytes().
    return array.array, (a.typecode, a.tostring())
ForkingPickler.register(array.array, reduce_array)
# Concrete types of the objects returned by dict.items()/keys()/values(),
# used elsewhere to special-case view objects.
view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
#
# Type for identifying shared objects
#
class Token(object):
    '''
    Type to uniquely identify a shared object
    '''
    __slots__ = ('typeid', 'address', 'id')

    def __init__(self, typeid, address, id):
        self.typeid = typeid
        self.address = address
        self.id = id

    def __getstate__(self):
        # __slots__ classes have no __dict__, so pickle via a plain tuple.
        return (self.typeid, self.address, self.id)

    def __setstate__(self, state):
        self.typeid, self.address, self.id = state

    def __repr__(self):
        return 'Token(typeid=%r, address=%r, id=%r)' % (
            self.typeid, self.address, self.id)
#
# Function for communication with a manager's server process
#
def dispatch(c, id, methodname, args=(), kwds={}):
    '''
    Send a message to manager using connection `c` and return response
    '''
    # Request wire format: (object ident, method name, args, kwargs).
    request = (id, methodname, args, kwds)
    c.send(request)
    kind, result = c.recv()
    if kind != '#RETURN':
        raise convert_to_error(kind, result)
    return result
def convert_to_error(kind, result):
    # Map a (kind, payload) reply from the server to an exception instance.
    # '#ERROR' carries a pickled exception object; the other kinds carry a
    # traceback string, which is wrapped in RemoteError.
    if kind == '#ERROR':
        return result
    if kind in ('#TRACEBACK', '#UNSERIALIZABLE'):
        assert type(result) is str
        if kind == '#UNSERIALIZABLE':
            return RemoteError('Unserializable message: %s\n' % result)
        return RemoteError(result)
    return ValueError('Unrecognized message type')
class RemoteError(Exception):
    '''Exception wrapping a traceback string from the manager process.'''
    def __str__(self):
        # Frame the remote traceback between two 75-character rules.
        bar = '-' * 75
        return '\n%s\n%s%s' % (bar, str(self.args[0]), bar)
#
# Functions for finding the method names of an object
#
def all_methods(obj):
    '''
    Return a list of names of methods of `obj`
    '''
    # An attribute counts as a method if it is callable.
    return [name for name in dir(obj)
            if hasattr(getattr(obj, name), '__call__')]
def public_methods(obj):
    '''
    Return a list of names of methods of `obj` which do not start with '_'
    '''
    # Single pass over dir(): keep callables whose name is not underscored.
    return [name for name in dir(obj)
            if name[0] != '_' and hasattr(getattr(obj, name), '__call__')]
#
# Server which is run in a process controlled by a manager
#
class Server(object):
    '''
    Server class which runs in a process controlled by a manager object.

    Owns the shared objects (`id_to_obj`, keyed by hex id string) and their
    reference counts; serves proxy requests over one thread per client
    connection. Note: Python 2 syntax (`except Exception, e`).
    '''
    public = ['shutdown', 'create', 'accept_connection', 'get_methods',
              'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']
    def __init__(self, registry, address, authkey, serializer):
        assert isinstance(authkey, bytes)
        self.registry = registry
        self.authkey = AuthenticationString(authkey)
        Listener, Client = listener_client[serializer]
        # do authentication later
        self.listener = Listener(address=address, backlog=5)
        self.address = self.listener.address
        # ident 0 is a dummy entry so control requests (ident=None maps to
        # no real object) have something to resolve against.
        self.id_to_obj = {0: (None, ())}
        self.id_to_refcount = {}
        self.mutex = threading.RLock()
        self.stop = 0
    def serve_forever(self):
        '''
        Run the server forever
        '''
        current_process()._manager_server = self
        try:
            try:
                while 1:
                    try:
                        c = self.listener.accept()
                    except (OSError, IOError):
                        continue
                    t = threading.Thread(target=self.handle_request, args=(c,))
                    t.daemon = True
                    t.start()
            except (KeyboardInterrupt, SystemExit):
                pass
        finally:
            self.stop = 999
            self.listener.close()
    def handle_request(self, c):
        '''
        Handle a new connection
        '''
        funcname = result = request = None
        try:
            # Mutual challenge-response authentication using self.authkey.
            connection.deliver_challenge(c, self.authkey)
            connection.answer_challenge(c, self.authkey)
            request = c.recv()
            ignore, funcname, args, kwds = request
            assert funcname in self.public, '%r unrecognized' % funcname
            func = getattr(self, funcname)
        except Exception:
            msg = ('#TRACEBACK', format_exc())
        else:
            try:
                result = func(c, *args, **kwds)
            except Exception:
                msg = ('#TRACEBACK', format_exc())
            else:
                msg = ('#RETURN', result)
        try:
            c.send(msg)
        except Exception, e:
            try:
                c.send(('#TRACEBACK', format_exc()))
            except Exception:
                pass
            util.info('Failure to send message: %r', msg)
            util.info(' ... request was %r', request)
            util.info(' ... exception was %r', e)
        c.close()
    def serve_client(self, conn):
        '''
        Handle requests from the proxies in a particular process/thread
        '''
        util.debug('starting server thread to service %r',
                   threading.current_thread().name)
        recv = conn.recv
        send = conn.send
        id_to_obj = self.id_to_obj
        while not self.stop:
            try:
                methodname = obj = None
                # Request wire format: (ident, method name, args, kwargs).
                request = recv()
                ident, methodname, args, kwds = request
                obj, exposed, gettypeid = id_to_obj[ident]
                if methodname not in exposed:
                    raise AttributeError(
                        'method %r of %r object is not in exposed=%r' %
                        (methodname, type(obj), exposed)
                        )
                function = getattr(obj, methodname)
                try:
                    res = function(*args, **kwds)
                except Exception, e:
                    msg = ('#ERROR', e)
                else:
                    # If the registry says this method returns a shared type,
                    # create a server-side object and answer with a proxy token.
                    typeid = gettypeid and gettypeid.get(methodname, None)
                    if typeid:
                        rident, rexposed = self.create(conn, typeid, res)
                        token = Token(typeid, self.address, rident)
                        msg = ('#PROXY', (rexposed, token))
                    else:
                        msg = ('#RETURN', res)
            except AttributeError:
                if methodname is None:
                    msg = ('#TRACEBACK', format_exc())
                else:
                    # Fall back to the generic __str__/__repr__/#GETVALUE
                    # handlers for names not exposed on the referent.
                    try:
                        fallback_func = self.fallback_mapping[methodname]
                        result = fallback_func(
                            self, conn, ident, obj, *args, **kwds
                            )
                        msg = ('#RETURN', result)
                    except Exception:
                        msg = ('#TRACEBACK', format_exc())
            except EOFError:
                util.debug('got EOF -- exiting thread serving %r',
                           threading.current_thread().name)
                sys.exit(0)
            except Exception:
                msg = ('#TRACEBACK', format_exc())
            try:
                try:
                    send(msg)
                except Exception, e:
                    # Result not picklable: report its repr instead.
                    send(('#UNSERIALIZABLE', repr(msg)))
            except Exception, e:
                util.info('exception in thread serving %r',
                          threading.current_thread().name)
                util.info(' ... message was %r', msg)
                util.info(' ... exception was %r', e)
                conn.close()
                sys.exit(1)
    def fallback_getvalue(self, conn, ident, obj):
        # Return the referent itself (a copy once pickled back to the caller).
        return obj
    def fallback_str(self, conn, ident, obj):
        return str(obj)
    def fallback_repr(self, conn, ident, obj):
        return repr(obj)
    # Methods every shared object supports even when not explicitly exposed.
    fallback_mapping = {
        '__str__':fallback_str,
        '__repr__':fallback_repr,
        '#GETVALUE':fallback_getvalue
        }
    def dummy(self, c):
        # No-op used by BaseManager.connect() to verify the connection works.
        pass
    def debug_info(self, c):
        '''
        Return some info --- useful to spot problems with refcounting
        '''
        self.mutex.acquire()
        try:
            result = []
            keys = self.id_to_obj.keys()
            keys.sort()
            for ident in keys:
                if ident != 0:
                    result.append(' %s: refcount=%s\n %s' %
                                  (ident, self.id_to_refcount[ident],
                                   str(self.id_to_obj[ident][0])[:75]))
            return '\n'.join(result)
        finally:
            self.mutex.release()
    def number_of_objects(self, c):
        '''
        Number of shared objects
        '''
        return len(self.id_to_obj) - 1 # don't count ident=0
    def shutdown(self, c):
        '''
        Shutdown this process
        '''
        try:
            try:
                util.debug('manager received shutdown message')
                c.send(('#RETURN', None))
                if sys.stdout != sys.__stdout__:
                    util.debug('resetting stdout, stderr')
                    sys.stdout = sys.__stdout__
                    sys.stderr = sys.__stderr__
                util._run_finalizers(0)
                for p in active_children():
                    util.debug('terminating a child process of manager')
                    p.terminate()
                for p in active_children():
                    util.debug('terminating a child process of manager')
                    p.join()
                util._run_finalizers()
                util.info('manager exiting with exitcode 0')
            except:
                import traceback
                traceback.print_exc()
        finally:
            exit(0)
    def create(self, c, typeid, *args, **kwds):
        '''
        Create a new shared object and return its id
        '''
        self.mutex.acquire()
        try:
            callable, exposed, method_to_typeid, proxytype = \
                self.registry[typeid]
            if callable is None:
                # Registered with callable=None: caller supplies the object.
                assert len(args) == 1 and not kwds
                obj = args[0]
            else:
                obj = callable(*args, **kwds)
            if exposed is None:
                exposed = public_methods(obj)
            if method_to_typeid is not None:
                assert type(method_to_typeid) is dict
                exposed = list(exposed) + list(method_to_typeid)
            ident = '%x' % id(obj) # convert to string because xmlrpclib
            # only has 32 bit signed integers
            util.debug('%r callable returned object with id %r', typeid, ident)
            self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
            if ident not in self.id_to_refcount:
                self.id_to_refcount[ident] = 0
            # increment the reference count immediately, to avoid
            # this object being garbage collected before a Proxy
            # object for it can be created. The caller of create()
            # is responsible for doing a decref once the Proxy object
            # has been created.
            self.incref(c, ident)
            return ident, tuple(exposed)
        finally:
            self.mutex.release()
    def get_methods(self, c, token):
        '''
        Return the methods of the shared object indicated by token
        '''
        return tuple(self.id_to_obj[token.id][1])
    def accept_connection(self, c, name):
        '''
        Spawn a new thread to serve this connection
        '''
        threading.current_thread().name = name
        c.send(('#RETURN', None))
        self.serve_client(c)
    def incref(self, c, ident):
        self.mutex.acquire()
        try:
            self.id_to_refcount[ident] += 1
        finally:
            self.mutex.release()
    def decref(self, c, ident):
        self.mutex.acquire()
        try:
            assert self.id_to_refcount[ident] >= 1
            self.id_to_refcount[ident] -= 1
            if self.id_to_refcount[ident] == 0:
                del self.id_to_obj[ident], self.id_to_refcount[ident]
                # NOTE(review): ident is a hex *string* (see create()), so the
                # '%d' below would fail if this debug message were formatted;
                # '%s' looks intended -- confirm before changing.
                util.debug('disposing of obj with id %d', ident)
        finally:
            self.mutex.release()
#
# Class to represent state of a manager
#
class State(object):
    # Mutable holder for a manager's lifecycle stage; shared between the
    # manager object and the finalizer registered on shutdown.
    __slots__ = ['value']
    INITIAL = 0
    STARTED = 1
    SHUTDOWN = 2
#
# Mapping from serializer name to Listener and Client types
#
# Maps serializer name -> (Listener factory, Client factory) pair used for
# all manager/proxy connections.
listener_client = {
    'pickle' : (connection.Listener, connection.Client),
    'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
}
#
# Definition of BaseManager
#
class BaseManager(object):
    '''
    Base class for managers.

    A manager either spawns a Server in a child process (`start()`) or
    attaches to an existing one (`connect()`); `register()` installs typeids
    whose instances are accessed through proxies.
    '''
    _registry = {}
    _Server = Server
    def __init__(self, address=None, authkey=None, serializer='pickle'):
        if authkey is None:
            authkey = current_process().authkey
        self._address = address # XXX not final address if eg ('', 0)
        self._authkey = AuthenticationString(authkey)
        self._state = State()
        self._state.value = State.INITIAL
        self._serializer = serializer
        self._Listener, self._Client = listener_client[serializer]
    def __reduce__(self):
        # NOTE(review): `from_address` is not defined anywhere in this file,
        # so unpickling a manager would raise AttributeError -- confirm.
        return type(self).from_address, \
               (self._address, self._authkey, self._serializer)
    def get_server(self):
        '''
        Return server object with serve_forever() method and address attribute
        '''
        assert self._state.value == State.INITIAL
        return Server(self._registry, self._address,
                      self._authkey, self._serializer)
    def connect(self):
        '''
        Connect manager object to the server process
        '''
        Listener, Client = listener_client[self._serializer]
        conn = Client(self._address, authkey=self._authkey)
        # 'dummy' is a no-op request that validates address and authkey.
        dispatch(conn, None, 'dummy')
        self._state.value = State.STARTED
    def start(self):
        '''
        Spawn a server process for this manager object
        '''
        assert self._state.value == State.INITIAL
        # pipe over which we will retrieve address of server
        reader, writer = connection.Pipe(duplex=False)
        # spawn process which runs a server
        self._process = Process(
            target=type(self)._run_server,
            args=(self._registry, self._address, self._authkey,
                  self._serializer, writer),
            )
        ident = ':'.join(str(i) for i in self._process._identity)
        self._process.name = type(self).__name__ + '-' + ident
        self._process.start()
        # get address of server
        writer.close()
        self._address = reader.recv()
        reader.close()
        # register a finalizer
        self._state.value = State.STARTED
        self.shutdown = util.Finalize(
            self, type(self)._finalize_manager,
            args=(self._process, self._address, self._authkey,
                  self._state, self._Client),
            exitpriority=0
            )
    @classmethod
    def _run_server(cls, registry, address, authkey, serializer, writer):
        '''
        Create a server, report its address and run it
        '''
        # create server
        server = cls._Server(registry, address, authkey, serializer)
        # inform parent process of the server's address
        writer.send(server.address)
        writer.close()
        # run the manager
        util.info('manager serving at %r', server.address)
        server.serve_forever()
    def _create(self, typeid, *args, **kwds):
        '''
        Create a new shared object; return the token and exposed tuple
        '''
        assert self._state.value == State.STARTED, 'server not yet started'
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
        finally:
            conn.close()
        return Token(typeid, self._address, id), exposed
    def join(self, timeout=None):
        '''
        Join the manager process (if it has been spawned)
        '''
        self._process.join(timeout)
    def _debug_info(self):
        '''
        Return some info about the servers shared objects and connections
        '''
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            return dispatch(conn, None, 'debug_info')
        finally:
            conn.close()
    def _number_of_objects(self):
        '''
        Return the number of shared objects
        '''
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            return dispatch(conn, None, 'number_of_objects')
        finally:
            conn.close()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.shutdown()
    @staticmethod
    def _finalize_manager(process, address, authkey, state, _Client):
        '''
        Shutdown the manager process; will be registered as a finalizer
        '''
        if process.is_alive():
            util.info('sending shutdown message to manager')
            try:
                conn = _Client(address, authkey=authkey)
                try:
                    dispatch(conn, None, 'shutdown')
                finally:
                    conn.close()
            except Exception:
                pass
            # Polite shutdown first; escalate to terminate() if it hangs.
            process.join(timeout=0.2)
            if process.is_alive():
                util.info('manager still alive')
                if hasattr(process, 'terminate'):
                    util.info('trying to `terminate()` manager process')
                    process.terminate()
                    process.join(timeout=0.1)
                    if process.is_alive():
                        util.info('manager still alive after terminate')
        state.value = State.SHUTDOWN
        try:
            del BaseProxy._address_to_local[address]
        except KeyError:
            pass
    address = property(lambda self: self._address)
    @classmethod
    def register(cls, typeid, callable=None, proxytype=None, exposed=None,
                 method_to_typeid=None, create_method=True):
        '''
        Register a typeid with the manager type
        '''
        # Copy-on-write so subclass registrations don't leak into the parent.
        if '_registry' not in cls.__dict__:
            cls._registry = cls._registry.copy()
        if proxytype is None:
            proxytype = AutoProxy
        exposed = exposed or getattr(proxytype, '_exposed_', None)
        method_to_typeid = method_to_typeid or \
                           getattr(proxytype, '_method_to_typeid_', None)
        if method_to_typeid:
            for key, value in method_to_typeid.items():
                assert type(key) is str, '%r is not a string' % key
                assert type(value) is str, '%r is not a string' % value
        cls._registry[typeid] = (
            callable, exposed, method_to_typeid, proxytype
            )
        if create_method:
            # Install a factory method named after the typeid on the manager.
            def temp(self, *args, **kwds):
                util.debug('requesting creation of a shared %r object', typeid)
                token, exp = self._create(typeid, *args, **kwds)
                proxy = proxytype(
                    token, self._serializer, manager=self,
                    authkey=self._authkey, exposed=exp
                    )
                # Drop the temporary reference taken by Server.create().
                conn = self._Client(token.address, authkey=self._authkey)
                dispatch(conn, None, 'decref', (token.id,))
                return proxy
            temp.__name__ = typeid
            setattr(cls, typeid, temp)
#
# Subclass of set which get cleared after a fork
#
class ProcessLocalSet(set):
    # A set that empties itself in the child after os.fork(), so proxy
    # reference bookkeeping stays strictly per-process.
    def __init__(self):
        util.register_after_fork(self, lambda obj: obj.clear())
    def __reduce__(self):
        # Pickle as a fresh empty set; contents are process-local.
        return type(self), ()
#
# Definition of BaseProxy
#
class BaseProxy(object):
    '''
    A base for proxies of shared objects.

    Each proxy holds a Token identifying the referent in a manager's Server,
    forwards calls via _callmethod(), and maintains the server-side reference
    count through _incref()/_decref(). Note: Python 2 syntax.
    '''
    # Per-manager-address (thread-local connection, ProcessLocalSet of ids).
    _address_to_local = {}
    _mutex = util.ForkAwareThreadLock()
    def __init__(self, token, serializer, manager=None,
                 authkey=None, exposed=None, incref=True):
        BaseProxy._mutex.acquire()
        try:
            tls_idset = BaseProxy._address_to_local.get(token.address, None)
            if tls_idset is None:
                tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
                BaseProxy._address_to_local[token.address] = tls_idset
        finally:
            BaseProxy._mutex.release()
        # self._tls is used to record the connection used by this
        # thread to communicate with the manager at token.address
        self._tls = tls_idset[0]
        # self._idset is used to record the identities of all shared
        # objects for which the current process owns references and
        # which are in the manager at token.address
        self._idset = tls_idset[1]
        self._token = token
        self._id = self._token.id
        self._manager = manager
        self._serializer = serializer
        self._Client = listener_client[serializer][1]
        if authkey is not None:
            self._authkey = AuthenticationString(authkey)
        elif self._manager is not None:
            self._authkey = self._manager._authkey
        else:
            self._authkey = current_process().authkey
        if incref:
            self._incref()
        util.register_after_fork(self, BaseProxy._after_fork)
    def _connect(self):
        util.debug('making connection to manager')
        name = current_process().name
        if threading.current_thread().name != 'MainThread':
            name += '|' + threading.current_thread().name
        conn = self._Client(self._token.address, authkey=self._authkey)
        dispatch(conn, None, 'accept_connection', (name,))
        self._tls.connection = conn
    def _callmethod(self, methodname, args=(), kwds={}):
        '''
        Try to call a method of the referrent and return a copy of the result
        '''
        try:
            # One lazily-created connection per thread, cached in TLS.
            conn = self._tls.connection
        except AttributeError:
            util.debug('thread %r does not own a connection',
                       threading.current_thread().name)
            self._connect()
            conn = self._tls.connection
        conn.send((self._id, methodname, args, kwds))
        kind, result = conn.recv()
        if kind == '#RETURN':
            return result
        elif kind == '#PROXY':
            # Server created a nested shared object; build a proxy for it and
            # drop the temporary server-side reference taken on our behalf.
            exposed, token = result
            proxytype = self._manager._registry[token.typeid][-1]
            proxy = proxytype(
                token, self._serializer, manager=self._manager,
                authkey=self._authkey, exposed=exposed
                )
            conn = self._Client(token.address, authkey=self._authkey)
            dispatch(conn, None, 'decref', (token.id,))
            return proxy
        raise convert_to_error(kind, result)
    def _getvalue(self):
        '''
        Get a copy of the value of the referent
        '''
        return self._callmethod('#GETVALUE')
    def _incref(self):
        conn = self._Client(self._token.address, authkey=self._authkey)
        dispatch(conn, None, 'incref', (self._id,))
        util.debug('INCREF %r', self._token.id)
        self._idset.add(self._id)
        state = self._manager and self._manager._state
        # Finalizer decrefs the server-side count when this proxy is
        # garbage collected.
        self._close = util.Finalize(
            self, BaseProxy._decref,
            args=(self._token, self._authkey, state,
                  self._tls, self._idset, self._Client),
            exitpriority=10
            )
    @staticmethod
    def _decref(token, authkey, state, tls, idset, _Client):
        idset.discard(token.id)
        # check whether manager is still alive
        if state is None or state.value == State.STARTED:
            # tell manager this process no longer cares about referent
            try:
                util.debug('DECREF %r', token.id)
                conn = _Client(token.address, authkey=authkey)
                dispatch(conn, None, 'decref', (token.id,))
            except Exception, e:
                util.debug('... decref failed %s', e)
        else:
            util.debug('DECREF %r -- manager already shutdown', token.id)
        # check whether we can close this thread's connection because
        # the process owns no more references to objects for this manager
        if not idset and hasattr(tls, 'connection'):
            util.debug('thread %r has no more proxies so closing conn',
                       threading.current_thread().name)
            tls.connection.close()
            del tls.connection
    def _after_fork(self):
        # The child owns no connection to the (possibly gone) manager object;
        # re-register our reference with the server directly.
        self._manager = None
        try:
            self._incref()
        except Exception, e:
            # the proxy may just be for a manager which has shutdown
            util.info('incref failed: %s' % e)
    def __reduce__(self):
        kwds = {}
        if Popen.thread_is_spawning():
            kwds['authkey'] = self._authkey
        if getattr(self, '_isauto', False):
            kwds['exposed'] = self._exposed_
            return (RebuildProxy,
                    (AutoProxy, self._token, self._serializer, kwds))
        else:
            return (RebuildProxy,
                    (type(self), self._token, self._serializer, kwds))
    def __deepcopy__(self, memo):
        # Deep-copying a proxy yields a plain local copy of the referent.
        return self._getvalue()
    def __repr__(self):
        return '<%s object, typeid %r at %s>' % \
               (type(self).__name__, self._token.typeid, '0x%x' % id(self))
    def __str__(self):
        '''
        Return representation of the referent (or a fall-back if that fails)
        '''
        try:
            return self._callmethod('__repr__')
        except Exception:
            return repr(self)[:-1] + "; '__str__()' failed>"
#
# Function used for unpickling
#
def RebuildProxy(func, token, serializer, kwds):
    '''
    Function used for unpickling proxy objects.
    If possible the shared object is returned, or otherwise a proxy for it.
    '''
    server = getattr(current_process(), '_manager_server', None)
    if server and server.address == token.address:
        # Unpickling inside the manager's own server process: hand back the
        # real shared object rather than a proxy to ourselves.
        return server.id_to_obj[token.id][0]
    else:
        # Skip the incref when the proxy is inherited through forking, since
        # the server already accounts for the parent process's reference.
        incref = (
            kwds.pop('incref', True) and
            not getattr(current_process(), '_inheriting', False)
            )
        return func(token, serializer, incref=incref, **kwds)
#
# Functions to create proxies and proxy types
#
def MakeProxyType(name, exposed, _cache={}):
    '''
    Return an proxy type whose methods are given by `exposed`
    '''
    # NOTE: the mutable `_cache` default is deliberate -- it memoizes the
    # generated proxy types for the life of the process.
    exposed = tuple(exposed)
    try:
        return _cache[(name, exposed)]
    except KeyError:
        pass
    dic = {}
    for meth in exposed:
        # Generate one forwarding method per exposed name (Python 2
        # `exec ... in dic` statement syntax).
        exec '''def %s(self, *args, **kwds):
        return self._callmethod(%r, args, kwds)''' % (meth, meth) in dic
    ProxyType = type(name, (BaseProxy,), dic)
    ProxyType._exposed_ = exposed
    _cache[(name, exposed)] = ProxyType
    return ProxyType
def AutoProxy(token, serializer, manager=None, authkey=None,
              exposed=None, incref=True):
    '''
    Return an auto-proxy for `token`
    '''
    client_factory = listener_client[serializer][1]
    if exposed is None:
        # Ask the server which methods the referent exposes.
        conn = client_factory(token.address, authkey=authkey)
        try:
            exposed = dispatch(conn, None, 'get_methods', (token,))
        finally:
            conn.close()
    # Resolve the authkey: explicit argument, then manager, then process.
    if authkey is None and manager is not None:
        authkey = manager._authkey
    if authkey is None:
        authkey = current_process().authkey
    typename = 'AutoProxy[%s]' % token.typeid
    ProxyType = MakeProxyType(typename, exposed)
    proxy = ProxyType(token, serializer, manager=manager,
                      authkey=authkey, incref=incref)
    proxy._isauto = True
    return proxy
#
# Types/callables which we will register with SyncManager
#
class Namespace(object):
    """Simple attribute container whose repr lists its public attributes."""
    def __init__(self, **kwds):
        self.__dict__.update(kwds)
    def __repr__(self):
        # Names starting with '_' are considered private and hidden.
        public = ['%s=%r' % (key, value)
                  for key, value in self.__dict__.items()
                  if not key.startswith('_')]
        public.sort()
        return 'Namespace(%s)' % ', '.join(public)
class Value(object):
    """Mutable single-value holder (`lock` is accepted for API parity only)."""
    def __init__(self, typecode, value, lock=True):
        self._typecode = typecode
        self._value = value
    def get(self):
        """Return the stored value."""
        return self._value
    def set(self, value):
        """Replace the stored value."""
        self._value = value
    def __repr__(self):
        return '{0}({1!r}, {2!r})'.format(
            type(self).__name__, self._typecode, self._value)
    # Attribute-style access delegating to get()/set().
    value = property(get, set)
def Array(typecode, sequence, lock=True):
    """Return a plain array.array; `lock` is accepted but ignored."""
    arr = array.array(typecode, sequence)
    return arr
#
# Proxy types used by SyncManager
#
class IteratorProxy(BaseProxy):
    """Proxy for iterators/generators held in the manager process.

    Every iterator protocol method is forwarded to the referent via
    `_callmethod`; iteration itself happens locally over this proxy.
    """
    # XXX remove methods for Py3.0 and Py2.6
    _exposed_ = ('__next__', 'next', 'send', 'throw', 'close')
    def __iter__(self):
        return self
    def __next__(self, *args):
        return self._callmethod('__next__', args)
    def next(self, *args):
        return self._callmethod('next', args)
    def send(self, *args):
        return self._callmethod('send', args)
    def throw(self, *args):
        return self._callmethod('throw', args)
    def close(self, *args):
        return self._callmethod('close', args)
class AcquirerProxy(BaseProxy):
    """Proxy for lock-like objects (Lock, RLock, Semaphore, ...).

    Also implements the context-manager protocol so a proxied lock can
    be used in a `with` statement; enter/exit forward to acquire/release.
    """
    _exposed_ = ('acquire', 'release')
    def acquire(self, blocking=True):
        return self._callmethod('acquire', (blocking,))
    def release(self):
        return self._callmethod('release')
    def __enter__(self):
        return self._callmethod('acquire')
    def __exit__(self, exc_type, exc_val, exc_tb):
        return self._callmethod('release')
class ConditionProxy(AcquirerProxy):
    """Proxy for threading.Condition; inherits acquire/release handling."""
    # XXX will Condition.notfyAll() name be available in Py3.0?
    _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
    def wait(self, timeout=None):
        return self._callmethod('wait', (timeout,))
    def notify(self):
        return self._callmethod('notify')
    def notify_all(self):
        return self._callmethod('notify_all')
class EventProxy(BaseProxy):
    """Proxy for threading.Event; all calls forward to the referent."""
    _exposed_ = ('is_set', 'set', 'clear', 'wait')
    def is_set(self):
        return self._callmethod('is_set')
    def set(self):
        return self._callmethod('set')
    def clear(self):
        return self._callmethod('clear')
    def wait(self, timeout=None):
        return self._callmethod('wait', (timeout,))
class NamespaceProxy(BaseProxy):
    """Proxy for Namespace objects.

    Attribute access is forwarded to the referent unless the name starts
    with '_', in which case the attribute lives on the proxy itself
    (this is how the proxy's own internals avoid infinite recursion).
    """
    _exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
    def __getattr__(self, key):
        if key[0] == '_':
            # Private names resolve locally on the proxy object.
            return object.__getattribute__(self, key)
        callmethod = object.__getattribute__(self, '_callmethod')
        return callmethod('__getattribute__', (key,))
    def __setattr__(self, key, value):
        if key[0] == '_':
            return object.__setattr__(self, key, value)
        callmethod = object.__getattribute__(self, '_callmethod')
        return callmethod('__setattr__', (key, value))
    def __delattr__(self, key):
        if key[0] == '_':
            return object.__delattr__(self, key)
        callmethod = object.__getattribute__(self, '_callmethod')
        return callmethod('__delattr__', (key,))
class ValueProxy(BaseProxy):
    """Proxy for Value objects; `value` mirrors the referent's property."""
    _exposed_ = ('get', 'set')
    def get(self):
        return self._callmethod('get')
    def set(self, value):
        return self._callmethod('set', (value,))
    value = property(get, set)
# Generated proxy type exposing the standard mutable-sequence methods.
BaseListProxy = MakeProxyType('BaseListProxy', (
    '__add__', '__contains__', '__delitem__', '__delslice__',
    '__getitem__', '__getslice__', '__len__', '__mul__',
    '__reversed__', '__rmul__', '__setitem__', '__setslice__',
    'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
    'reverse', 'sort', '__imul__'
    ))  # XXX __getslice__ and __setslice__ unneeded in Py3.0
class ListProxy(BaseListProxy):
    """List proxy overriding the in-place operators so they return the
    local proxy; the plain remote call would return the referent."""
    def __iadd__(self, value):
        self._callmethod('extend', (value,))
        return self
    def __imul__(self, value):
        self._callmethod('__imul__', (value,))
        return self
# Generated proxy types for dict, array and Pool referents.
DictProxy = MakeProxyType('DictProxy', (
    '__contains__', '__delitem__', '__getitem__', '__len__',
    '__setitem__', 'clear', 'copy', 'get', 'has_key', 'items',
    'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
    ))
ArrayProxy = MakeProxyType('ArrayProxy', (
    '__len__', '__getitem__', '__setitem__', '__getslice__', '__setslice__'
    )) # XXX __getslice__ and __setslice__ unneeded in Py3.0
PoolProxy = MakeProxyType('PoolProxy', (
    'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
    'map', 'map_async', 'terminate'
    ))
# Results of these Pool methods come back as their own proxy types
# (registered below with create_method=False) instead of by value.
PoolProxy._method_to_typeid_ = {
    'apply_async': 'AsyncResult',
    'map_async': 'AsyncResult',
    'imap': 'Iterator',
    'imap_unordered': 'Iterator'
    }
#
# Definition of SyncManager
#
class SyncManager(BaseManager):
    '''
    Subclass of `BaseManager` which supports a number of shared object types.
    The types registered are those intended for the synchronization
    of threads, plus `dict`, `list` and `Namespace`.
    The `multiprocessing.Manager()` function creates started instances of
    this class.
    '''
    # All type registrations happen at module level, just below.
# Register the shared types served by SyncManager.  The optional third
# argument is the proxy class used by clients; omitted registrations get
# an auto-generated proxy.
SyncManager.register('Queue', Queue.Queue)
# NOTE(review): 'JoinableQueue' is registered with the same Queue.Queue
# callable as 'Queue' above.
SyncManager.register('JoinableQueue', Queue.Queue)
SyncManager.register('Event', threading.Event, EventProxy)
SyncManager.register('Lock', threading.Lock, AcquirerProxy)
SyncManager.register('RLock', threading.RLock, AcquirerProxy)
SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
                     AcquirerProxy)
SyncManager.register('Condition', threading.Condition, ConditionProxy)
SyncManager.register('Pool', Pool, PoolProxy)
SyncManager.register('list', list, ListProxy)
SyncManager.register('dict', dict, DictProxy)
SyncManager.register('Value', Value, ValueProxy)
SyncManager.register('Array', Array, ArrayProxy)
SyncManager.register('Namespace', Namespace, NamespaceProxy)
# types returned by methods of PoolProxy
SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
SyncManager.register('AsyncResult', create_method=False)
| apache-2.0 |
suhe/odoo | addons/hr_payroll/wizard/hr_payroll_contribution_register_report.py | 47 | 1128 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from datetime import datetime
from dateutil import relativedelta
from openerp.osv import fields, osv
class payslip_lines_contribution_register(osv.osv_memory):
    """Transient wizard collecting a date range for the
    'payslip lines by contribution register' report."""
    _name = 'payslip.lines.contribution.register'
    _description = 'PaySlip Lines by Contribution Registers'
    _columns = {
        'date_from': fields.date('Date From', required=True),
        'date_to': fields.date('Date To', required=True),
    }
    # Defaults: first day of the current month through its last day.
    _defaults = {
        'date_from': lambda *a: time.strftime('%Y-%m-01'),
        'date_to': lambda *a: str(datetime.now() + relativedelta.relativedelta(months=+1, day=1, days=-1))[:10],
    }
    def print_report(self, cr, uid, ids, context=None):
        """Gather the wizard values and launch the QWeb report on the
        contribution registers selected in the active context."""
        datas = {
            'ids': context.get('active_ids', []),
            'model': 'hr.contribution.register',
            'form': self.read(cr, uid, ids, context=context)[0]
        }
        return self.pool['report'].get_action(
            cr, uid, [], 'hr_payroll.report_contributionregister', data=datas, context=context
        )
| gpl-3.0 |
naibaf7/PyGreentea | examples/2D_usk_malis_softmax/test.py | 2 | 1619 | from __future__ import print_function
import sys, os, math
import numpy as np
import h5py
from numpy import float32, int32, uint8, dtype
from PIL import Image
import glob
# Load PyGreentea
# Relative path to where PyGreentea resides
pygt_path = '../..'
sys.path.append(pygt_path)
import pygreentea.pygreentea as pygt
from pygreentea.pygreentea import malis
# Load the datasets - individual tiff files in a directory
raw_dir = '../../../project_data/dataset_01/train/raw'
raw_path = sorted(glob.glob(raw_dir+'/*.tif'))
num_images = len(raw_path)
# Each image: grayscale, normalized, with a leading channel axis added.
raw_ds = [np.expand_dims(pygt.normalize(np.array(Image.open(raw_path[i]).convert('L'), 'f')),0) for i in range(0, num_images)]
datasets = []
for i in range(0,len(raw_ds)):
    dataset = {}
    dataset['data'] = raw_ds[i]
    datasets += [dataset]
test_net_file = 'net.prototxt'
test_device = 0
pygt.caffe.set_devices((test_device,))
# Use the most recently trained snapshot of the 'net' model.
caffemodels = pygt.get_caffe_models('net')
test_net = pygt.init_testnet(test_net_file, trained_model=caffemodels[-1][1], test_device=test_device)
def process_data_slice_callback(input_specs, batch_size, dataset_indexes, offsets, dataset_combined_sizes, data_arrays, slices):
    # Nothing to process here
    pass
output_arrays = []
pygt.process(test_net, datasets, ['aff_pred', 'smax_pred'], output_arrays, process_data_slice_callback)
# Dump every predicted volume to its own HDF5 file under output/.
for i in range(0, len(output_arrays)):
    for key in output_arrays[i].keys():
        outhdf5 = h5py.File('output/' + key + str(i) + '.h5', 'w')
        outdset = outhdf5.create_dataset('main', np.shape(output_arrays[i][key]), np.float32, data=output_arrays[i][key])
        outhdf5.close()
dineshappavoo/ctgi | src/com/ctgi/google/problems/questions/Apple_interview_questions.gl.py | 1 | 1710 | ALGORITHMS:
1. Write code to sum two integers, but you cannot use the a+b operator; you may only use increment (++) or decrement (--). How will you handle negative numbers? [com.ctgi.google.problems.SumIntegersUsingSpecialOperator]
2. There are several words in a file. Count the occurrences of every word and sort the words by occurrence; if more than one word has the same occurrence, sort those words alphabetically. [com.ctgi.google.problems.SortListBasedOnOccurrance]
TEST
1.
DESIGN:
1. How can we divide a large file between multiple threads to process it? Suppose we are running a multi-threaded application whose input is a large file, and each thread must be given a part of the file to speed up processing. How can we achieve this in Java?
ANSWER:
1.Use producer consumer problem design
2.use fork/join, keep dividing the file based on lines and invoking the fork.
Was to partition data in threads/pipelines
1) Round robin (fastest and uniformly distributed)
2) Randomly assign to each thread (uniformly distributed but relatively slow as we have to generate random numbers)
3) Hashing (good to group duplicates together but slow because of hashing)
2. A test automation suite fails about 14% of the time (roughly 1 run in 7). How would you debug it, given that neither the product code nor the test code has changed?
ANSWER: Look at the failures in the logs. In general, check the health of the server: if server performance is slow (timeout errors, ObjectNotFound exceptions), or if an OutOfMemoryError is thrown in one of the test cases — or even outside this test suite — it needs to be cleaned up.
References:
1. http://steve-yegge.blogspot.com/2008/03/get-that-job-at-google.html | mit |
urbansearchTUD/UrbanSearch | tests/utils/test_db_utils.py | 1 | 10060 | import time
import pytest
import config
from urbansearch.utils import db_utils
OCCURS_IN = config.get('neo4j', 'occurs_in_name')
RELATES_TO = config.get('neo4j', 'relates_to_name')
if not ('TEST' in OCCURS_IN and 'TEST' in RELATES_TO):
raise ValueError('Not adjusting production DB! {} {}'.format(RELATES_TO,
OCCURS_IN))
@pytest.fixture
def clean_neo4j_index_and_rel(request):
    """After the test, delete test Index nodes and their OCCURS_IN relations."""
    def _teardown():
        db_utils.perform_query('''
            MATCH (:City)-[r:{}]->(n:Index)
            WHERE n.filename STARTS WITH 'test'
            DELETE r, n'''.format(OCCURS_IN))
    request.addfinalizer(_teardown)
@pytest.fixture
def clean_neo4j_index(request):
    """After the test, delete Index nodes whose filename starts with 'test'."""
    def _teardown():
        db_utils.perform_query('''
            MATCH (n:Index)
            WHERE n.filename STARTS WITH 'test'
            DELETE n''')
    request.addfinalizer(_teardown)
@pytest.fixture
def clean_neo4j_ic_rel(request):
    """After the test, delete every RELATES_TO relation between cities."""
    def _teardown():
        db_utils.perform_query('''
            MATCH (:City)-[r:{}]->(:City)
            DELETE r'''.format(RELATES_TO), None)
    request.addfinalizer(_teardown)
def _create_test_index(digest='unique_string'):
    """Store one index row for 'test.gz' and return its digest."""
    index = dict(digest=digest, filename='test.gz', length=10, offset=12)
    assert db_utils.store_index(index)
    return index['digest']
def test_connected_to_db():
    """The database must be reachable for the rest of the suite to work."""
    assert db_utils.connected_to_db()
def test_city_names():
    """city_names() returns the four seeded test cities."""
    expected = ['Amsterdam', 'Rotterdam', 'Den Haag', 'Appingedam']
    assert db_utils.city_names() == expected
def test_city_population():
    """Population lookup returns the seeded value for Amsterdam."""
    assert db_utils.city_population('Amsterdam') == 697835
def test_city_distance_diff():
    """Distance grows with geographic separation."""
    near = db_utils.city_distance('Den Haag', 'Rotterdam')
    far = db_utils.city_distance('Appingedam', 'Rotterdam')
    assert near < far
def test_city_distance_eq():
    """city_distance is symmetric in its arguments."""
    assert (db_utils.city_distance('Den Haag', 'Rotterdam')
            == db_utils.city_distance('Rotterdam', 'Den Haag'))
def test_city_haversine_distance_diff():
    """Haversine distance grows with geographic separation."""
    near = db_utils.city_haversine_distance('Den Haag', 'Rotterdam')
    far = db_utils.city_haversine_distance('Amsterdam', 'Rotterdam')
    assert near < far
def test_city_haversine_distance_eq():
    """Haversine distance is symmetric in its arguments."""
    assert (db_utils.city_haversine_distance('Den Haag', 'Rotterdam')
            == db_utils.city_haversine_distance('Rotterdam', 'Den Haag'))
def test_city_haversine_distance_num():
    """Rotterdam-Amsterdam distance falls in the expected 55-60 range."""
    assert 55 < db_utils.city_haversine_distance('Rotterdam', 'Amsterdam') < 60
def test_invalid_query():
    """An incomplete Cypher query yields None rather than raising."""
    result = db_utils.perform_query('MATCH (n:City)')
    assert result is None
@pytest.mark.usefixtures('clean_neo4j_index')
def test_store_single_index():
    """A single index dict can be stored."""
    index = dict(digest='unique_string', filename='test.gz',
                 length=10, offset=12)
    assert db_utils.store_index(index)
@pytest.mark.usefixtures('clean_neo4j_index')
def test_store_multi_index():
    """Several index dicts can be stored in one call."""
    first = dict(digest='unique_string', filename='test.gz',
                 length=10, offset=12)
    second = dict(digest='unique_string2', filename='test2.gz',
                  length=11, offset=13)
    assert db_utils.store_indices([first, second])
@pytest.mark.usefixtures('clean_neo4j_index_and_rel')
def test_store_single_occurrence():
    """A city occurrence can be linked to a stored index."""
    digest = _create_test_index()
    assert db_utils.store_occurrence(digest, ['Amsterdam'])
@pytest.mark.usefixtures('clean_neo4j_index_and_rel')
def test_store_multi_occurrence():
    """Occurrence lists can be stored for several digests at once."""
    indices = [
        dict(digest='unique_string', filename='test.gz',
             length=10, offset=12),
        dict(digest='unique_string2', filename='test2.gz',
             length=11, offset=13),
    ]
    db_utils.store_indices(indices)
    assert db_utils.store_occurrences(
        ['unique_string', 'unique_string2'],
        [['Amsterdam', 'Rotterdam'], ['Appingedam']])
@pytest.mark.usefixtures('clean_neo4j_ic_rel')
def test_store_intercity_relation():
    """A RELATES_TO relation between two cities can be created."""
    assert db_utils.store_ic_rel('Amsterdam', 'Rotterdam')
def test_get_intercity_relation_none():
    """Without a stored relation the lookup returns a falsy value."""
    result = db_utils.get_ic_rel('Rotterdam', 'Amsterdam', 0)
    assert not result
@pytest.mark.usefixtures('clean_neo4j_ic_rel')
def test_get_intercity_relation():
    """A freshly stored relation with no scores reads back as None.

    An unused `expected` dict of zeroed categories was removed; the
    assertion below is the only check this test ever performed.
    """
    db_utils.store_ic_rel('Rotterdam', 'Amsterdam')
    assert db_utils.get_ic_rel('Rotterdam', 'Amsterdam', 0) is None
@pytest.mark.usefixtures('clean_neo4j_ic_rel')
def test_get_intercity_relation_multi():
    """Freshly stored relations with no scores read back as None.

    Unused `d`/`expected` dicts of zeroed categories were removed; the
    assertion below is the only check this test ever performed.
    """
    pairs = [('Rotterdam', 'Amsterdam'), ('Den Haag', 'Appingedam')]
    db_utils.store_ic_rels(pairs)
    assert db_utils.get_ic_rels(pairs, 0) == [None, None]
@pytest.mark.usefixtures('clean_neo4j_index')
def test_store_index_topics_single():
    """A single topic can be attached to an index."""
    digest = _create_test_index()
    assert db_utils.store_index_topics(digest, ['economy'])
@pytest.mark.usefixtures('clean_neo4j_index')
def test_store_index_topics_multi():
    """Multiple topics can be attached to an index."""
    digest = _create_test_index()
    assert db_utils.store_index_topics(digest, ['economy', 'commuting'])
@pytest.mark.usefixtures('clean_neo4j_index')
def test_store_index_topics_empty():
    """Empty or missing topic lists are rejected."""
    digest = _create_test_index()
    assert not db_utils.store_index_topics(digest, [])
    assert not db_utils.store_index_topics(digest, None)
@pytest.mark.usefixtures('clean_neo4j_index')
def test_store_indices_topics():
    """Topic lists (including empty ones) can be stored in bulk."""
    indices = [_create_test_index(), _create_test_index('test2.gz')]
    assert db_utils.store_indices_topics(indices, [['Economy', 'Trade'], []])
@pytest.mark.usefixtures('clean_neo4j_index')
def test_get_index_probabilities_default():
    """Storing default (None) probabilities yields all-zero categories.

    Renamed: a later test reused the name `test_get_index_probabilities`,
    shadowing this one so it never ran.  The store call also lacked its
    arguments; it now stores defaults for the created index, mirroring
    `test_store_index_probabilities_default`.
    """
    index = _create_test_index()
    db_utils.store_index_probabilities(index, None)
    expected = {
        'commuting': 0,
        'shopping': 0,
        'leisure': 0,
        'residential_mobility': 0,
        'education': 0,
        'collaboration': 0,
        'transportation': 0,
    }
    assert db_utils.get_index_probabilities(index) == expected
@pytest.mark.usefixtures('clean_neo4j_index')
def test_get_index_probabilities():
    """Bulk retrieval returns one zeroed probability dict per index."""
    indices = [_create_test_index(), _create_test_index('test2.gz')]
    db_utils.store_indices_probabilities(indices, [None, None])
    zeroed = {
        'commuting': 0,
        'shopping': 0,
        'leisure': 0,
        'residential_mobility': 0,
        'education': 0,
        'collaboration': 0,
        'transportation': 0,
    }
    assert db_utils.get_indices_probabilities(indices) == [zeroed, zeroed]
@pytest.mark.usefixtures('clean_neo4j_index')
def test_store_index_probabilities_default():
    """Storing with None probabilities succeeds."""
    digest = _create_test_index()
    assert db_utils.store_index_probabilities(digest, None)
@pytest.mark.usefixtures('clean_neo4j_index')
def test_store_index_probabilities_full():
    """A complete probability dict round-trips unchanged."""
    digest = _create_test_index()
    scores = {
        'commuting': 0.5,
        'shopping': 0.13,
        'leisure': 0.12,
        'residential_mobility': 0.11,
        'education': 0.15,
        'collaboration': 0.16,
        'transportation': 0.17,
    }
    assert db_utils.store_index_probabilities(digest, scores)
    assert db_utils.get_index_probabilities(digest) == scores
@pytest.mark.usefixtures('clean_neo4j_index')
def test_store_index_probabilities_with_update():
    """A second store merges into the existing probabilities."""
    digest = _create_test_index()
    initial = {
        'commuting': 0.5,
        'shopping': 0.13,
        'leisure': 0.12,
        'residential_mobility': 0.11
    }
    patch = {'commuting': 0.6}
    merged = {
        'commuting': 0.6,
        'shopping': 0.13,
        'leisure': 0.12,
        'residential_mobility': 0.11,
        'education': 0,
        'collaboration': 0,
        'transportation': 0,
    }
    assert db_utils.store_index_probabilities(digest, initial)
    assert db_utils.store_index_probabilities(digest, patch)
    assert db_utils.get_index_probabilities(digest) == merged
@pytest.mark.usefixtures('clean_neo4j_index')
def test_store_indices_probabilities():
    """Per-index probability dicts can be stored in bulk."""
    indices = [_create_test_index(), _create_test_index('test2.gz')]
    scores = {
        'commuting': 0.6,
        'shopping': 0.13,
        'leisure': 0.12,
        'residential_mobility': 0.11,
        'education': 0,
        'collaboration': 0,
        'transportation': 0,
    }
    assert db_utils.store_indices_probabilities(indices, [scores, scores])
@pytest.mark.usefixtures('clean_neo4j_index')
def test_get_index_topics():
    """Stored topics can be read back for a single index."""
    digest = _create_test_index()
    db_utils.store_index_topics(digest, ['Economy'])
    assert db_utils.get_index_topics(digest) == ['Economy']
@pytest.mark.usefixtures('clean_neo4j_index')
def test_get_indices_topics():
    """Stored topic lists can be read back in bulk."""
    indices = [_create_test_index(), _create_test_index('test2.gz')]
    db_utils.store_indices_topics(indices, [['Economy'], []])
    assert db_utils.get_indices_topics(indices) == [['Economy'], []]
@pytest.mark.usefixtures('clean_neo4j_index_and_rel')
def test_compute_ic_relations():
    """Two cities occurring in one document produce a computed relation.

    Added the cleanup fixture: unlike its siblings this test stored a
    'test.gz' index and occurrences without removing them afterwards,
    leaking state into subsequent runs.
    """
    digest = _create_test_index()
    db_utils.store_occurrence(digest, ['Amsterdam', 'Rotterdam'])
    assert len(db_utils.compute_ic_relations(cities=['Amsterdam'])) > 0
| gpl-3.0 |
dabercro/CrombieTools | python/CrombieTools/AnalysisTools/LimitTreeMaker.py | 1 | 1169 | """
@todo Clean up the LimitTreeMaker python file to not depend on these extra variables in cuts.py
"""
import os
from .. import Load, DirFromEnv
newLimitTreeMaker = Load('LimitTreeMaker')
def SetupFromEnv(ltm):
    """A function that sets up the LimitTreeMaker after sourcing a config file
    @param ltm The LimitTreeMaker object to set up
    """
    from ..CommonTools.FileConfigReader import SetupConfigFromEnv, SetFunctionFromEnv, LoadConfig
    SetupConfigFromEnv(ltm)
    DirFromEnv('CrombieOutLimitTreeDir')
    SetFunctionFromEnv([
        (ltm.SetOutDirectory, 'CrombieOutLimitTreeDir'),
    ])
    # Per-region exception configs are optional environment variables.
    for region in LoadConfig.cuts.regions:
        exception_config = os.environ.get('CrombieExcept_' + region)
        if exception_config:
            ltm.ReadExceptionConfig(exception_config, region)
def SetCuts(ltm, category):
    """Register each region's cut (plus optional data-cut/weight-branch
    exceptions) on the given LimitTreeMaker."""
    from .. import LoadConfig
    cuts = LoadConfig.cuts
    for region in cuts.regions:
        ltm.AddRegion(region, cuts.cut(category, region))
        if region in cuts.additionKeys:
            extra = cuts.additions[region]
            ltm.AddExceptionDataCut(region, extra[0])
            ltm.AddExceptionWeightBranch(region, extra[1])
amenonsen/ansible | lib/ansible/module_utils/network/vyos/argspec/l3_interfaces/l3_interfaces.py | 23 | 2969 | #
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The arg spec for the vyos_l3_interfaces module
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
class L3_interfacesArgs(object):  # pylint: disable=R0903
    """The arg spec for the vyos_l3_interfaces module
    """
    def __init__(self, **kwargs):
        pass
    # Nested spec consumed by AnsibleModule(argument_spec=...): a list of
    # interfaces, each with optional ipv4/ipv6 addresses and VLAN sub-
    # interfaces (vifs), plus the resource-module `state` selector.
    argument_spec = {
        'config': {
            'elements': 'dict',
            'options': {
                'ipv4': {
                    'elements': 'dict',
                    'options': {
                        'address': {
                            'type': 'str'
                        }
                    },
                    'type': 'list'
                },
                'ipv6': {
                    'elements': 'dict',
                    'options': {
                        'address': {
                            'type': 'str'
                        }
                    },
                    'type': 'list'
                },
                'name': {
                    'required': True,
                    'type': 'str'
                },
                'vifs': {
                    'elements': 'dict',
                    'options': {
                        'ipv4': {
                            'elements': 'dict',
                            'options': {
                                'address': {
                                    'type': 'str'
                                }
                            },
                            'type': 'list'
                        },
                        'ipv6': {
                            'elements': 'dict',
                            'options': {
                                'address': {
                                    'type': 'str'
                                }
                            },
                            'type': 'list'
                        },
                        'vlan_id': {
                            'type': 'int'
                        }
                    },
                    'type': 'list'
                }
            },
            'type': 'list'
        },
        'state': {
            'choices': ['merged', 'replaced', 'overridden', 'deleted'],
            'default': 'merged',
            'type': 'str'
        }
    }  # pylint: disable=C0301
| gpl-3.0 |
fmacias64/deap | examples/es/cma_plotting.py | 12 | 4326 | # This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
import numpy
from deap import algorithms
from deap import base
from deap import benchmarks
from deap import cma
from deap import creator
from deap import tools
import matplotlib.pyplot as plt
# Problem size
N = 10  # dimensionality of the Rastrigin problem
NGEN = 125  # number of CMA-ES generations
# Single negative weight => minimization of the fitness value.
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", list, fitness=creator.FitnessMin)
toolbox = base.Toolbox()
toolbox.register("evaluate", benchmarks.rastrigin)
def main(verbose=True):
    """Run CMA-ES on the N-dimensional Rastrigin function and plot the run.

    Each generation records sigma, the covariance axis ratio, the squared
    eigenvalues, the best fitness/individual and the per-axis population
    standard deviations, then renders four diagnostic subplots.
    """
    # The cma module uses the numpy random number generator
    numpy.random.seed(64)
    # The CMA-ES algorithm takes a population of one individual as argument
    # The centroid is set to a vector of 5.0 see http://www.lri.fr/~hansen/cmaes_inmatlab.html
    # for more details about the rastrigin and other tests for CMA-ES
    strategy = cma.Strategy(centroid=[5.0]*N, sigma=5.0, lambda_=20*N)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)
    halloffame = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)
    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"
    # Objects that will compile the data
    sigma = numpy.ndarray((NGEN,1))
    axis_ratio = numpy.ndarray((NGEN,1))
    diagD = numpy.ndarray((NGEN,N))
    fbest = numpy.ndarray((NGEN,1))
    best = numpy.ndarray((NGEN,N))
    std = numpy.ndarray((NGEN,N))
    for gen in range(NGEN):
        # Generate a new population
        population = toolbox.generate()
        # Evaluate the individuals
        fitnesses = toolbox.map(toolbox.evaluate, population)
        for ind, fit in zip(population, fitnesses):
            ind.fitness.values = fit
        # Update the strategy with the evaluated individuals
        toolbox.update(population)
        # Update the hall of fame and the statistics with the
        # currently evaluated population
        halloffame.update(population)
        record = stats.compile(population)
        logbook.record(evals=len(population), gen=gen, **record)
        if verbose:
            print(logbook.stream)
        # Save more data along the evolution for latter plotting
        # diagD is sorted and sqrooted in the update method
        sigma[gen] = strategy.sigma
        axis_ratio[gen] = max(strategy.diagD)**2/min(strategy.diagD)**2
        diagD[gen, :N] = strategy.diagD**2
        fbest[gen] = halloffame[0].fitness.values
        best[gen, :N] = halloffame[0]
        std[gen, :N] = numpy.std(population, axis=0)
    # The x-axis will be the number of evaluations
    x = list(range(0, strategy.lambda_ * NGEN, strategy.lambda_))
    avg, max_, min_ = logbook.select("avg", "max", "min")
    plt.figure()
    plt.subplot(2, 2, 1)
    plt.semilogy(x, avg, "--b")
    plt.semilogy(x, max_, "--b")
    plt.semilogy(x, min_, "-b")
    plt.semilogy(x, fbest, "-c")
    plt.semilogy(x, sigma, "-g")
    plt.semilogy(x, axis_ratio, "-r")
    plt.grid(True)
    plt.title("blue: f-values, green: sigma, red: axis ratio")
    plt.subplot(2, 2, 2)
    plt.plot(x, best)
    plt.grid(True)
    plt.title("Object Variables")
    plt.subplot(2, 2, 3)
    plt.semilogy(x, diagD)
    plt.grid(True)
    plt.title("Scaling (All Main Axes)")
    plt.subplot(2, 2, 4)
    plt.semilogy(x, std)
    plt.grid(True)
    plt.title("Standard Deviations in All Coordinates")
    plt.show()
if __name__ == "__main__":
    main(False)  # run quietly (no per-generation logbook printing)
| lgpl-3.0 |
qPCR4vir/orange | Orange/OrangeCanvas/application/tutorials/__init__.py | 6 | 3434 | """
Orange Canvas Tutorial schemes
"""
import os
import io
import logging
import types
import collections
from itertools import chain
import pkg_resources
log = logging.getLogger(__name__)
def list_schemes(package):
    """Return the sorted *.ows tutorial filenames contained in `package`."""
    entries = pkg_resources.resource_listdir(package.__name__, ".")
    return sorted(entry for entry in entries if is_ows(entry))
def is_ows(filename):
    """Return True when `filename` carries the Orange scheme extension."""
    OWS_SUFFIX = ".ows"
    return filename.endswith(OWS_SUFFIX)
def default_entry_point():
    """Return the entry point for the tutorials bundled with Orange itself."""
    dist = pkg_resources.get_distribution("Orange")
    return pkg_resources.EntryPoint("Orange Canvas", __name__, dist=dist)
def tutorial_entry_points():
    """Yield the built-in entry point followed by all registered ones."""
    registered = pkg_resources.iter_entry_points("orange.widgets.tutorials")
    return chain([default_entry_point()], registered)
def tutorials():
    """Return all known tutorials.

    Each entry point may resolve to a module (its *.ows resources are
    listed) or to a callable returning tutorial descriptors.  Entry
    points that fail to load are logged and skipped.
    """
    all_tutorials = []
    for ep in tutorial_entry_points():
        tutorials = None
        try:
            tutorials = ep.load()
        except pkg_resources.DistributionNotFound as ex:
            log.warning("Could not load tutorials from %r (%r)",
                        ep.dist, ex)
            continue
        except ImportError:
            log.error("Could not load tutorials from %r",
                      ep.dist, exc_info=True)
            continue
        except Exception:
            log.error("Could not load tutorials from %r",
                      ep.dist, exc_info=True)
            continue
        if isinstance(tutorials, types.ModuleType):
            package = tutorials
            tutorials = list_schemes(tutorials)
            tutorials = [Tutorial(t, package, ep.dist) for t in tutorials]
        elif isinstance(tutorials, (types.FunctionType, types.MethodType)):
            try:
                tutorials = tutorials()
            except Exception as ex:
                log.error("A callable entry point (%r) raised an "
                          "unexpected error.",
                          ex, exc_info=True)
                continue
            # Bug fix: previously a single Tutorial was built from a stale
            # loop variable `t` (a NameError on the first iteration); wrap
            # every scheme returned by the callable instead.
            tutorials = [Tutorial(t, package=None, distribution=ep.dist)
                         for t in tutorials]
        all_tutorials.extend(tutorials)
    return all_tutorials
class Tutorial(object):
    """A single tutorial scheme: a resource name plus the package and
    distribution it was discovered in (either may be None)."""
    def __init__(self, resource, package=None, distribution=None):
        self.resource = resource
        self.package = package
        self.distribution = distribution
    def abspath(self):
        """Return absolute filename for the scheme if possible else
        raise an ValueError.
        """
        if self.package is not None:
            return pkg_resources.resource_filename(self.package.__name__,
                                                   self.resource)
        elif isinstance(self.resource, basestring):
            # NOTE: `basestring` is Python 2 only.
            if os.path.isabs(self.resource):
                return self.resource
        raise ValueError("cannot resolve resource to an absolute name")
    def stream(self):
        """Return the tutorial file as an open stream.
        """
        if self.package is not None:
            return pkg_resources.resource_stream(self.package.__name__,
                                                 self.resource)
        elif isinstance(self.resource, basestring):
            # Only absolute, existing paths can be opened directly.
            if os.path.isabs(self.resource) and os.path.exists(self.resource):
                return open(self.resource, "rb")
        raise ValueError
kamotos/django-storages | storages/backends/database.py | 10 | 5252 | # DatabaseStorage for django.
# 2009 (c) GameKeeper Gambling Ltd, Ivanov E.
from django.conf import settings
from django.core.files import File
from django.core.exceptions import ImproperlyConfigured
from storages.compat import urlparse, BytesIO, Storage
try:
import pyodbc
except ImportError:
raise ImproperlyConfigured("Could not load pyodbc dependency.\
\nSee http://code.google.com/p/pyodbc/")
# Keys that must be present in the options dict given to DatabaseStorage.
REQUIRED_FIELDS = ('db_table', 'fname_column', 'blob_column', 'size_column', 'base_url')
class DatabaseStorage(Storage):
"""
Class DatabaseStorage provides storing files in the database.
"""
def __init__(self, option=settings.DB_FILES):
"""Constructor.
Constructs object using dictionary either specified in contucotr or
in settings.DB_FILES.
@param option dictionary with 'db_table', 'fname_column',
'blob_column', 'size_column', 'base_url' keys.
option['db_table']
Table to work with.
option['fname_column']
Column in the 'db_table' containing filenames (filenames can
contain pathes). Values should be the same as where FileField keeps
filenames.
It is used to map filename to blob_column. In sql it's simply
used in where clause.
option['blob_column']
Blob column (for example 'image' type), created manually in the
'db_table', used to store image.
option['size_column']
Column to store file size. Used for optimization of size()
method (another way is to open file and get size)
option['base_url']
Url prefix used with filenames. Should be mapped to the view,
that returns an image as result.
"""
if not option or not all([field in option for field in REQUIRED_FIELDS]):
raise ValueError("You didn't specify required options")
self.db_table = option['db_table']
self.fname_column = option['fname_column']
self.blob_column = option['blob_column']
self.size_column = option['size_column']
self.base_url = option['base_url']
#get database settings
self.DATABASE_ODBC_DRIVER = settings.DATABASE_ODBC_DRIVER
self.DATABASE_NAME = settings.DATABASE_NAME
self.DATABASE_USER = settings.DATABASE_USER
self.DATABASE_PASSWORD = settings.DATABASE_PASSWORD
self.DATABASE_HOST = settings.DATABASE_HOST
self.connection = pyodbc.connect('DRIVER=%s;SERVER=%s;DATABASE=%s;UID=%s;PWD=%s'%(self.DATABASE_ODBC_DRIVER,self.DATABASE_HOST,self.DATABASE_NAME,
self.DATABASE_USER, self.DATABASE_PASSWORD) )
self.cursor = self.connection.cursor()
def _open(self, name, mode='rb'):
"""Open a file from database.
@param name filename or relative path to file based on base_url. path should contain only "/", but not "\". Apache sends pathes with "/".
If there is no such file in the db, returs None
"""
assert mode == 'rb', "You've tried to open binary file without specifying binary mode! You specified: %s"%mode
row = self.cursor.execute("SELECT %s from %s where %s = '%s'"%(self.blob_column,self.db_table,self.fname_column,name) ).fetchone()
if row is None:
return None
inMemFile = BytesIO(row[0])
inMemFile.name = name
inMemFile.mode = mode
retFile = File(inMemFile)
return retFile
def _save(self, name, content):
    """Save 'content' as file named 'name'.

    @note '\\' in path will be converted to '/'.
    @return the (normalised) name the content was stored under.
    """
    # Normalise Windows-style separators so later lookups stay consistent.
    name = name.replace('\\', '/')
    binary = pyodbc.Binary(content.read())
    size = len(binary)
    # todo: check result and do something (exception?) if failed.
    if self.exists(name):
        # The filename is bound as a query parameter rather than being
        # interpolated into the SQL text, so it cannot inject SQL; only
        # trusted identifiers from our own options are formatted in.
        self.cursor.execute(
            "UPDATE %s SET %s = ?, %s = ? WHERE %s = ?" % (self.db_table, self.blob_column, self.size_column, self.fname_column),
            (binary, size, name))
    else:
        self.cursor.execute("INSERT INTO %s VALUES(?, ?, ?)"%(self.db_table), (name, binary, size) )
    self.connection.commit()
    return name
def exists(self, name):
    """Return True if a row for 'name' is present in the table."""
    # Bound parameter instead of string interpolation -> no SQL injection.
    row = self.cursor.execute(
        "SELECT %s from %s where %s = ?" % (self.fname_column, self.db_table, self.fname_column),
        (name,)).fetchone()
    return row is not None
def get_available_name(self, name, max_length=None):
    # Intentionally reuse the requested name unchanged: saving to an
    # existing filename overwrites the stored row (see _save) instead of
    # generating an alternative name. max_length is accepted for Django
    # storage-API compatibility and ignored.
    return name
def delete(self, name):
    """Delete the row for 'name'; a no-op when the file does not exist."""
    if self.exists(name):
        # Filename travels as a bound parameter -> no SQL injection.
        self.cursor.execute(
            "DELETE FROM %s WHERE %s = ?" % (self.db_table, self.fname_column),
            (name,))
        self.connection.commit()
def url(self, name):
    """Return the absolute URL for 'name', joined onto base_url.

    Backslashes are normalised to forward slashes for the web.
    Raises ValueError when no base_url was configured.
    """
    if self.base_url is None:
        raise ValueError("This file is not accessible via a URL.")
    # NOTE(review): relies on the Python 2 'urlparse' module imported at
    # file level; under Python 3 this would be urllib.parse.urljoin.
    return urlparse.urljoin(self.base_url, name).replace('\\', '/')
def size(self, name):
    """Return the stored size in bytes of 'name', or 0 when missing.

    Reads the dedicated size column instead of fetching and measuring
    the blob itself.
    """
    # Bound parameter instead of string interpolation -> no SQL injection.
    row = self.cursor.execute(
        "SELECT %s from %s where %s = ?" % (self.size_column, self.db_table, self.fname_column),
        (name,)).fetchone()
    if row is None:
        return 0
    else:
        return int(row[0])
| bsd-3-clause |
chauhanhardik/populo | common/lib/xmodule/xmodule/modulestore/store_utilities.py | 124 | 4490 | import re
import logging
from collections import namedtuple
import uuid
def _prefix_only_url_replace_regex(pattern):
"""
Match urls in quotes pulling out the fields from pattern
"""
return re.compile(ur"""
(?x) # flags=re.VERBOSE
(?P<quote>\\?['"]) # the opening quotes
{}
(?P=quote) # the first matching closing quote
""".format(pattern))
def rewrite_nonportable_content_links(source_course_id, dest_course_id, text):
    """
    rewrite any non-portable links to (->) relative links:
        /c4x/<org>/<course>/asset/<name> -> /static/<name>
        /jump_to/i4x://<org>/<course>/<category>/<name> -> /jump_to_id/<id>
    """
    def portable_asset_link_subtitution(match):
        # Rewrite a matched asset URL to the portable /static/<name> form,
        # keeping the original (possibly escaped) quote characters intact.
        quote = match.group('quote')
        block_id = match.group('block_id')
        return quote + '/static/' + block_id + quote

    def portable_jump_to_link_substitution(match):
        # Rewrite a matched jump_to URL to the portable /jump_to_id/<id> form.
        quote = match.group('quote')
        rest = match.group('block_id')
        return quote + '/jump_to_id/' + rest + quote

    # if something blows up, log the error and continue

    # create a serialized template for what the id will look like in the
    # source_course but with the block_id as a regex pattern
    placeholder_id = uuid.uuid4().hex
    asset_block_pattern = unicode(source_course_id.make_asset_key('asset', placeholder_id))
    asset_block_pattern = asset_block_pattern.replace(placeholder_id, r'(?P<block_id>.*?)')
    try:
        text = _prefix_only_url_replace_regex(asset_block_pattern).sub(portable_asset_link_subtitution, text)
    except Exception as exc:  # pylint: disable=broad-except
        # Best-effort rewriting: a malformed pattern must not abort the import.
        logging.warning("Error producing regex substitution %r for text = %r.\n\nError msg = %s", asset_block_pattern, text, str(exc))

    # Same trick for usage keys: serialize a template id, then swap the
    # placeholder category/block_id for capturing regex groups.
    placeholder_category = 'cat_{}'.format(uuid.uuid4().hex)
    usage_block_pattern = unicode(source_course_id.make_usage_key(placeholder_category, placeholder_id))
    usage_block_pattern = usage_block_pattern.replace(placeholder_category, r'(?P<category>[^/+@]+)')
    usage_block_pattern = usage_block_pattern.replace(placeholder_id, r'(?P<block_id>.*?)')
    jump_to_link_base = ur'/courses/{course_key_string}/jump_to/{usage_key_string}'.format(
        course_key_string=unicode(source_course_id), usage_key_string=usage_block_pattern
    )
    try:
        text = _prefix_only_url_replace_regex(jump_to_link_base).sub(portable_jump_to_link_substitution, text)
    except Exception as exc:  # pylint: disable=broad-except
        logging.warning("Error producing regex substitution %r for text = %r.\n\nError msg = %s", jump_to_link_base, text, str(exc))

    # Also, there commonly is a set of link URL's used in the format:
    # /courses/<org>/<course>/<name> which will be broken if migrated to a
    # different course_id, so let's rewrite those, but the target will also
    # be non-portable.
    #
    # Note: we only need to do this if we are changing course-id's
    #
    if source_course_id != dest_course_id:
        try:
            generic_courseware_link_base = u'/courses/{}/'.format(unicode(source_course_id))
            text = re.sub(_prefix_only_url_replace_regex(generic_courseware_link_base), portable_asset_link_subtitution, text)
        except Exception as exc:  # pylint: disable=broad-except
            logging.warning("Error producing regex substitution %r for text = %r.\n\nError msg = %s", source_course_id, text, str(exc))

    return text
# Single shared namedtuple type -- the previous implementation re-created the
# class on every call, which was wasteful and made each result a distinct type.
_DraftNode = namedtuple('draft_node', ['module', 'location', 'url', 'parent_location', 'parent_url', 'index'])


def draft_node_constructor(module, url, parent_url, location=None, parent_location=None, index=None):
    """
    Constructs a draft_node namedtuple with defaults.

    Field order of the result is (module, location, url, parent_location,
    parent_url, index); location, parent_location and index default to None.
    """
    return _DraftNode(module, location, url, parent_location, parent_url, index)
def get_draft_subtree_roots(draft_nodes):
    """
    Takes a list of draft_nodes, which are namedtuples, each of which identify
    itself and its parent.

    If a draft_node is in `draft_nodes`, then we expect for all its children
    should be in `draft_nodes` as well. Since `_import_draft` is recursive,
    we only want to import the roots of any draft subtrees contained in
    `draft_nodes`.

    This generator yields those roots.
    """
    # Use a set for O(1) membership tests; the previous list made this loop
    # O(n^2) over the number of draft nodes.
    urls = {draft_node.url for draft_node in draft_nodes}
    for draft_node in draft_nodes:
        if draft_node.parent_url not in urls:
            yield draft_node
| agpl-3.0 |
maartenq/ansible | lib/ansible/parsing/utils/yaml.py | 96 | 2809 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2017, Ansible Project
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from yaml import YAMLError
from ansible.errors import AnsibleParserError
from ansible.errors.yaml_strings import YAML_SYNTAX_ERROR
from ansible.module_utils._text import to_native
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
from ansible.parsing.ajson import AnsibleJSONDecoder
__all__ = ('from_yaml',)
def _handle_error(yaml_exc, file_name, show_content):
    '''
    Raise an AnsibleParserError describing *yaml_exc*. When the YAML
    exception carries a problem mark, an AnsibleBaseYAMLObject is built so
    the error class can point at the exact file/line/column of the fault.
    '''
    faulty = None
    if hasattr(yaml_exc, 'problem_mark'):
        # Translate the zero-based mark into the one-based position tuple
        # that the error renderer expects.
        faulty = AnsibleBaseYAMLObject()
        mark = yaml_exc.problem_mark
        faulty.ansible_pos = (file_name, mark.line + 1, mark.column + 1)
    raise AnsibleParserError(
        YAML_SYNTAX_ERROR % to_native(getattr(yaml_exc, 'problem', '')),
        obj=faulty,
        show_content=show_content,
        orig_exc=yaml_exc,
    )
def _safe_load(stream, file_name=None, vault_secrets=None):
    ''' Implements yaml.safe_load(), except using our custom loader class. '''
    loader = AnsibleLoader(stream, file_name, vault_secrets)
    try:
        return loader.get_single_data()
    finally:
        # Older versions of yaml don't have a dispose() method. Probe for it
        # with getattr instead of catching AttributeError around the call, so
        # an AttributeError raised *inside* dispose() is no longer masked.
        dispose = getattr(loader, 'dispose', None)
        if dispose is not None:
            dispose()
def from_yaml(data, file_name='<string>', show_content=True, vault_secrets=None):
    '''
    Parse *data* -- either a JSON or a YAML string -- into a Python data
    structure and return it.
    '''
    result = None
    try:
        # Give the JSON decoder access to any vault secrets first.
        AnsibleJSONDecoder.set_secrets(vault_secrets)
        # JSON is attempted before YAML: extra-vars JSON strings are not
        # always parsed correctly by the yaml parser.
        result = json.loads(data, cls=AnsibleJSONDecoder)
    except Exception:
        # Not JSON after all -- fall back to the YAML loader.
        try:
            result = _safe_load(data, file_name=file_name, vault_secrets=vault_secrets)
        except YAMLError as yaml_exc:
            _handle_error(yaml_exc, file_name, show_content)
    return result
| gpl-3.0 |
andela-ooladayo/django | tests/sitemaps_tests/test_http.py | 205 | 10731 | from __future__ import unicode_literals
import os
from datetime import date
from unittest import skipUnless
from django.apps import apps
from django.conf import settings
from django.contrib.sitemaps import GenericSitemap, Sitemap
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.test import ignore_warnings, modify_settings, override_settings
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.formats import localize
from django.utils.translation import activate, deactivate
from .base import SitemapTestsBase
from .models import TestModel
class HTTPSitemapTests(SitemapTestsBase):
    """Tests for the plain-HTTP sitemap views: index and section rendering,
    custom templates, Last-Modified / X-Robots-Tag headers, localization,
    caching and sites-framework edge cases."""

    @ignore_warnings(category=RemovedInDjango110Warning)
    def test_simple_sitemap_index(self):
        "A simple sitemap index can be rendered"
        # The URL for views.sitemap in tests/urls/http.py has been updated
        # with a name but since reversing by Python path is tried first
        # before reversing by name and works since we're giving
        # name='django.contrib.sitemaps.views.sitemap', we need to silence
        # the erroneous warning until reversing by dotted path is removed.
        # The test will work without modification when it's removed.
        response = self.client.get('/simple/index.xml')
        expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
        self.assertXMLEqual(response.content.decode('utf-8'), expected_content)

    @ignore_warnings(category=RemovedInDjango110Warning)
    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(os.path.dirname(upath(__file__)), 'templates')],
    }])
    def test_simple_sitemap_custom_index(self):
        "A simple sitemap index can be rendered with a custom template"
        # The URL for views.sitemap in tests/urls/http.py has been updated
        # with a name but since reversing by Python path is tried first
        # before reversing by name and works since we're giving
        # name='django.contrib.sitemaps.views.sitemap', we need to silence
        # the erroneous warning until reversing by dotted path is removed.
        # The test will work without modification when it's removed.
        response = self.client.get('/simple/custom-index.xml')
        expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
        self.assertXMLEqual(response.content.decode('utf-8'), expected_content)

    def test_simple_sitemap_section(self):
        "A simple sitemap section can be rendered"
        response = self.client.get('/simple/sitemap-simple.xml')
        expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
        self.assertXMLEqual(response.content.decode('utf-8'), expected_content)

    def test_simple_sitemap(self):
        "A simple sitemap can be rendered"
        response = self.client.get('/simple/sitemap.xml')
        expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
        self.assertXMLEqual(response.content.decode('utf-8'), expected_content)

    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(os.path.dirname(upath(__file__)), 'templates')],
    }])
    def test_simple_custom_sitemap(self):
        "A simple sitemap can be rendered with a custom template"
        response = self.client.get('/simple/custom-sitemap.xml')
        expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
        self.assertXMLEqual(response.content.decode('utf-8'), expected_content)

    def test_sitemap_last_modified(self):
        "Tests that Last-Modified header is set correctly"
        response = self.client.get('/lastmod/sitemap.xml')
        self.assertEqual(response['Last-Modified'], 'Wed, 13 Mar 2013 10:00:00 GMT')

    def test_sitemap_last_modified_date(self):
        """
        The Last-Modified header should be support dates (without time).
        """
        response = self.client.get('/lastmod/date-sitemap.xml')
        self.assertEqual(response['Last-Modified'], 'Wed, 13 Mar 2013 00:00:00 GMT')

    def test_sitemap_last_modified_tz(self):
        """
        The Last-Modified header should be converted from timezone aware dates
        to GMT.
        """
        response = self.client.get('/lastmod/tz-sitemap.xml')
        self.assertEqual(response['Last-Modified'], 'Wed, 13 Mar 2013 15:00:00 GMT')

    def test_sitemap_last_modified_missing(self):
        "Tests that Last-Modified header is missing when sitemap has no lastmod"
        response = self.client.get('/generic/sitemap.xml')
        self.assertFalse(response.has_header('Last-Modified'))

    def test_sitemap_last_modified_mixed(self):
        "Tests that Last-Modified header is omitted when lastmod not on all items"
        response = self.client.get('/lastmod-mixed/sitemap.xml')
        self.assertFalse(response.has_header('Last-Modified'))

    @skipUnless(settings.USE_I18N, "Internationalization is not enabled")
    @override_settings(USE_L10N=True)
    def test_localized_priority(self):
        "The priority value should not be localized (Refs #14164)"
        activate('fr')
        self.assertEqual('0,3', localize(0.3))
        # Retrieve the sitemap. Check that priorities
        # haven't been rendered in localized format
        response = self.client.get('/simple/sitemap.xml')
        self.assertContains(response, '<priority>0.5</priority>')
        self.assertContains(response, '<lastmod>%s</lastmod>' % date.today())
        deactivate()

    @modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'})
    def test_requestsite_sitemap(self):
        # Make sure hitting the flatpages sitemap without the sites framework
        # installed doesn't raise an exception.
        response = self.client.get('/simple/sitemap.xml')
        expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>http://testserver/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % date.today()
        self.assertXMLEqual(response.content.decode('utf-8'), expected_content)

    @skipUnless(apps.is_installed('django.contrib.sites'),
                "django.contrib.sites app not installed.")
    def test_sitemap_get_urls_no_site_1(self):
        """
        Check we get ImproperlyConfigured if we don't pass a site object to
        Sitemap.get_urls and no Site objects exist
        """
        Site.objects.all().delete()
        self.assertRaises(ImproperlyConfigured, Sitemap().get_urls)

    @modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'})
    def test_sitemap_get_urls_no_site_2(self):
        """
        Check we get ImproperlyConfigured when we don't pass a site object to
        Sitemap.get_urls if Site objects exists, but the sites framework is not
        actually installed.
        """
        self.assertRaises(ImproperlyConfigured, Sitemap().get_urls)

    def test_sitemap_item(self):
        """
        Check to make sure that the raw item is included with each
        Sitemap.get_url() url result.
        """
        test_sitemap = GenericSitemap({'queryset': TestModel.objects.all()})

        def is_testmodel(url):
            return isinstance(url['item'], TestModel)
        item_in_url_info = all(map(is_testmodel, test_sitemap.get_urls()))
        self.assertTrue(item_in_url_info)

    def test_cached_sitemap_index(self):
        """
        Check that a cached sitemap index can be rendered (#2713).
        """
        response = self.client.get('/cached/index.xml')
        expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/cached/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
        self.assertXMLEqual(response.content.decode('utf-8'), expected_content)

    @ignore_warnings(category=RemovedInDjango110Warning)
    def test_x_robots_sitemap(self):
        # The URL for views.sitemap in tests/urls/http.py has been updated
        # with a name but since reversing by Python path is tried first
        # before reversing by name and works since we're giving
        # name='django.contrib.sitemaps.views.sitemap', we need to silence
        # the erroneous warning until reversing by dotted path is removed.
        # The test will work without modification when it's removed.
        response = self.client.get('/simple/index.xml')
        self.assertEqual(response['X-Robots-Tag'], 'noindex, noodp, noarchive')
        response = self.client.get('/simple/sitemap.xml')
        self.assertEqual(response['X-Robots-Tag'], 'noindex, noodp, noarchive')

    def test_empty_sitemap(self):
        response = self.client.get('/empty/sitemap.xml')
        self.assertEqual(response.status_code, 200)

    @override_settings(LANGUAGES=(('en', 'English'), ('pt', 'Portuguese')))
    def test_simple_i18nsitemap_index(self):
        "A simple i18n sitemap index can be rendered"
        response = self.client.get('/simple/i18n.xml')
        expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>{0}/en/i18n/testmodel/{1}/</loc><changefreq>never</changefreq><priority>0.5</priority></url><url><loc>{0}/pt/i18n/testmodel/{1}/</loc><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""".format(self.base_url, self.i18n_model.pk)
        self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
| bsd-3-clause |
nburn42/tensorflow | tensorflow/contrib/learn/python/learn/estimators/regression_test.py | 137 | 1879 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear regression tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python import learn
from tensorflow.python.platform import test
class RegressionTest(test.TestCase):
  """Linear regression tests."""

  def testLinearRegression(self):
    # Build a synthetic linear dataset y = x.w + noise + bias and check that
    # the estimator recovers the generating weights.
    rng = np.random.RandomState(67)
    n = 1000
    n_weights = 10
    bias = 2
    x = rng.uniform(-1, 1, (n, n_weights))
    weights = 10 * rng.randn(n_weights)
    y = np.dot(x, weights)
    y += rng.randn(len(x)) * 0.05 + rng.normal(bias, 0.01)
    regressor = learn.LinearRegressor(
        feature_columns=learn.infer_real_valued_columns_from_input(x),
        optimizer="SGD")
    regressor.fit(x, y, steps=200)
    self.assertIn("linear//weight", regressor.get_variable_names())
    regressor_weights = regressor.get_variable_value("linear//weight")
    # Have to flatten weights since they come in (x, 1) shape.
    self.assertAllClose(weights, regressor_weights.flatten(), rtol=0.01)
    # TODO(ispir): Disable centered_bias.
    # assert abs(bias - regressor.bias_) < 0.1
# Standard TensorFlow test entry point: run all test cases in this module.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
shovalsa/shecodes_website | scsite/forms.py | 1 | 1664 | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.forms import ModelForm
from .models import Profile
from django.utils.translation import ugettext_lazy as _
class SignUpForm(UserCreationForm):
    """Registration form extending UserCreationForm with name and email fields."""

    # Name fields are optional; email is required for account contact.
    first_name = forms.CharField(max_length=30, required=False, help_text=_('Optional'), label=_('First name'))
    last_name = forms.CharField(max_length=30, required=False, help_text=_('Optional'), label=_('Last name'))
    email = forms.EmailField(max_length=254, help_text=_('Required. Inform a valid email address.'), label=_('Email address'))

    class Meta:
        model = User
        fields = ('username', 'first_name', 'last_name', 'email', 'password1', 'password2', )
class ProfileForm(ModelForm):
    """ModelForm for editing the extra Profile data attached to a user."""

    class Meta:
        model = Profile
        fields = ('track', 'main_branch', 'personal_background', 'avatar')
class UpdateUser(ModelForm):
    """ModelForm for updating the basic built-in User fields."""

    class Meta:
        model = User
        fields = ('first_name', 'last_name', 'email')
class MyForm(forms.Form):
    """Base form providing contact-page specific HTML rendering."""

    def as_contact(self):
        # Similar to the built-in as_p(), but puts the field on its own line
        # under its label (via <br>) and renders help text inline in a span.
        return self._html_output(
            normal_row='<p%(html_class_attr)s> %(label)s <br> %(field)s %(help_text)s </p>',
            error_row='%s',
            row_ender='</p>',
            help_text_html=' <span class="helptext">%s</span>',
            errors_on_separate_row=True)
class ContactForm(MyForm):
    """Contact-page form; every field is required."""

    contact_name = forms.CharField(required=True, label=_("Name"))
    contact_email = forms.EmailField(required=True, label=_("Email"))
    subject = forms.CharField(required=True, label=_('Subject'))
    message = forms.CharField(required=True, widget=forms.Textarea, label=_('Message'))
| apache-2.0 |
Peerapps/Peerapps | bitcoin/bloom.py | 18 | 6091 | # Copyright (C) 2013-2014 The python-bitcoinlib developers
#
# This file is part of python-bitcoinlib.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-bitcoinlib, including this file, may be copied, modified,
# propagated, or distributed except according to the terms contained in the
# LICENSE file.
"""Bloom filter support"""
from __future__ import absolute_import, division, print_function, unicode_literals
import struct
import sys
import math
import bitcoin.core
import bitcoin.core.serialize
def _ROTL32(x, r):
assert x <= 0xFFFFFFFF
return ((x << r) & 0xFFFFFFFF) | (x >> (32 - r))
def MurmurHash3(nHashSeed, vDataToHash):
    """MurmurHash3 (x86_32)

    Used for bloom filters. See http://code.google.com/p/smhasher/source/browse/trunk/MurmurHash3.cpp

    nHashSeed   - 32-bit unsigned integer seed
    vDataToHash - bytes to hash

    Returns the hash as a 32-bit unsigned integer.
    """
    assert nHashSeed <= 0xFFFFFFFF

    def rotl32(x, r):
        # 32-bit rotate-left; every x passed in here is already masked.
        return ((x << r) & 0xFFFFFFFF) | (x >> (32 - r))

    # In Py3 indexing bytes returns numbers, not characters. (The previous
    # version re-imported sys inside the function, shadowing the module-level
    # import for no benefit.)
    if sys.version_info[0] >= 3:
        bord = lambda x: x
    else:
        bord = ord

    h1 = nHashSeed
    c1 = 0xcc9e2d51
    c2 = 0x1b873593

    # body: mix in each complete 4-byte little-endian block
    tail_start = (len(vDataToHash) // 4) * 4
    for i in range(0, tail_start, 4):
        k1 = struct.unpack(b"<L", vDataToHash[i:i+4])[0]

        k1 = (k1 * c1) & 0xFFFFFFFF
        k1 = rotl32(k1, 15)
        k1 = (k1 * c2) & 0xFFFFFFFF

        h1 ^= k1
        h1 = rotl32(h1, 13)
        h1 = (((h1 * 5) & 0xFFFFFFFF) + 0xe6546b64) & 0xFFFFFFFF

    # tail: fold in the remaining 0-3 bytes
    k1 = 0
    if len(vDataToHash) & 3 >= 3:
        k1 ^= bord(vDataToHash[tail_start + 2]) << 16
    if len(vDataToHash) & 3 >= 2:
        k1 ^= bord(vDataToHash[tail_start + 1]) << 8
    if len(vDataToHash) & 3 >= 1:
        k1 ^= bord(vDataToHash[tail_start])

    # An empty tail leaves k1 == 0, making this mixing step a no-op.
    k1 &= 0xFFFFFFFF
    k1 = (k1 * c1) & 0xFFFFFFFF
    k1 = rotl32(k1, 15)
    k1 = (k1 * c2) & 0xFFFFFFFF
    h1 ^= k1

    # finalization (fmix32)
    h1 ^= len(vDataToHash) & 0xFFFFFFFF
    h1 ^= (h1 & 0xFFFFFFFF) >> 16
    h1 *= 0x85ebca6b
    h1 ^= (h1 & 0xFFFFFFFF) >> 13
    h1 *= 0xc2b2ae35
    h1 ^= (h1 & 0xFFFFFFFF) >> 16

    return h1 & 0xFFFFFFFF
class CBloomFilter(bitcoin.core.serialize.Serializable):
    """Serializable BIP37-style bloom filter."""

    # 20,000 items with fp rate < 0.1% or 10,000 items and <0.0001%
    MAX_BLOOM_FILTER_SIZE = 36000
    MAX_HASH_FUNCS = 50

    # nFlags values: how matched elements are automatically added (BIP37).
    UPDATE_NONE = 0
    UPDATE_ALL = 1
    UPDATE_P2PUBKEY_ONLY = 2
    UPDATE_MASK = 3

    def __init__(self, nElements, nFPRate, nTweak, nFlags):
        """Create a new bloom filter

        The filter will have a given false-positive rate when filled with the
        given number of elements.

        Note that if the given parameters will result in a filter outside the
        bounds of the protocol limits, the filter created will be as close to
        the given parameters as possible within the protocol limits. This will
        apply if nFPRate is very low or nElements is unreasonably high.

        nTweak is a constant which is added to the seed value passed to the
        hash function It should generally always be a random value (and is
        largely only exposed for unit testing)

        nFlags should be one of the UPDATE_* enums (but not _MASK)
        """
        LN2SQUARED = 0.4804530139182014246671025263266649717305529515945455
        LN2 = 0.6931471805599453094172321214581765680755001343602552
        self.vData = bytearray(int(min(-1 / LN2SQUARED * nElements * math.log(nFPRate), self.MAX_BLOOM_FILTER_SIZE * 8) / 8))
        self.nHashFuncs = int(min(len(self.vData) * 8 / nElements * LN2, self.MAX_HASH_FUNCS))
        self.nTweak = nTweak
        self.nFlags = nFlags

    def bloom_hash(self, nHashNum, vDataToHash):
        """Return the bit index for hash function nHashNum over vDataToHash."""
        return MurmurHash3(((nHashNum * 0xFBA4C795) + self.nTweak) & 0xFFFFFFFF, vDataToHash) % (len(self.vData) * 8)

    # Per-bit masks for setting/testing bit (nIndex & 7) of a byte.
    __bit_mask = bytearray([0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80])

    def insert(self, elem):
        """Insert an element in the filter.

        elem may be a COutPoint or bytes
        """
        if isinstance(elem, bitcoin.core.COutPoint):
            elem = elem.serialize()

        # A single 0xff byte is the degenerate "matches everything" filter;
        # nothing further to set.
        if len(self.vData) == 1 and self.vData[0] == 0xff:
            return

        for i in range(0, self.nHashFuncs):
            nIndex = self.bloom_hash(i, elem)
            # Sets bit nIndex of vData
            self.vData[nIndex >> 3] |= self.__bit_mask[7 & nIndex]

    def contains(self, elem):
        """Test if the filter contains an element

        elem may be a COutPoint or bytes
        """
        if isinstance(elem, bitcoin.core.COutPoint):
            elem = elem.serialize()

        if len(self.vData) == 1 and self.vData[0] == 0xff:
            return True

        for i in range(0, self.nHashFuncs):
            nIndex = self.bloom_hash(i, elem)
            if not (self.vData[nIndex >> 3] & self.__bit_mask[7 & nIndex]):
                return False
        return True

    def IsWithinSizeConstraints(self):
        """True when the filter respects the protocol size/hash-count limits."""
        return len(self.vData) <= self.MAX_BLOOM_FILTER_SIZE and self.nHashFuncs <= self.MAX_HASH_FUNCS

    def IsRelevantAndUpdate(tx, tx_hash):
        # Not useful for a client, so not implemented yet.
        # NOTE(review): signature lacks an explicit 'self' ('tx' occupies its
        # slot); left unchanged since the method unconditionally raises and
        # renaming parameters would alter the public interface.
        raise NotImplementedError

    # Fixed-size trailer: nHashFuncs (uint32), nTweak (uint32), nFlags (uint8).
    __struct = struct.Struct(b'<IIB')

    @classmethod
    def stream_deserialize(cls, f):
        """Deserialize a filter from stream f.

        The previous implementation referenced 'self' before it existed,
        used the undefined name '_ser_read', and called cls() without its
        required constructor arguments; all three made it unusable.
        """
        # Variable-length bit field first, then the fixed trailer.
        vData = bitcoin.core.serialize.BytesSerializer.stream_deserialize(f)
        (nHashFuncs,
         nTweak,
         nFlags) = cls.__struct.unpack(
            bitcoin.core.serialize.ser_read(f, cls.__struct.size))
        # Bypass __init__ (which derives vData/nHashFuncs from an element
        # count and FP rate) and populate the deserialized fields directly.
        self = cls.__new__(cls)
        self.vData = bytearray(vData)  # keep mutable, matching __init__
        self.nHashFuncs = nHashFuncs
        self.nTweak = nTweak
        self.nFlags = nFlags
        return self

    def stream_serialize(self, f):
        """Serialize the filter to stream f."""
        if sys.version_info[0] >= 3:
            bitcoin.core.serialize.BytesSerializer.stream_serialize(self.vData, f)
        else:
            # 2.7 has problems with f.write(bytearray())
            bitcoin.core.serialize.BytesSerializer.stream_serialize(bytes(self.vData), f)
        f.write(self.__struct.pack(self.nHashFuncs, self.nTweak, self.nFlags))
# Public API of this module.
__all__ = (
    'MurmurHash3',
    'CBloomFilter',
)
| mit |
binarydud/django-oscar | src/oscar/apps/dashboard/app.py | 34 | 2262 | from django.conf.urls import url, include
from oscar.core.application import Application
from oscar.core.loading import get_class
class DashboardApplication(Application):
    """Aggregates every dashboard sub-application under one URL namespace."""

    name = 'dashboard'

    # The index view is visible to staff, or to users holding the partner
    # dashboard-access permission.
    permissions_map = {
        'index': (['is_staff'], ['partner.dashboard_access']),
    }

    # Sub-apps are resolved through get_class so projects can override them.
    index_view = get_class('dashboard.views', 'IndexView')
    reports_app = get_class('dashboard.reports.app', 'application')
    orders_app = get_class('dashboard.orders.app', 'application')
    users_app = get_class('dashboard.users.app', 'application')
    catalogue_app = get_class('dashboard.catalogue.app', 'application')
    promotions_app = get_class('dashboard.promotions.app', 'application')
    pages_app = get_class('dashboard.pages.app', 'application')
    partners_app = get_class('dashboard.partners.app', 'application')
    offers_app = get_class('dashboard.offers.app', 'application')
    ranges_app = get_class('dashboard.ranges.app', 'application')
    reviews_app = get_class('dashboard.reviews.app', 'application')
    vouchers_app = get_class('dashboard.vouchers.app', 'application')
    comms_app = get_class('dashboard.communications.app', 'application')
    shipping_app = get_class('dashboard.shipping.app', 'application')

    def get_urls(self):
        """Mount the index view plus every sub-application's URL patterns."""
        urls = [
            url(r'^$', self.index_view.as_view(), name='index'),
            url(r'^catalogue/', include(self.catalogue_app.urls)),
            url(r'^reports/', include(self.reports_app.urls)),
            url(r'^orders/', include(self.orders_app.urls)),
            url(r'^users/', include(self.users_app.urls)),
            url(r'^content-blocks/', include(self.promotions_app.urls)),
            url(r'^pages/', include(self.pages_app.urls)),
            url(r'^partners/', include(self.partners_app.urls)),
            url(r'^offers/', include(self.offers_app.urls)),
            url(r'^ranges/', include(self.ranges_app.urls)),
            url(r'^reviews/', include(self.reviews_app.urls)),
            url(r'^vouchers/', include(self.vouchers_app.urls)),
            url(r'^comms/', include(self.comms_app.urls)),
            url(r'^shipping/', include(self.shipping_app.urls)),
        ]
        return self.post_process_urls(urls)
# Module-level singleton imported by the project's URL configuration.
application = DashboardApplication()
| bsd-3-clause |
csm0042/rpihome_v3 | rpihome_v3/helpers/log_support.py | 1 | 2439 | #!/usr/bin/python3
""" logging.py: Log file setup helper functions
"""
# Import Required Libraries (Standard, Third Party, Local) ********************
import logging
import logging.handlers
import os
import sys
# Authorship Info *************************************************************
__author__ = "Christopher Maue"
__copyright__ = "Copyright 2016, The Maue-Home Project"
__credits__ = ["Christopher Maue"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Christopher Maue"
__email__ = "csmaue@gmail.com"
__status__ = "Development"
# Logging helper functions ****************************************************
def setup_log_handlers(name, debug_logfile, info_logfile):
    """ Function to configure root logger with three handlers, one to stream
    info and up messages, plus two additional file handlers for debug and info
    messages

    name          - logger name to configure
    debug_logfile - path of the rotating file receiving DEBUG and up
    info_logfile  - path of the rotating file receiving INFO and up
    Returns the configured logger.
    """
    root = logging.getLogger(name)
    root.setLevel(logging.DEBUG)
    # Close handlers left over from a previous configuration before dropping
    # them, so repeated calls do not leak open file descriptors.
    for old_handler in root.handlers:
        try:
            old_handler.close()
        except Exception:
            pass  # a handler that fails to close must not block re-config
    root.handlers = []
    # Check if log file directories exist, create them if they don't
    os.makedirs(os.path.dirname(debug_logfile), exist_ok=True)
    os.makedirs(os.path.dirname(info_logfile), exist_ok=True)
    # Create desired handlers (daily rotation, one week of backups)
    debug_handler = logging.handlers.TimedRotatingFileHandler(
        debug_logfile, when='d', interval=1, backupCount=7)
    info_handler = logging.handlers.TimedRotatingFileHandler(
        info_logfile, when='d', interval=1, backupCount=7)
    console_handler = logging.StreamHandler(sys.stdout)
    # Set logging levels for each handler
    debug_handler.setLevel(logging.DEBUG)
    info_handler.setLevel(logging.INFO)
    console_handler.setLevel(logging.INFO)
    # Both file handlers share one detailed format; the console is terse.
    # (The two previous per-file formatters were identical duplicates.)
    file_formatter = logging.Formatter(
        '%(asctime)-25s %(levelname)-10s %(filename)-18s %(funcName)-22s %(message)s')
    console_formatter = logging.Formatter(
        '%(asctime)s - %(levelname)s - %(message)s')
    debug_handler.setFormatter(file_formatter)
    info_handler.setFormatter(file_formatter)
    console_handler.setFormatter(console_formatter)
    # Add handlers to root logger
    root.addHandler(debug_handler)
    root.addHandler(info_handler)
    root.addHandler(console_handler)
    root.debug("logging configured with 3 handlers")
    return root
| gpl-3.0 |
kamalx/edx-platform | cms/djangoapps/contentstore/features/course-export.py | 62 | 2771 | # pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
from lettuce import world, step
from component_settings_editor_helpers import enter_xml_in_advanced_problem
from nose.tools import assert_true, assert_equal
from contentstore.utils import reverse_usage_url
@step('I go to the export page$')
def i_go_to_the_export_page(step):
    # Open the Tools menu, then follow the export navigation entry.
    world.click_tools()
    world.css_click('li.nav-course-tools-export a')
@step('I export the course$')
def i_export_the_course(step):
    # Navigate to the export page (reuses the step above), then trigger
    # the actual export action.
    step.given('I go to the export page')
    world.css_click('a.action-export')
@step('I edit and enter bad XML$')
def i_enter_bad_xml(step):
    # The <verbatim> element is not valid capa problem XML, so saving this
    # content should make the subsequent export fail with an error dialog.
    enter_xml_in_advanced_problem(
        step,
        """<problem><h1>Smallest Canvas</h1>
<p>You want to make the smallest canvas you can.</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false"><verbatim><canvas id="myCanvas" width = 10 height = 100> </canvas></verbatim></choice>
<choice correct="true"><code><canvas id="myCanvas" width = 10 height = 10> </canvas></code></choice>
</choicegroup>
</multiplechoiceresponse>
</problem>"""
    )
@step('I edit and enter an ampersand$')
def i_enter_an_ampersand(step):
    """Type problem XML containing a bare ampersand, which is not well-formed."""
    markup = "<problem>&</problem>"
    enter_xml_in_advanced_problem(step, markup)
@step('I get an error dialog$')
def get_an_error_dialog(step):
    """Assert that the error prompt dialog is present on the page."""
    error_dialog_css = "div.prompt.error"
    assert_true(world.is_css_present(error_dialog_css))
@step('I can click to go to the unit with the error$')
def i_click_on_error_dialog(step):
    """Follow the error dialog's link and verify that the browser lands on
    the vertical (unit) page containing the failed component.
    """
    world.wait_for_visible(".button.action-primary")
    world.click_link_by_text('Correct failed component')
    # Build the expected "Problem <usage-key prefix>" label. The 'ignore'
    # placeholder stands in for the unknown block id and is stripped off,
    # leaving only the stable prefix of the usage key to match against.
    problem_string = unicode(world.scenario_dict['COURSE'].id.make_usage_key("problem", 'ignore'))
    problem_string = u"Problem {}".format(problem_string[:problem_string.rfind('ignore')])
    assert_true(
        world.css_html("span.inline-error").startswith(problem_string),
        u"{} does not start with {}".format(
            world.css_html("span.inline-error"), problem_string
        ))
    # we don't know the actual ID of the vertical. So just check that we did go to a
    # vertical page in the course (there should only be one).
    vertical_usage_key = world.scenario_dict['COURSE'].id.make_usage_key("vertical", "test")
    vertical_url = reverse_usage_url('container_handler', vertical_usage_key)
    # Remove the trailing "/None" from the URL - we don't know the course ID, so we just want to
    # check that we visited a vertical URL.
    if vertical_url.endswith("/test") or vertical_url.endswith("@test"):
        vertical_url = vertical_url[:-5]
    # Exactly one occurrence of the vertical URL prefix in the current URL.
    assert_equal(1, world.browser.url.count(vertical_url))
| agpl-3.0 |
sungmaster/teajudge | sandbox/pysandbox/packages/sandbox/__init__.py | 2 | 9491 | ################################################################################
# The Sandbox Libraries (Python) Package Initializer #
# #
# Copyright (C) 2004-2009, 2011-2013 LIU Yu, pineapple.liu@gmail.com #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions are met: #
# #
# 1. Redistributions of source code must retain the above copyright notice, #
# this list of conditions and the following disclaimer. #
# #
# 2. Redistributions in binary form must reproduce the above copyright notice, #
# this list of conditions and the following disclaimer in the documentation #
# and/or other materials provided with the distribution. #
# #
# 3. Neither the name of the author(s) nor the names of its contributors may #
# be used to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" #
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE #
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE #
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR #
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF #
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS #
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN #
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) #
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
################################################################################
"""The Sandbox Libraries (Python)
The sandbox libraries (libsandbox & pysandbox) provide API's in C/C++ and Python
for executing and profiling simple (single process) programs in a restricted
environment, or sandbox. These API's can help developers to build automated
profiling tools and watchdogs that capture and block the runtime behaviours of
binary programs according to configurable / programmable policies.
The sandbox libraries are distributed under the terms of the New BSD license;
please refer to the plain text file named COPYING in individual packages.
Project Homepage: http://openjudge.net/~liuyu/Project/LibSandbox
GETTING STARTED
$ python
>>> from sandbox import *
>>> s = Sandbox(["/foo/bar.exe", "arg1", "arg2"])
>>> s.run()
...
>>> s.probe()
...
"""
from . import _sandbox
from ._sandbox import SandboxEvent, SandboxAction, SandboxPolicy
from ._sandbox import __version__, __author__
class Sandbox(_sandbox.Sandbox):
    """Python-level convenience wrapper over the native ``_sandbox.Sandbox``.

    Adds a backward-compatible ``policy`` keyword to the constructor, a
    ``None``-returning :meth:`dump`, and a :meth:`probe` that can emit the
    legacy flat statistics keys alongside the native tuples.
    """

    def __init__(self, *args, **kwds):
        # Since 0.3.4-3 the native base completes sandbox initialization in
        # __new__() rather than __init__(), and initialized sandboxes expose
        # a *policy* attribute for policy assignment. That guarantees atomic
        # initialization, but breaks subclasses that used to monkey-patch a
        # *policy* argument in their own __init__(). Emulate the old-style
        # *policy patching* here via the new-style *policy assignment*.
        super(Sandbox, self).__init__()
        policy = kwds.get('policy')
        if isinstance(policy, SandboxPolicy):
            self.policy = policy

    def run(self):
        """Execute the sandboxed program, blocking the caller until the
        program finishes (or is terminated).
        """
        return super(Sandbox, self).run()

    def dump(self, typeid, address):
        """Copy a memory block starting at *address* in the sandboxed
        program's address space and return it as a Python object.

        The return type depends on *typeid*:
          - T_CHAR, T_BYTE, T_UBYTE: int
          - T_SHORT, T_USHORT, T_INT, T_UINT, T_LONG, T_ULONG: int
          - T_FLOAT, T_DOUBLE: float
          - T_STRING: str

        Returns None when *address* is invalid for dumping; any other
        failure propagates as an exception.
        """
        try:
            result = super(Sandbox, self).dump(typeid, address)
        except ValueError:
            # The native layer signals an un-dumpable address via ValueError.
            return None
        return result

    def probe(self, compatible=True):
        """Return a dict of runtime statistics for the sandboxed program.

        The result always contains:
          - cpu_info (4-tuple):
              0 (int): cpu clock time usage (msec)
              1 (int): cpu time usage in user mode (msec)
              2 (int): cpu time usage in kernel mode (msec)
              3 (long): time-stamp counter (# of instructions)
          - mem_info (6-tuple):
              0 (int): runtime virtual memory usage (kilobytes)
              1 (int): peak virtual memory usage (kilobytes)
              2 (int): runtime resident set size (kilobytes)
              3 (int): peak resident set size (kilobytes)
              4 (int): minor page faults (# of pages)
              5 (int): major page faults (# of pages)
          - signal_info (2-tuple): last / current signal number and code
          - syscall_info (2-tuple): last / current system call number and mode
          - elapsed (int): elapsed wallclock time since started (msec)
          - exitcode (int): exit status of the sandboxed program

        When *compatible* is True, the legacy flat keys are added as well:
        cpu, cpu.usr, cpu.sys, cpu.tsc, mem.vsize, mem.rss, mem.minflt,
        mem.majflt, signal and syscall.
        """
        stats = super(Sandbox, self).probe()
        if not (stats and compatible):
            return stats
        # Flatten cpu_info into the legacy scalar keys.
        clock_ms, user_ms, kernel_ms, tsc = stats['cpu_info'][:4]
        stats['cpu'] = clock_ms
        stats['cpu.usr'] = user_ms
        stats['cpu.sys'] = kernel_ms
        stats['cpu.tsc'] = tsc
        # Flatten mem_info; the legacy keys report the *peak* figures.
        _vm, vm_peak, _rss, rss_peak, minor_flt, major_flt = stats['mem_info'][:6]
        stats['mem.vsize'] = vm_peak
        stats['mem.rss'] = rss_peak
        stats['mem.minflt'] = minor_flt
        stats['mem.majflt'] = major_flt
        # Only the numbers (not the codes / modes) are exposed flat.
        stats['signal'] = stats['signal_info'][0]
        stats['syscall'] = stats['syscall_info'][0]
        return stats
# Re-export the native enumeration values at module level so callers can
# write e.g. ``S_EVENT_EXIT`` after ``from sandbox import *``.

# sandbox event types (reported to policy callbacks via SandboxEvent)
S_EVENT_ERROR = SandboxEvent.S_EVENT_ERROR
S_EVENT_EXIT = SandboxEvent.S_EVENT_EXIT
S_EVENT_SIGNAL = SandboxEvent.S_EVENT_SIGNAL
S_EVENT_SYSCALL = SandboxEvent.S_EVENT_SYSCALL
S_EVENT_SYSRET = SandboxEvent.S_EVENT_SYSRET
S_EVENT_QUOTA = SandboxEvent.S_EVENT_QUOTA
# sandbox action types (returned by policies via SandboxAction)
S_ACTION_CONT = SandboxAction.S_ACTION_CONT
S_ACTION_KILL = SandboxAction.S_ACTION_KILL
S_ACTION_FINI = SandboxAction.S_ACTION_FINI
# sandbox quota types (indices into the sandbox quota table)
S_QUOTA_WALLCLOCK = Sandbox.S_QUOTA_WALLCLOCK
S_QUOTA_CPU = Sandbox.S_QUOTA_CPU
S_QUOTA_MEMORY = Sandbox.S_QUOTA_MEMORY
S_QUOTA_DISK = Sandbox.S_QUOTA_DISK
# sandbox status (lifecycle phases of a sandbox object)
S_STATUS_PRE = Sandbox.S_STATUS_PRE
S_STATUS_RDY = Sandbox.S_STATUS_RDY
S_STATUS_EXE = Sandbox.S_STATUS_EXE
S_STATUS_BLK = Sandbox.S_STATUS_BLK
S_STATUS_FIN = Sandbox.S_STATUS_FIN
# sandbox native results (outcome codes of a finished sandbox)
S_RESULT_PD = Sandbox.S_RESULT_PD
S_RESULT_OK = Sandbox.S_RESULT_OK
S_RESULT_RF = Sandbox.S_RESULT_RF
S_RESULT_ML = Sandbox.S_RESULT_ML
S_RESULT_OL = Sandbox.S_RESULT_OL
S_RESULT_TL = Sandbox.S_RESULT_TL
S_RESULT_RT = Sandbox.S_RESULT_RT
S_RESULT_AT = Sandbox.S_RESULT_AT
S_RESULT_IE = Sandbox.S_RESULT_IE
S_RESULT_BP = Sandbox.S_RESULT_BP
S_RESULT_R0 = Sandbox.S_RESULT_R0
S_RESULT_R1 = Sandbox.S_RESULT_R1
S_RESULT_R2 = Sandbox.S_RESULT_R2
S_RESULT_R3 = Sandbox.S_RESULT_R3
S_RESULT_R4 = Sandbox.S_RESULT_R4
S_RESULT_R5 = Sandbox.S_RESULT_R5
# datatype indicators (typeid values accepted by Sandbox.dump())
T_BYTE = Sandbox.T_BYTE
T_SHORT = Sandbox.T_SHORT
T_INT = Sandbox.T_INT
T_LONG = Sandbox.T_LONG
T_UBYTE = Sandbox.T_UBYTE
T_USHORT = Sandbox.T_USHORT
T_UINT = Sandbox.T_UINT
T_ULONG = Sandbox.T_ULONG
T_FLOAT = Sandbox.T_FLOAT
T_DOUBLE = Sandbox.T_DOUBLE
T_CHAR = Sandbox.T_CHAR
T_STRING = Sandbox.T_STRING
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.